# Probe for the optional paramiko dependency; the flag below is consulted
# when debug logging is switched on, so a missing paramiko is not an error.
try:
    import paramiko
    have_paramiko = True
except ImportError:
    have_paramiko = False

__all__ = [
    '__version__',
    'enable_debug'
]

__version__ = '2.2.1'


def enable_debug(fo):
    """
    Enable library wide debugging to a file-like object.

    :param fo: Where to append debugging information
    :type fo: File like object, only write operations are used.
    """
    # Imported lazily so that merely importing libcloud stays free of
    # heavyweight side effects.
    from libcloud.common.base import Connection
    from libcloud.utils.loggingconnection import LoggingConnection

    LoggingConnection.log = fo
    Connection.conn_class = LoggingConnection


def _init_once():
    """
    Utility function that is run once on library import.

    This checks for the LIBCLOUD_DEBUG environment variable, which if it
    exists is where we will log debug information about the provider
    transports.
    """
    path = os.getenv('LIBCLOUD_DEBUG')
    if not path:
        return

    # Late import to avoid setup.py related side effects
    from libcloud.utils.py3 import PY3

    # Opening /dev/stderr or /dev/stdout in append mode throws an
    # "illegal seek" exception on Python 3, so use plain write mode there.
    if PY3 and path in ('/dev/stderr', '/dev/stdout'):
        mode = 'w'
    else:
        mode = 'a'

    fo = codecs.open(path, mode, encoding='utf8')
    enable_debug(fo)

    if have_paramiko:
        paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)


_init_once()
"hs1.8xlarge": 5.4, "i2.xlarge": 1.001, "i2.2xlarge": 2.001, "i2.4xlarge": 4.002, "i2.8xlarge": 8.004, "i3.large": 0.183, "i3.xlarge": 0.366, "i3.2xlarge": 0.732, "i3.4xlarge": 1.464, "i3.8xlarge": 2.928, "i3.16xlarge": 5.856, "m1.small": 0.061, "m1.medium": 0.122, "m1.large": 0.243, "m1.xlarge": 0.486, "m2.xlarge": 0.287, "m2.2xlarge": 0.575, "m2.4xlarge": 1.15, "m3.medium": 0.096, "m3.large": 0.193, "m3.xlarge": 0.385, "m3.2xlarge": 0.77, "m4.large": 0.123, "m4.xlarge": 0.246, "m4.2xlarge": 0.492, "m4.4xlarge": 0.984, "m4.10xlarge": 2.46, "m4.16xlarge": 3.936, "p2.xlarge": 1.465, "p2.8xlarge": 11.72, "p2.16xlarge": 23.44, "r3.large": 0.2, "r3.xlarge": 0.399, "r3.2xlarge": 0.798, "r3.4xlarge": 1.596, "r3.8xlarge": 3.192, "r4.large": 0.16, "r4.xlarge": 0.32, "r4.2xlarge": 0.64, "r4.4xlarge": 1.28, "r4.8xlarge": 2.56, "r4.16xlarge": 5.12, "t1.micro": 0.026, "t2.micro": 0.016, "t2.small": 0.032, "t2.medium": 0.064, "t2.large": 0.128, "t2.xlarge": 0.256, "t2.nano": 0.008, "t2.2xlarge": 0.512, "x1.16xlarge": 9.671, "x1.32xlarge": 19.341 }, "ec2_ap_south_1": { "c4.large": 0.11, "c4.xlarge": 0.22, "c4.2xlarge": 0.439, "c4.4xlarge": 0.878, "c4.8xlarge": 1.756, "d2.xlarge": 0.827, "d2.2xlarge": 1.653, "d2.4xlarge": 3.306, "d2.8xlarge": 6.612, "i2.xlarge": 0.967, "i2.2xlarge": 1.933, "i2.4xlarge": 3.867, "i2.8xlarge": 7.733, "i3.large": 0.177, "i3.xlarge": 0.354, "i3.2xlarge": 0.708, "i3.4xlarge": 1.416, "i3.8xlarge": 2.832, "i3.16xlarge": 5.664, "m4.large": 0.123, "m4.xlarge": 0.246, "m4.2xlarge": 0.492, "m4.4xlarge": 0.984, "m4.10xlarge": 2.46, "m4.16xlarge": 3.936, "r3.large": 0.19, "r3.xlarge": 0.379, "r3.2xlarge": 0.758, "r3.4xlarge": 1.516, "r3.8xlarge": 3.032, "r4.large": 0.152, "r4.xlarge": 0.304, "r4.2xlarge": 0.608, "r4.4xlarge": 1.216, "r4.8xlarge": 2.432, "r4.16xlarge": 4.864, "t2.micro": 0.015, "t2.small": 0.03, "t2.medium": 0.059, "t2.large": 0.119, "t2.xlarge": 0.238, "t2.nano": 0.0074, "t2.2xlarge": 0.476, "x1.16xlarge": 9.187, "x1.32xlarge": 18.374 }, 
"ec2_ap_southeast": { "c1.medium": 0.164, "c1.xlarge": 0.655, "c3.large": 0.132, "c3.xlarge": 0.265, "c3.2xlarge": 0.529, "c3.4xlarge": 1.058, "c3.8xlarge": 2.117, "c4.large": 0.115, "c4.xlarge": 0.231, "c4.2xlarge": 0.462, "c4.4xlarge": 0.924, "c4.8xlarge": 1.848, "d2.xlarge": 0.87, "d2.2xlarge": 1.74, "d2.4xlarge": 3.48, "d2.8xlarge": 6.96, "g2.2xlarge": 1.0, "g2.8xlarge": 4.0, "hs1.8xlarge": 5.57, "i2.xlarge": 1.018, "i2.2xlarge": 2.035, "i2.4xlarge": 4.07, "i2.8xlarge": 8.14, "i3.large": 0.187, "i3.xlarge": 0.374, "i3.2xlarge": 0.748, "i3.4xlarge": 1.496, "i3.8xlarge": 2.992, "i3.16xlarge": 5.984, "m1.small": 0.058, "m1.medium": 0.117, "m1.large": 0.233, "m1.xlarge": 0.467, "m2.xlarge": 0.296, "m2.2xlarge": 0.592, "m2.4xlarge": 1.183, "m3.medium": 0.098, "m3.large": 0.196, "m3.xlarge": 0.392, "m3.2xlarge": 0.784, "m4.large": 0.125, "m4.xlarge": 0.25, "m4.2xlarge": 0.5, "m4.4xlarge": 1.0, "m4.10xlarge": 2.5, "m4.16xlarge": 4.0, "r3.large": 0.2, "r3.xlarge": 0.399, "r3.2xlarge": 0.798, "r3.4xlarge": 1.596, "r3.8xlarge": 3.192, "r4.large": 0.16, "r4.xlarge": 0.32, "r4.2xlarge": 0.64, "r4.4xlarge": 1.28, "r4.8xlarge": 2.56, "r4.16xlarge": 5.12, "t1.micro": 0.02, "t2.micro": 0.015, "t2.small": 0.03, "t2.medium": 0.06, "t2.large": 0.12, "t2.xlarge": 0.24, "t2.nano": 0.0075, "t2.2xlarge": 0.48, "x1.16xlarge": 9.671, "x1.32xlarge": 19.341 }, "ec2_ap_southeast_2": { "c1.medium": 0.164, "c1.xlarge": 0.655, "c3.large": 0.132, "c3.xlarge": 0.265, "c3.2xlarge": 0.529, "c3.4xlarge": 1.058, "c3.8xlarge": 2.117, "c4.large": 0.13, "c4.xlarge": 0.261, "c4.2xlarge": 0.522, "c4.4xlarge": 1.042, "c4.8xlarge": 2.085, "d2.xlarge": 0.87, "d2.2xlarge": 1.74, "d2.4xlarge": 3.48, "d2.8xlarge": 6.96, "g2.2xlarge": 0.898, "g2.8xlarge": 3.592, "hs1.8xlarge": 5.57, "i2.xlarge": 1.018, "i2.2xlarge": 2.035, "i2.4xlarge": 4.07, "i2.8xlarge": 8.14, "i3.large": 0.187, "i3.xlarge": 0.374, "i3.2xlarge": 0.748, "i3.4xlarge": 1.496, "i3.8xlarge": 2.992, "i3.16xlarge": 5.984, "m1.small": 0.058, 
"m1.medium": 0.117, "m1.large": 0.233, "m1.xlarge": 0.467, "m2.xlarge": 0.296, "m2.2xlarge": 0.592, "m2.4xlarge": 1.183, "m3.medium": 0.093, "m3.large": 0.186, "m3.xlarge": 0.372, "m3.2xlarge": 0.745, "m4.large": 0.125, "m4.xlarge": 0.25, "m4.2xlarge": 0.5, "m4.4xlarge": 1.0, "m4.10xlarge": 2.5, "m4.16xlarge": 4.0, "r3.large": 0.2, "r3.xlarge": 0.399, "r3.2xlarge": 0.798, "r3.4xlarge": 1.596, "r3.8xlarge": 3.192, "r4.large": 0.16, "r4.xlarge": 0.319, "r4.2xlarge": 0.638, "r4.4xlarge": 1.277, "r4.8xlarge": 2.554, "r4.16xlarge": 5.107, "t1.micro": 0.02, "t2.micro": 0.016, "t2.small": 0.032, "t2.medium": 0.064, "t2.large": 0.128, "t2.xlarge": 0.256, "t2.nano": 0.008, "t2.2xlarge": 0.512, "x1.16xlarge": 9.671, "x1.32xlarge": 19.341 }, "ec2_ca_central": { "c4.large": 0.11, "c4.xlarge": 0.218, "c4.2xlarge": 0.438, "c4.4xlarge": 0.876, "c4.8xlarge": 1.75, "d2.xlarge": 0.759, "d2.2xlarge": 1.518, "d2.4xlarge": 3.036, "d2.8xlarge": 6.072, "i3.large": 0.172, "i3.xlarge": 0.344, "i3.2xlarge": 0.688, "i3.4xlarge": 1.376, "i3.8xlarge": 2.752, "i3.16xlarge": 5.504, "m4.large": 0.111, "m4.xlarge": 0.222, "m4.2xlarge": 0.444, "m4.4xlarge": 0.888, "m4.10xlarge": 2.22, "m4.16xlarge": 3.552, "r4.large": 0.146, "r4.xlarge": 0.292, "r4.2xlarge": 0.584, "r4.4xlarge": 1.168, "r4.8xlarge": 2.336, "r4.16xlarge": 4.672, "t2.micro": 0.013, "t2.small": 0.026, "t2.medium": 0.052, "t2.large": 0.103, "t2.xlarge": 0.206, "t2.nano": 0.0065, "t2.2xlarge": 0.412, "x1.16xlarge": 7.336, "x1.32xlarge": 14.672 }, "ec2_ca_central_1": { "c4.large": 0.11, "c4.xlarge": 0.218, "c4.2xlarge": 0.438, "c4.4xlarge": 0.876, "c4.8xlarge": 1.75, "d2.xlarge": 0.759, "d2.2xlarge": 1.518, "d2.4xlarge": 3.036, "d2.8xlarge": 6.072, "i3.large": 0.172, "i3.xlarge": 0.344, "i3.2xlarge": 0.688, "i3.4xlarge": 1.376, "i3.8xlarge": 2.752, "i3.16xlarge": 5.504, "m4.large": 0.111, "m4.xlarge": 0.222, "m4.2xlarge": 0.444, "m4.4xlarge": 0.888, "m4.10xlarge": 2.22, "m4.16xlarge": 3.552, "r4.large": 0.146, "r4.xlarge": 0.292, 
"r4.2xlarge": 0.584, "r4.4xlarge": 1.168, "r4.8xlarge": 2.336, "r4.16xlarge": 4.672, "t2.micro": 0.013, "t2.small": 0.026, "t2.medium": 0.052, "t2.large": 0.103, "t2.xlarge": 0.206, "t2.nano": 0.0065, "t2.2xlarge": 0.412, "x1.16xlarge": 7.336, "x1.32xlarge": 14.672 }, "ec2_eu_central": { "c3.large": 0.129, "c3.xlarge": 0.258, "c3.2xlarge": 0.516, "c3.4xlarge": 1.032, "c3.8xlarge": 2.064, "c4.large": 0.114, "c4.xlarge": 0.227, "c4.2xlarge": 0.454, "c4.4xlarge": 0.909, "c4.8xlarge": 1.817, "d2.xlarge": 0.794, "d2.2xlarge": 1.588, "d2.4xlarge": 3.176, "d2.8xlarge": 6.352, "g2.2xlarge": 0.772, "g2.8xlarge": 3.088, "i2.xlarge": 1.013, "i2.2xlarge": 2.026, "i2.4xlarge": 4.051, "i2.8xlarge": 8.102, "i3.large": 0.186, "i3.xlarge": 0.372, "i3.2xlarge": 0.744, "i3.4xlarge": 1.488, "i3.8xlarge": 2.976, "i3.16xlarge": 5.952, "m3.medium": 0.079, "m3.large": 0.158, "m3.xlarge": 0.315, "m3.2xlarge": 0.632, "m4.large": 0.12, "m4.xlarge": 0.24, "m4.2xlarge": 0.48, "m4.4xlarge": 0.96, "m4.10xlarge": 2.4, "m4.16xlarge": 3.84, "r3.large": 0.2, "r3.xlarge": 0.4, "r3.2xlarge": 0.8, "r3.4xlarge": 1.6, "r3.8xlarge": 3.201, "r4.large": 0.16, "r4.xlarge": 0.32, "r4.2xlarge": 0.64, "r4.4xlarge": 1.28, "r4.8xlarge": 2.561, "r4.16xlarge": 5.122, "t2.micro": 0.014, "t2.small": 0.027, "t2.medium": 0.054, "t2.large": 0.108, "t2.xlarge": 0.216, "t2.nano": 0.0068, "t2.2xlarge": 0.432, "x1.16xlarge": 9.337, "x1.32xlarge": 18.674 }, "ec2_eu_west": { "c1.medium": 0.148, "c1.xlarge": 0.592, "c3.large": 0.12, "c3.xlarge": 0.239, "c3.2xlarge": 0.478, "c3.4xlarge": 0.956, "c3.8xlarge": 1.912, "c4.large": 0.113, "c4.xlarge": 0.226, "c4.2xlarge": 0.453, "c4.4xlarge": 0.905, "c4.8xlarge": 1.811, "cc2.8xlarge": 2.25, "cg1.4xlarge": 2.36, "cr1.8xlarge": 3.75, "d2.xlarge": 0.735, "d2.2xlarge": 1.47, "d2.4xlarge": 2.94, "d2.8xlarge": 5.88, "g2.2xlarge": 0.702, "g2.8xlarge": 2.808, "hi1.4xlarge": 3.1, "hs1.8xlarge": 4.9, "i2.xlarge": 0.938, "i2.2xlarge": 1.876, "i2.4xlarge": 3.751, "i2.8xlarge": 7.502, 
"i3.large": 0.172, "i3.xlarge": 0.344, "i3.2xlarge": 0.688, "i3.4xlarge": 1.376, "i3.8xlarge": 2.752, "i3.16xlarge": 5.504, "m1.small": 0.047, "m1.medium": 0.095, "m1.large": 0.19, "m1.xlarge": 0.379, "m2.xlarge": 0.275, "m2.2xlarge": 0.55, "m2.4xlarge": 1.1, "m3.medium": 0.073, "m3.large": 0.146, "m3.xlarge": 0.293, "m3.2xlarge": 0.585, "m4.large": 0.111, "m4.xlarge": 0.222, "m4.2xlarge": 0.444, "m4.4xlarge": 0.888, "m4.10xlarge": 2.22, "m4.16xlarge": 3.552, "p2.xlarge": 0.972, "p2.8xlarge": 7.776, "p2.16xlarge": 15.552, "r3.large": 0.185, "r3.xlarge": 0.371, "r3.2xlarge": 0.741, "r3.4xlarge": 1.482, "r3.8xlarge": 2.964, "r4.large": 0.148, "r4.xlarge": 0.296, "r4.2xlarge": 0.593, "r4.4xlarge": 1.186, "r4.8xlarge": 2.371, "r4.16xlarge": 4.742, "t1.micro": 0.02, "t2.micro": 0.013, "t2.small": 0.025, "t2.medium": 0.05, "t2.large": 0.101, "t2.xlarge": 0.202, "t2.nano": 0.0063, "t2.2xlarge": 0.404, "x1.16xlarge": 8.003, "x1.32xlarge": 16.006 }, "ec2_eu_west_2": { "c4.large": 0.119, "c4.xlarge": 0.237, "c4.2xlarge": 0.476, "c4.4xlarge": 0.95, "c4.8xlarge": 1.902, "d2.xlarge": 0.772, "d2.2xlarge": 1.544, "d2.4xlarge": 3.087, "d2.8xlarge": 6.174, "i3.large": 0.181, "i3.xlarge": 0.362, "i3.2xlarge": 0.724, "i3.4xlarge": 1.448, "i3.8xlarge": 2.896, "i3.16xlarge": 5.792, "m4.large": 0.125, "m4.xlarge": 0.25, "m4.2xlarge": 0.499, "m4.4xlarge": 0.998, "m4.10xlarge": 2.496, "m4.16xlarge": 3.993, "r4.large": 0.156, "r4.xlarge": 0.312, "r4.2xlarge": 0.624, "r4.4xlarge": 1.248, "r4.8xlarge": 2.496, "r4.16xlarge": 4.992, "t2.micro": 0.014, "t2.small": 0.026, "t2.medium": 0.052, "t2.large": 0.106, "t2.xlarge": 0.212, "t2.nano": 0.0066, "t2.2xlarge": 0.424, "x1.16xlarge": 8.403, "x1.32xlarge": 16.806 }, "ec2_eu_west_london": { "c4.large": 0.119, "c4.xlarge": 0.237, "c4.2xlarge": 0.476, "c4.4xlarge": 0.95, "c4.8xlarge": 1.902, "d2.xlarge": 0.772, "d2.2xlarge": 1.544, "d2.4xlarge": 3.087, "d2.8xlarge": 6.174, "i3.large": 0.181, "i3.xlarge": 0.362, "i3.2xlarge": 0.724, "i3.4xlarge": 
1.448, "i3.8xlarge": 2.896, "i3.16xlarge": 5.792, "m4.large": 0.116, "m4.xlarge": 0.232, "m4.2xlarge": 0.464, "m4.4xlarge": 0.928, "m4.10xlarge": 2.32, "m4.16xlarge": 3.712, "r4.large": 0.156, "r4.xlarge": 0.312, "r4.2xlarge": 0.624, "r4.4xlarge": 1.248, "r4.8xlarge": 2.496, "r4.16xlarge": 4.992, "t2.micro": 0.014, "t2.small": 0.026, "t2.medium": 0.052, "t2.large": 0.106, "t2.xlarge": 0.212, "t2.nano": 0.0066, "t2.2xlarge": 0.424, "x1.16xlarge": 8.403, "x1.32xlarge": 16.806 }, "ec2_sa_east": { "c1.medium": 0.179, "c1.xlarge": 0.718, "c3.large": 0.163, "c3.xlarge": 0.325, "c3.2xlarge": 0.65, "c3.4xlarge": 1.3, "c3.8xlarge": 2.6, "c4.large": 0.155, "c4.xlarge": 0.309, "c4.2xlarge": 0.618, "c4.4xlarge": 1.235, "c4.8xlarge": 2.47, "i3.large": 0.286, "i3.xlarge": 0.572, "i3.2xlarge": 1.144, "i3.4xlarge": 2.288, "i3.8xlarge": 4.576, "i3.16xlarge": 9.152, "m1.small": 0.058, "m1.medium": 0.117, "m1.large": 0.233, "m1.xlarge": 0.467, "m2.xlarge": 0.323, "m2.2xlarge": 0.645, "m2.4xlarge": 1.291, "m3.medium": 0.095, "m3.large": 0.19, "m3.xlarge": 0.381, "m3.2xlarge": 0.761, "m4.large": 0.159, "m4.xlarge": 0.318, "m4.2xlarge": 0.636, "m4.4xlarge": 1.272, "m4.10xlarge": 3.18, "m4.16xlarge": 5.088, "r3.large": 0.35, "r3.xlarge": 0.7, "r3.2xlarge": 1.399, "r3.4xlarge": 2.799, "r3.8xlarge": 5.597, "r4.large": 0.28, "r4.xlarge": 0.56, "r4.2xlarge": 1.12, "r4.4xlarge": 2.24, "r4.8xlarge": 4.48, "r4.16xlarge": 8.96, "t1.micro": 0.027, "t2.micro": 0.02, "t2.small": 0.041, "t2.medium": 0.081, "t2.large": 0.162, "t2.xlarge": 0.324, "t2.nano": 0.0101, "t2.2xlarge": 0.648 }, "ec2_us_east": { "c1.medium": 0.13, "c1.xlarge": 0.52, "c3.large": 0.105, "c3.xlarge": 0.21, "c3.2xlarge": 0.42, "c3.4xlarge": 0.84, "c3.8xlarge": 1.68, "c4.large": 0.1, "c4.xlarge": 0.199, "c4.2xlarge": 0.398, "c4.4xlarge": 0.796, "c4.8xlarge": 1.591, "cc2.8xlarge": 2.0, "cg1.4xlarge": 2.1, "cr1.8xlarge": 3.5, "d2.xlarge": 0.69, "d2.2xlarge": 1.38, "d2.4xlarge": 2.76, "d2.8xlarge": 5.52, "f1.2xlarge": 1.65, 
"f1.16xlarge": 13.2, "g2.2xlarge": 0.65, "g2.8xlarge": 2.6, "hi1.4xlarge": 3.1, "hs1.8xlarge": 4.6, "i2.xlarge": 0.853, "i2.2xlarge": 1.705, "i2.4xlarge": 3.41, "i2.8xlarge": 6.82, "i3.large": 0.156, "i3.xlarge": 0.312, "i3.2xlarge": 0.624, "i3.4xlarge": 1.248, "i3.8xlarge": 2.496, "i3.16xlarge": 4.992, "m1.small": 0.044, "m1.medium": 0.087, "m1.large": 0.175, "m1.xlarge": 0.35, "m2.xlarge": 0.245, "m2.2xlarge": 0.49, "m2.4xlarge": 0.98, "m3.medium": 0.067, "m3.large": 0.133, "m3.xlarge": 0.266, "m3.2xlarge": 0.532, "m4.large": 0.1, "m4.xlarge": 0.2, "m4.2xlarge": 0.4, "m4.4xlarge": 0.8, "m4.10xlarge": 2.0, "m4.16xlarge": 3.2, "p2.xlarge": 0.9, "p2.8xlarge": 7.2, "p2.16xlarge": 14.4, "r3.large": 0.166, "r3.xlarge": 0.333, "r3.2xlarge": 0.665, "r3.4xlarge": 1.33, "r3.8xlarge": 2.66, "r4.large": 0.133, "r4.xlarge": 0.266, "r4.2xlarge": 0.532, "r4.4xlarge": 1.064, "r4.8xlarge": 2.128, "r4.16xlarge": 4.256, "t1.micro": 0.02, "t2.micro": 0.012, "t2.small": 0.023, "t2.medium": 0.047, "t2.large": 0.094, "t2.xlarge": 0.188, "t2.nano": 0.0059, "t2.2xlarge": 0.376, "x1.16xlarge": 6.669, "x1.32xlarge": 13.338 }, "ec2_us_east_ohio": { "c4.large": 0.1, "c4.xlarge": 0.199, "c4.2xlarge": 0.398, "c4.4xlarge": 0.796, "c4.8xlarge": 1.591, "d2.xlarge": 0.69, "d2.2xlarge": 1.38, "d2.4xlarge": 2.76, "d2.8xlarge": 5.52, "i2.xlarge": 0.853, "i2.2xlarge": 1.705, "i2.4xlarge": 3.41, "i2.8xlarge": 6.82, "i3.large": 0.156, "i3.xlarge": 0.312, "i3.2xlarge": 0.624, "i3.4xlarge": 1.248, "i3.8xlarge": 2.496, "i3.16xlarge": 4.992, "m4.large": 0.1, "m4.xlarge": 0.2, "m4.2xlarge": 0.4, "m4.4xlarge": 0.8, "m4.10xlarge": 2.0, "m4.16xlarge": 3.2, "p2.xlarge": 0.9, "p2.8xlarge": 7.2, "p2.16xlarge": 14.4, "r3.large": 0.166, "r3.xlarge": 0.333, "r3.2xlarge": 0.665, "r3.4xlarge": 1.33, "r3.8xlarge": 2.66, "r4.large": 0.133, "r4.xlarge": 0.266, "r4.2xlarge": 0.532, "r4.4xlarge": 1.064, "r4.8xlarge": 2.128, "r4.16xlarge": 4.256, "t2.micro": 0.012, "t2.small": 0.023, "t2.medium": 0.047, "t2.large": 0.094, 
"t2.xlarge": 0.188, "t2.nano": 0.0059, "t2.2xlarge": 0.376, "x1.16xlarge": 6.669, "x1.32xlarge": 13.338 }, "ec2_us_govwest": { "c1.medium": 0.157, "c1.xlarge": 0.628, "c3.large": 0.126, "c3.xlarge": 0.252, "c3.2xlarge": 0.504, "c3.4xlarge": 1.008, "c3.8xlarge": 2.016, "c4.large": 0.12, "c4.xlarge": 0.239, "c4.2xlarge": 0.479, "c4.4xlarge": 0.958, "c4.8xlarge": 1.915, "cc2.8xlarge": 2.25, "d2.xlarge": 0.828, "d2.2xlarge": 1.656, "d2.4xlarge": 3.312, "d2.8xlarge": 6.624, "hs1.8xlarge": 5.52, "i2.xlarge": 1.023, "i2.2xlarge": 2.046, "i2.4xlarge": 4.092, "i2.8xlarge": 8.184, "i3.large": 0.188, "i3.xlarge": 0.376, "i3.2xlarge": 0.752, "i3.4xlarge": 1.504, "i3.8xlarge": 3.008, "i3.16xlarge": 6.016, "m1.small": 0.053, "m1.medium": 0.106, "m1.large": 0.211, "m1.xlarge": 0.423, "m2.xlarge": 0.293, "m2.2xlarge": 0.586, "m2.4xlarge": 1.171, "m3.medium": 0.084, "m3.large": 0.168, "m3.xlarge": 0.336, "m3.2xlarge": 0.672, "m4.large": 0.126, "m4.xlarge": 0.252, "m4.2xlarge": 0.504, "m4.4xlarge": 1.008, "m4.10xlarge": 2.52, "m4.16xlarge": 4.032, "p2.xlarge": 1.08, "p2.8xlarge": 8.64, "p2.16xlarge": 17.28, "r3.large": 0.2, "r3.xlarge": 0.399, "r3.2xlarge": 0.798, "r3.4xlarge": 1.596, "r3.8xlarge": 3.192, "r4.large": 0.16, "r4.xlarge": 0.319, "r4.2xlarge": 0.638, "r4.4xlarge": 1.277, "r4.8xlarge": 2.554, "r4.16xlarge": 5.107, "t1.micro": 0.024, "t2.micro": 0.014, "t2.small": 0.028, "t2.medium": 0.056, "t2.large": 0.112, "t2.xlarge": 0.224, "t2.nano": 0.0068, "t2.2xlarge": 0.448, "x1.16xlarge": 8.003, "x1.32xlarge": 16.006 }, "ec2_us_west": { "c1.medium": 0.148, "c1.xlarge": 0.592, "c3.large": 0.12, "c3.xlarge": 0.239, "c3.2xlarge": 0.478, "c3.4xlarge": 0.956, "c3.8xlarge": 1.912, "c4.large": 0.124, "c4.xlarge": 0.249, "c4.2xlarge": 0.498, "c4.4xlarge": 0.997, "c4.8xlarge": 1.993, "d2.xlarge": 0.781, "d2.2xlarge": 1.563, "d2.4xlarge": 3.125, "d2.8xlarge": 6.25, "g2.2xlarge": 0.702, "g2.8xlarge": 2.808, "i2.xlarge": 0.938, "i2.2xlarge": 1.876, "i2.4xlarge": 3.751, "i2.8xlarge": 7.502, 
"i3.large": 0.172, "i3.xlarge": 0.344, "i3.2xlarge": 0.688, "i3.4xlarge": 1.376, "i3.8xlarge": 2.752, "i3.16xlarge": 5.504, "m1.small": 0.047, "m1.medium": 0.095, "m1.large": 0.19, "m1.xlarge": 0.379, "m2.xlarge": 0.275, "m2.2xlarge": 0.55, "m2.4xlarge": 1.1, "m3.medium": 0.077, "m3.large": 0.154, "m3.xlarge": 0.308, "m3.2xlarge": 0.616, "m4.large": 0.117, "m4.xlarge": 0.234, "m4.2xlarge": 0.468, "m4.4xlarge": 0.936, "m4.10xlarge": 2.34, "m4.16xlarge": 3.744, "r3.large": 0.185, "r3.xlarge": 0.371, "r3.2xlarge": 0.741, "r3.4xlarge": 1.482, "r3.8xlarge": 2.964, "r4.large": 0.148, "r4.xlarge": 0.296, "r4.2xlarge": 0.593, "r4.4xlarge": 1.186, "r4.8xlarge": 2.371, "r4.16xlarge": 4.742, "t1.micro": 0.025, "t2.micro": 0.015, "t2.small": 0.031, "t2.medium": 0.061, "t2.large": 0.122, "t2.xlarge": 0.244, "t2.nano": 0.0077, "t2.2xlarge": 0.488 }, "ec2_us_west_oregon": { "c1.medium": 0.13, "c1.xlarge": 0.52, "c3.large": 0.105, "c3.xlarge": 0.21, "c3.2xlarge": 0.42, "c3.4xlarge": 0.84, "c3.8xlarge": 1.68, "c4.large": 0.1, "c4.xlarge": 0.199, "c4.2xlarge": 0.398, "c4.4xlarge": 0.796, "c4.8xlarge": 1.591, "cc2.8xlarge": 2.0, "cr1.8xlarge": 3.5, "d2.xlarge": 0.69, "d2.2xlarge": 1.38, "d2.4xlarge": 2.76, "d2.8xlarge": 5.52, "g2.2xlarge": 0.65, "g2.8xlarge": 2.6, "hi1.4xlarge": 3.1, "hs1.8xlarge": 4.6, "i2.xlarge": 0.853, "i2.2xlarge": 1.705, "i2.4xlarge": 3.41, "i2.8xlarge": 6.82, "i3.large": 0.156, "i3.xlarge": 0.312, "i3.2xlarge": 0.624, "i3.4xlarge": 1.248, "i3.8xlarge": 2.496, "i3.16xlarge": 4.992, "m1.small": 0.044, "m1.medium": 0.087, "m1.large": 0.175, "m1.xlarge": 0.35, "m2.xlarge": 0.245, "m2.2xlarge": 0.49, "m2.4xlarge": 0.98, "m3.medium": 0.067, "m3.large": 0.133, "m3.xlarge": 0.266, "m3.2xlarge": 0.532, "m4.large": 0.1, "m4.xlarge": 0.2, "m4.2xlarge": 0.4, "m4.4xlarge": 0.8, "m4.10xlarge": 2.0, "m4.16xlarge": 3.2, "p2.xlarge": 0.9, "p2.8xlarge": 7.2, "p2.16xlarge": 14.4, "r3.large": 0.166, "r3.xlarge": 0.333, "r3.2xlarge": 0.665, "r3.4xlarge": 1.33, "r3.8xlarge": 2.66, 
"r4.large": 0.133, "r4.xlarge": 0.266, "r4.2xlarge": 0.532, "r4.4xlarge": 1.064, "r4.8xlarge": 2.128, "r4.16xlarge": 4.256, "t1.micro": 0.02, "t2.micro": 0.012, "t2.small": 0.023, "t2.medium": 0.047, "t2.large": 0.094, "t2.xlarge": 0.188, "t2.nano": 0.0059, "t2.2xlarge": 0.376, "x1.16xlarge": 6.669, "x1.32xlarge": 13.338 }, "elastichosts": { "small": 0.1, "medium": 0.223, "large": 0.378, "extra-large": 0.579, "high-cpu-extra-large": 0.77, "high-cpu-medium": 0.18 }, "gandi": { "small": 0.02, "medium": 0.03, "large": 0.06, "x-large": 0.12, "1": 0.02 }, "gogrid": { "1GB": 0.19, "2GB": 0.38, "4GB": 0.76, "8GB": 1.52, "16GB": 3.04, "24GB": 4.56, "512MB": 0.095 }, "google_asia": { "f1-micro": 0.009, "f1-micro-preemptible": 0.005, "g1-small": 0.03, "g1-small-preemptible": 0.01, "n1-highcpu-2": 0.084, "n1-highcpu-2-preemptible": 0.022, "n1-highcpu-4": 0.168, "n1-highcpu-4-preemptible": 0.044, "n1-highcpu-8": 0.336, "n1-highcpu-8-preemptible": 0.088, "n1-highcpu-16": 0.672, "n1-highcpu-16-preemptible": 0.176, "n1-highcpu-32": 1.344, "n1-highcpu-32-preemptible": 0.352, "n1-highmem-2": 0.139, "n1-highmem-2-preemptible": 0.0385, "n1-highmem-4": 0.278, "n1-highmem-4-preemptible": 0.077, "n1-highmem-8": 0.556, "n1-highmem-8-preemptible": 0.154, "n1-highmem-16": 1.112, "n1-highmem-16-preemptible": 0.308, "n1-highmem-32": 2.224, "n1-highmem-32-preemptible": 0.616, "n1-standard-1": 0.055, "n1-standard-1-preemptible": 0.0165, "n1-standard-2": 0.11, "n1-standard-2-preemptible": 0.033, "n1-standard-4": 0.22, "n1-standard-4-preemptible": 0.066, "n1-standard-8": 0.44, "n1-standard-8-preemptible": 0.132, "n1-standard-16": 0.88, "n1-standard-16-preemptible": 0.264, "n1-standard-32": 1.76, "n1-standard-32-preemptible": 0.528 }, "google_europe": { "f1-micro": 0.009, "f1-micro-preemptible": 0.005, "g1-small": 0.03, "g1-small-preemptible": 0.01, "n1-highcpu-2": 0.084, "n1-highcpu-2-preemptible": 0.022, "n1-highcpu-4": 0.168, "n1-highcpu-4-preemptible": 0.044, "n1-highcpu-8": 0.336, 
"n1-highcpu-8-preemptible": 0.088, "n1-highcpu-16": 0.672, "n1-highcpu-16-preemptible": 0.176, "n1-highcpu-32": 1.344, "n1-highcpu-32-preemptible": 0.352, "n1-highmem-2": 0.139, "n1-highmem-2-preemptible": 0.0385, "n1-highmem-4": 0.278, "n1-highmem-4-preemptible": 0.077, "n1-highmem-8": 0.556, "n1-highmem-8-preemptible": 0.154, "n1-highmem-16": 1.112, "n1-highmem-16-preemptible": 0.308, "n1-highmem-32": 2.224, "n1-highmem-32-preemptible": 0.616, "n1-standard-1": 0.055, "n1-standard-1-preemptible": 0.0165, "n1-standard-2": 0.11, "n1-standard-2-preemptible": 0.033, "n1-standard-4": 0.22, "n1-standard-4-preemptible": 0.066, "n1-standard-8": 0.44, "n1-standard-8-preemptible": 0.132, "n1-standard-16": 0.88, "n1-standard-16-preemptible": 0.264, "n1-standard-32": 1.76, "n1-standard-32-preemptible": 0.528 }, "google_us": { "f1-micro": 0.008, "f1-micro-preemptible": 0.005, "g1-small": 0.027, "g1-small-preemptible": 0.01, "n1-highcpu-2": 0.076, "n1-highcpu-2-preemptible": 0.02, "n1-highcpu-4": 0.152, "n1-highcpu-4-preemptible": 0.04, "n1-highcpu-8": 0.304, "n1-highcpu-8-preemptible": 0.08, "n1-highcpu-16": 0.608, "n1-highcpu-16-preemptible": 0.16, "n1-highcpu-32": 1.216, "n1-highcpu-32-preemptible": 0.32, "n1-highmem-2": 0.126, "n1-highmem-2-preemptible": 0.035, "n1-highmem-4": 0.252, "n1-highmem-4-preemptible": 0.07, "n1-highmem-8": 0.504, "n1-highmem-8-preemptible": 0.14, "n1-highmem-16": 1.008, "n1-highmem-16-preemptible": 0.28, "n1-highmem-32": 2.016, "n1-highmem-32-preemptible": 0.56, "n1-standard-1": 0.05, "n1-standard-1-preemptible": 0.015, "n1-standard-2": 0.1, "n1-standard-2-preemptible": 0.03, "n1-standard-4": 0.2, "n1-standard-4-preemptible": 0.06, "n1-standard-8": 0.4, "n1-standard-8-preemptible": 0.12, "n1-standard-16": 0.8, "n1-standard-16-preemptible": 0.24, "n1-standard-32": 1.6, "n1-standard-32-preemptible": 0.48 }, "nephoscale": { "1": 0.6, "3": 0.063, "5": 0.031, "7": 0.125, "9": 0.188, "11": 0.35, "27": 0.0, "46": 0.1, "48": 0.15, "50": 0.28, "52": 0.48, 
"54": 0.938, "56": 0.75 }, "nimbus": { "m1.small": 0.0, "m1.large": 0.0, "m1.xlarge": 0.0 }, "osc_inc_eu_west_1": { "c1.medium": "0.230", "c1.xlarge": "0.900", "cc1.4xlarge": "1.300", "cc2.8xlarge": "2.400", "cr1.8xlarge": "3.500", "m1.small": "0.090", "m1.medium": "0.120", "m1.large": "0.360", "m1.xlarge": "0.730", "m2.xlarge": "0.410", "m2.2xlarge": "0.820", "m2.4xlarge": "1.640", "m3.xlarge": "0.780", "m3.2xlarge": "1.560", "nv1.small": "5.220", "nv1.medium": "5.250", "nv1.large": "5.490", "nv1.xlarge": "5.610", "os1.8xlarge": "4.310", "t1.micro": "0.040" }, "osc_inc_eu_west_2": { "c1.medium": "0.230", "c1.xlarge": "0.900", "cc1.4xlarge": "1.300", "cc2.8xlarge": "2.400", "cr1.8xlarge": "3.500", "m1.small": "0.090", "m1.medium": "0.120", "m1.large": "0.360", "m1.xlarge": "0.730", "m2.xlarge": "0.410", "m2.2xlarge": "0.820", "m2.4xlarge": "1.640", "m3.xlarge": "0.780", "m3.2xlarge": "1.560", "nv1.small": "5.220", "nv1.medium": "5.250", "nv1.large": "5.490", "nv1.xlarge": "5.610", "os1.8xlarge": "4.310", "t1.micro": "0.040" }, "osc_inc_eu_west_3": { "c1.medium": "0.230", "c1.xlarge": "0.900", "cc1.4xlarge": "1.300", "cc2.8xlarge": "2.400", "cr1.8xlarge": "3.500", "m1.small": "0.090", "m1.medium": "0.120", "m1.large": "0.360", "m1.xlarge": "0.730", "m2.xlarge": "0.410", "m2.2xlarge": "0.820", "m2.4xlarge": "1.640", "m3.xlarge": "0.780", "m3.2xlarge": "1.560", "nv1.small": "5.220", "nv1.medium": "5.250", "nv1.large": "5.490", "nv1.xlarge": "5.610", "os1.8xlarge": "4.310", "t1.micro": "0.040" }, "osc_inc_us_east_1": { "c1.medium": "0.150", "c1.xlarge": "0.580", "cc1.4xlarge": "1.610", "cc2.8xlarge": "2.400", "cr1.8xlarge": "3.500", "m1.small": "0.060", "m1.medium": "0.180", "m1.large": "0.240", "m1.xlarge": "0.730", "m2.xlarge": "0.410", "m2.2xlarge": "1.020", "m2.4xlarge": "2.040", "m3.xlarge": "0.500", "m3.2xlarge": "1.560", "nv1.small": "5.190", "nv1.medium": "5.250", "nv1.large": "5.490", "nv1.xlarge": "5.610", "os1.8xlarge": "6.400", "t1.micro": "0.020" }, 
"osc_inc_us_east_2": { "c1.medium": "0.150", "c1.xlarge": "0.580", "cc1.4xlarge": "1.610", "cc2.8xlarge": "2.400", "cr1.8xlarge": "3.500", "m1.small": "0.060", "m1.medium": "0.180", "m1.large": "0.240", "m1.xlarge": "0.730", "m2.xlarge": "0.410", "m2.2xlarge": "1.020", "m2.4xlarge": "2.040", "m3.xlarge": "0.500", "m3.2xlarge": "1.560", "nv1.small": "5.190", "nv1.medium": "5.250", "nv1.large": "5.490", "nv1.xlarge": "5.610", "os1.8xlarge": "6.400", "t1.micro": "0.020" }, "osc_sas_eu_west_1": { "c1.medium": "0.230", "c1.xlarge": "0.900", "cc1.4xlarge": "1.460", "cc2.8xlarge": "2.700", "cr1.8xlarge": "3.750", "m1.small": "0.090", "m1.medium": "0.130", "m1.large": "0.360", "m1.xlarge": "0.730", "m2.xlarge": "0.460", "m2.2xlarge": "0.920", "m2.4xlarge": "1.840", "m3.xlarge": "0.780", "m3.2xlarge": "1.560", "nv1.small": "5.220", "nv1.medium": "5.310", "nv1.large": "5.490", "nv1.xlarge": "5.860", "os1.8xlarge": "6.400", "t1.micro": "0.040" }, "osc_sas_eu_west_2": { "c1.medium": "0.230", "c1.xlarge": "0.900", "cc1.4xlarge": "1.460", "cc2.8xlarge": "2.700", "cr1.8xlarge": "3.750", "m1.small": "0.090", "m1.medium": "0.130", "m1.large": "0.360", "m1.xlarge": "0.730", "m2.xlarge": "0.460", "m2.2xlarge": "0.920", "m2.4xlarge": "1.840", "m3.xlarge": "0.780", "m3.2xlarge": "1.560", "nv1.small": "5.220", "nv1.medium": "5.310", "nv1.large": "5.490", "nv1.xlarge": "5.860", "os1.8xlarge": "6.400", "t1.micro": "0.040" }, "osc_sas_eu_west_3": { "c1.medium": "0.230", "c1.xlarge": "0.900", "cc1.4xlarge": "1.460", "cc2.8xlarge": "2.700", "cr1.8xlarge": "3.750", "m1.small": "0.090", "m1.medium": "0.130", "m1.large": "0.360", "m1.xlarge": "0.730", "m2.xlarge": "0.460", "m2.2xlarge": "0.920", "m2.4xlarge": "1.840", "m3.xlarge": "0.780", "m3.2xlarge": "1.560", "nv1.small": "5.220", "nv1.medium": "5.310", "nv1.large": "5.490", "nv1.xlarge": "5.860", "os1.8xlarge": "6.400", "t1.micro": "0.040" }, "osc_sas_us_east_1": { "c1.medium": "0.170", "c1.xlarge": "0.660", "cc1.4xlarge": "1.610", 
"cc2.8xlarge": "2.700", "cr1.8xlarge": "3.750", "m1.small": "0.070", "m1.medium": "0.180", "m1.large": "0.260", "m1.xlarge": "0.730", "m2.xlarge": "0.460", "m2.2xlarge": "1.020", "m2.4xlarge": "2.040", "m3.xlarge": "0.550", "m3.2xlarge": "1.560", "nv1.small": "5.220", "nv1.medium": "5.310", "nv1.large": "5.490", "nv1.xlarge": "5.860", "os1.8xlarge": "6.400", "t1.micro": "0.020" }, "osc_sas_us_east_2": { "c1.medium": "0.170", "c1.xlarge": "0.660", "cc1.4xlarge": "1.610", "cc2.8xlarge": "2.700", "cr1.8xlarge": "3.750", "m1.small": "0.070", "m1.medium": "0.180", "m1.large": "0.260", "m1.xlarge": "0.730", "m2.xlarge": "0.460", "m2.2xlarge": "1.020", "m2.4xlarge": "2.040", "m3.xlarge": "0.550", "m3.2xlarge": "1.560", "nv1.small": "5.220", "nv1.medium": "5.310", "nv1.large": "5.490", "nv1.xlarge": "5.860", "os1.8xlarge": "6.400", "t1.micro": "0.020" }, "rackspace": { "performance1-1": 0.04, "performance1-2": 0.08, "performance1-4": 0.16, "performance1-8": 0.32, "performance2-15": 0.68, "performance2-30": 1.36, "performance2-60": 2.72, "performance2-90": 4.08, "performance2-120": 5.44, "1": 0.015, "2": 0.03, "3": 0.06, "4": 0.12, "5": 0.24, "6": 0.48, "7": 0.96, "8": 1.8 }, "rackspacenovalon": { "performance1-1": 0.04, "performance1-2": 0.08, "performance1-4": 0.16, "performance1-8": 0.32, "performance2-15": 0.68, "performance2-30": 1.36, "performance2-60": 2.72, "performance2-90": 4.08, "performance2-120": 5.44, "2": 0.032, "3": 0.064, "4": 0.129, "5": 0.258, "6": 0.516, "7": 0.967, "8": 1.612 }, "rackspacenovasyd": { "performance1-1": 0.04, "performance1-2": 0.08, "performance1-4": 0.16, "performance1-8": 0.32, "performance2-15": 0.68, "performance2-30": 1.36, "performance2-60": 2.72, "performance2-90": 4.08, "performance2-120": 5.44, "2": 0.026, "3": 0.072, "4": 0.144, "5": 0.288, "6": 0.576, "7": 1.08, "8": 1.44 }, "rackspacenovaus": { "performance1-1": 0.04, "performance1-2": 0.08, "performance1-4": 0.16, "performance1-8": 0.32, "performance2-15": 0.68, 
"performance2-30": 1.36, "performance2-60": 2.72, "performance2-90": 4.08, "performance2-120": 5.44, "2": 0.022, "3": 0.06, "4": 0.12, "5": 0.24, "6": 0.48, "7": 0.96, "8": 1.2 }, "serverlove": { "small": 0.161, "medium": 0.404, "large": 0.534, "extra-large": 0.615, "high-cpu-extra-large": 0.776, "high-cpu-medium": 0.291 }, "skalicloud": { "small": 0.136, "medium": 0.301, "large": 0.505, "extra-large": 0.654, "high-cpu-extra-large": 0.936, "high-cpu-medium": 0.249 }, "softlayer": { "0": 0.023, "1": 0.024, "2": 0.024, "3": 0.026, "4": 0.045, "5": 0.045, "6": 0.045, "7": 0.09, "8": 0.09, "9": 0.09, "10": 0.09, "11": 0.205, "12": 0.205 }, "vps_net": { "1": 0.416 } }, "storage": {}, "updated": 1494321515 }apache-libcloud-2.2.1/libcloud/common/0000775000175000017500000000000013160535107017504 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/common/liquidweb.py0000664000175000017500000001471212705475361022061 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
class LiquidWebException(ProviderError):
    """Root of the LiquidWeb exception hierarchy.

    Carries the error message, the HTTP status code of the failed call
    and an optional list of provider-specific extra fields.
    """

    def __init__(self, value, http_code, extra=None):
        """
        :param value: Message describing the error.
        :type value: ``str``

        :param http_code: HTTP status code of the response.
        :type http_code: ``int``

        :param extra: Extra fields specific to the error type.
        :type extra: ``list``
        """
        super(LiquidWebException, self).__init__(value, http_code,
                                                 driver=None)
        self.extra = extra

    def __str__(self):
        details = (self.http_code, self.value)
        return "%s %s" % details

    def __repr__(self):
        details = (self.http_code, self.value)
        return "LiquidWebException %s %s" % details
class LiquidWebResponse(JsonResponse):
    """Response class for the LiquidWeb API.

    Parses the JSON body into a list of result objects and a list of
    errors; if any error is present, an exception for the last one is
    raised immediately.
    """
    objects = None
    errors = None
    error_dict = {}

    def __init__(self, response, connection):
        self.errors = []
        super(LiquidWebResponse, self).__init__(response=response,
                                                connection=connection)
        self.objects, self.errors = self.parse_body_and_errors()
        if self.errors:
            error = self.errors.pop()
            raise self._make_excp(error, self.status)

    def parse_body_and_errors(self):
        """
        Parse the JSON body.

        :return: ``tuple`` of (``list`` of data objects, ``list`` of
                 error objects)
        """
        data = []
        errors = []
        js = super(LiquidWebResponse, self).parse_body()

        if 'items' in js:
            data.append(js['items'])

        if 'name' in js:
            data.append(js)

        if 'deleted' in js:
            data.append(js['deleted'])

        if 'error_class' in js:
            errors.append(js)

        return (data, errors)

    def success(self):
        """
        Returns ``True`` if our request is successful.
        """
        return (len(self.errors) == 0)

    def _make_excp(self, error, status):
        """
        Build an :class:`APIException` from an API error payload.

        :param error: Error object returned by the API.
        :type error: ``dict``

        :param status: HTTP status code of the response.
        :type status: ``int``
        """
        exc_type = error.get('error_class')
        message = error.get('full_message')
        # Bug fix: "extra" must be initialized before the try block.
        # Previously it was only assigned inside the try body, so an
        # unknown error class (KeyError branch) left it undefined and
        # the return statement below raised NameError.
        extra = {}
        try:
            fields = EXCEPTIONS_FIELDS[exc_type]['fields']
        except KeyError:
            fields = []
        for field in fields:
            extra[field] = error.get(field)
        return APIException(exc_type, message, status, extra=extra)
""" Gandi driver base classes """ import time import hashlib import sys from libcloud.utils.py3 import b from libcloud.common.base import ConnectionKey from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection # Global constants DEFAULT_TIMEOUT = 600 # operation pooling max seconds DEFAULT_INTERVAL = 20 # seconds between 2 operation.info class GandiException(Exception): """ Exception class for Gandi driver """ def __str__(self): return '(%u) %s' % (self.args[0], self.args[1]) def __repr__(self): return '' % (self.args[0], self.args[1]) class GandiResponse(XMLRPCResponse): """ A Base Gandi Response class to derive from. """ class GandiConnection(XMLRPCConnection, ConnectionKey): """ Connection class for the Gandi driver """ responseCls = GandiResponse host = 'rpc.gandi.net' endpoint = '/xmlrpc/' def __init__(self, key, secure=True, timeout=None, retry_delay=None, backoff=None, proxy_url=None): # Note: Method resolution order in this case is # XMLRPCConnection -> Connection and Connection doesn't take key as the # first argument so we specify a keyword argument instead. # Previously it was GandiConnection -> ConnectionKey so it worked fine. 
class BaseGandiDriver(object):
    """
    Gandi base driver
    """
    connectionCls = GandiConnection
    name = 'Gandi'

    # Specific methods for gandi
    def _wait_operation(self, id, timeout=DEFAULT_TIMEOUT,
                        check_interval=DEFAULT_INTERVAL):
        """
        Poll an operation until it finishes or the timeout expires.

        :param id: Identifier of the operation to wait for.
        :param timeout: Maximum number of seconds to wait.
        :param check_interval: Seconds between two ``operation.info``
            calls.
        :return: ``True`` if the operation reached the DONE step,
                 ``False`` on ERROR/CANCEL or timeout.
        :rtype: ``bool``
        """
        for _ in range(0, timeout, check_interval):
            try:
                # pylint: disable=no-member
                op = self.connection.request('operation.info',
                                             int(id)).object
                if op['step'] == 'DONE':
                    return True
                if op['step'] in ['ERROR', 'CANCEL']:
                    return False
            except (KeyError, IndexError):
                # Partial/malformed response: keep polling until timeout.
                pass
            except Exception as e:
                # Modernized idiom: "except ... as e" replaces the legacy
                # sys.exc_info()[1] dance (works on Python 2.6+ and 3).
                raise GandiException(1002, e)
            time.sleep(check_interval)
        return False
""" hashstring = '%s:%s:%s' % \ (self.uuid_prefix, self.id, self.driver.type) return hashlib.sha1(b(hashstring)).hexdigest() class IPAddress(BaseObject): """ Provide a common interface for ip addresses """ uuid_prefix = 'inet:' def __init__(self, id, state, inet, driver, version=4, extra=None): super(IPAddress, self).__init__(id, state, driver) self.inet = inet self.version = version self.extra = extra or {} def __repr__(self): return (('') % (self.id, self.inet, self.state, self.driver.name)) class NetworkInterface(BaseObject): """ Provide a common interface for network interfaces """ uuid_prefix = 'if:' def __init__(self, id, state, mac_address, driver, ips=None, node_id=None, extra=None): super(NetworkInterface, self).__init__(id, state, driver) self.mac = mac_address self.ips = ips or {} self.node_id = node_id self.extra = extra or {} def __repr__(self): return (('') % (self.id, self.mac, self.state, self.driver.name)) class Disk(BaseObject): """ Gandi disk component """ def __init__(self, id, state, name, driver, size, extra=None): super(Disk, self).__init__(id, state, driver) self.name = name self.size = size self.extra = extra or {} def __repr__(self): return ( ('') % (self.id, self.name, self.state, self.size, self.driver.name)) apache-libcloud-2.2.1/libcloud/common/providers.py0000664000175000017500000000703612701223644022101 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
def get_driver(drivers, provider, deprecated_providers=None,
               deprecated_constants=None):
    """
    Get a driver.

    :param drivers: Dictionary containing valid providers.
    :type drivers: ``dict``

    :param provider: Id (constant) of provider to get the driver for.
    :type provider: :class:`libcloud.types.Provider`

    :param deprecated_providers: Dictionary with information about the
        deprecated drivers.
    :type deprecated_providers: ``dict``

    :param deprecated_constants: Dictionary with information about the
        deprecated provider constants.
    :type deprecated_constants: ``dict``

    :return: Driver class for the requested provider.
    :raises Exception: If the provider is deprecated or shut down.
    :raises AttributeError: If the provider is unknown.
    """
    # Those providers have been shut down or similar.
    deprecated_providers = deprecated_providers or {}
    if provider in deprecated_providers:
        url = deprecated_providers[provider]['url']
        reason = deprecated_providers[provider]['reason']
        # Bug fix: "url" and "reason" were interpolated in the wrong
        # order, producing "no longer supported: <url>, please
        # visit: <reason>".  The message now names the provider, gives
        # the reason and points at the URL.
        msg = ('Provider no longer supported: %s (%s), please visit: %s' %
               (provider, reason, url))
        raise Exception(msg)

    # Those drivers have moved to "region" constructor argument model
    deprecated_constants = deprecated_constants or {}
    if provider in deprecated_constants:
        old_name = provider.upper()
        new_name = deprecated_constants[provider].upper()
        url = 'https://s.apache.org/lc0140un'
        msg = ('Provider constant "%s" has been removed. New constant '
               'is now called "%s".\n'
               'For more information on this change and how to modify your '
               'code to work with it, please visit: %s' %
               (old_name, new_name, url))
        raise Exception(msg)

    if provider in drivers:
        mod_name, driver_name = drivers[provider]
        # Import the module lazily and pull the driver class off it.
        _mod = __import__(mod_name, globals(), locals(), [driver_name])
        return getattr(_mod, driver_name)

    raise AttributeError('Provider %s does not exist' % (provider))
class HostVirtualException(LibcloudError):
    """
    Exception raised for error responses returned by the HostVirtual API.
    """
    def __init__(self, code, message):
        """
        :param code: Numeric error code returned by the API.
        :type code: ``int``

        :param message: Human-readable error description.
        :type message: ``str``
        """
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Bug fix: the format string was empty (''), which makes the
        # "%" operator raise TypeError whenever repr()/str() is called.
        return '<HostVirtualException in %d: %s>' % (self.code, self.message)
class OvhResponse(JsonResponse):
    """Response wrapper for the Ovh API.

    Detects signature-validation failures and converts them into
    :class:`InvalidCredsError`; any other error body is returned as-is.
    """

    def parse_error(self):
        parsed = super(OvhResponse, self).parse_body() or {}
        error_code = parsed.get('errorCode', None)
        if error_code == 'INVALID_SIGNATURE':
            raise InvalidCredsError('Signature validation failed, probably '
                                    'using invalid credentials')
        return self.body
""" host = API_HOST request_path = API_ROOT responseCls = OvhResponse timestamp = None ua = [] LOCATIONS = LOCATIONS _timedelta = None allow_insecure = True def __init__(self, user_id, *args, **kwargs): self.consumer_key = kwargs.pop('ex_consumer_key', None) if self.consumer_key is None: consumer_key_json = self.request_consumer_key(user_id) msg = ("Your consumer key isn't validated, " "go to '%(validationUrl)s' for valid it. After instantiate " "your driver with \"ex_consumer_key='%(consumerKey)s'\"." % consumer_key_json) raise OvhException(msg) super(OvhConnection, self).__init__(user_id, *args, **kwargs) def request_consumer_key(self, user_id): action = self.request_path + '/auth/credential' data = json.dumps({ 'accessRules': DEFAULT_ACCESS_RULES, 'redirection': 'http://ovh.com', }) headers = { 'Content-Type': 'application/json', 'X-Ovh-Application': user_id, } httpcon = LibcloudConnection(host=self.host, port=443) httpcon.request(method='POST', url=action, body=data, headers=headers) response = httpcon.getresponse() if response.status == httplib.UNAUTHORIZED: raise InvalidCredsError() body = response.read() json_response = json.loads(body) httpcon.close() return json_response def get_timestamp(self): if not self._timedelta: url = 'https://%s%s/auth/time' % (API_HOST, API_ROOT) response = get_response_object(url=url, method='GET', headers={}) if not response or not response.body: raise Exception('Failed to get current time from Ovh API') timestamp = int(response.body) self._timedelta = timestamp - int(time.time()) return int(time.time()) + self._timedelta def make_signature(self, method, action, params, data, timestamp): full_url = 'https://%s%s' % (API_HOST, action) if params: full_url += '?' 
for key, value in params.items(): full_url += '%s=%s&' % (key, value) full_url = full_url[:-1] sha1 = hashlib.sha1() base_signature = "+".join([ self.key, self.consumer_key, method.upper(), full_url, data if data else '', str(timestamp), ]) sha1.update(base_signature.encode()) signature = '$1$' + sha1.hexdigest() return signature def add_default_params(self, params): return params def add_default_headers(self, headers): headers.update({ 'X-Ovh-Application': self.user_id, 'X-Ovh-Consumer': self.consumer_key, 'Content-type': 'application/json', }) return headers def request(self, action, params=None, data=None, headers=None, method='GET', raw=False): data = json.dumps(data) if data else None timestamp = self.get_timestamp() signature = self.make_signature(method, action, params, data, timestamp) headers = headers or {} headers.update({ 'X-Ovh-Timestamp': timestamp, 'X-Ovh-Signature': signature }) return super(OvhConnection, self)\ .request(action, params=params, data=data, headers=headers, method=method, raw=raw) apache-libcloud-2.2.1/libcloud/common/cloudsigma.py0000664000175000017500000001044513153541406022212 0ustar kamikami00000000000000# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
__all__ = [ 'API_ENDPOINTS_1_0', 'API_ENDPOINTS_2_0', 'API_VERSIONS', 'INSTANCE_TYPES' ] # API end-points API_ENDPOINTS_1_0 = { 'zrh': { 'name': 'Zurich', 'country': 'Switzerland', 'host': 'api.zrh.cloudsigma.com' }, 'lvs': { 'name': 'Las Vegas', 'country': 'United States', 'host': 'api.lvs.cloudsigma.com' } } API_ENDPOINTS_2_0 = { 'zrh': { 'name': 'Zurich', 'country': 'Switzerland', 'host': 'zrh.cloudsigma.com' }, 'sjc': { 'name': 'San Jose, CA', 'country': 'United States', 'host': 'sjc.cloudsigma.com' }, 'mia': { 'name': 'Miami, FL', 'country': 'United States', 'host': 'mia.cloudsigma.com' }, 'wdc': { 'name': 'Washington, DC', 'country': 'United States', 'host': 'wdc.cloudsigma.com' }, 'hnl': { 'name': 'Honolulu, HI', 'country': 'United States', 'host': 'hnl.cloudsigma.com' }, 'per': { 'name': 'Perth, Australia', 'country': 'Australia', 'host': 'per.cloudsigma.com' }, 'mnl': { 'name': 'Manila, Philippines', 'country': 'Philippines', 'host': 'mnl.cloudsigma.com' }, 'waw': { 'name': 'Warsaw, Poland', 'country': 'Poland', 'host': 'waw.cloudsigma.com' } } DEFAULT_REGION = 'zrh' # Supported API versions. API_VERSIONS = [ '1.0' # old and deprecated '2.0' ] DEFAULT_API_VERSION = '2.0' # CloudSigma doesn't specify special instance types. # Basically for CPU any value between 0.5 GHz and 20.0 GHz should work, # 500 MB to 32000 MB for ram # and 1 GB to 1024 GB for hard drive size. 
# Plans in this file are based on examples listed on http://www.cloudsigma # .com/en/pricing/price-schedules INSTANCE_TYPES = [ { 'id': 'micro-regular', 'name': 'Micro/Regular instance', 'cpu': 1100, 'memory': 640, 'disk': 10 + 3, 'bandwidth': None, }, { 'id': 'micro-high-cpu', 'name': 'Micro/High CPU instance', 'cpu': 2200, 'memory': 640, 'disk': 80, 'bandwidth': None, }, { 'id': 'standard-small', 'name': 'Standard/Small instance', 'cpu': 1100, 'memory': 1741, 'disk': 50, 'bandwidth': None, }, { 'id': 'standard-large', 'name': 'Standard/Large instance', 'cpu': 4400, 'memory': 7680, 'disk': 250, 'bandwidth': None, }, { 'id': 'standard-extra-large', 'name': 'Standard/Extra Large instance', 'cpu': 8800, 'memory': 15360, 'disk': 500, 'bandwidth': None, }, { 'id': 'high-memory-extra-large', 'name': 'High Memory/Extra Large instance', 'cpu': 7150, 'memory': 17510, 'disk': 250, 'bandwidth': None, }, { 'id': 'high-memory-double-extra-large', 'name': 'High Memory/Double Extra Large instance', 'cpu': 14300, 'memory': 32768, 'disk': 500, 'bandwidth': None, }, { 'id': 'high-cpu-medium', 'name': 'High CPU/Medium instance', 'cpu': 5500, 'memory': 1741, 'disk': 150, 'bandwidth': None, }, { 'id': 'high-cpu-extra-large', 'name': 'High CPU/Extra Large instance', 'cpu': 20000, 'memory': 7168, 'disk': 500, 'bandwidth': None, } ] apache-libcloud-2.2.1/libcloud/common/rackspace.py0000664000175000017500000000164612701023453022015 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
"""
Common settings for Rackspace Cloud Servers and Cloud Files
"""

__all__ = [
    'AUTH_URL'
]

# Base endpoint of the Rackspace identity (authentication) service.
AUTH_URL = 'https://identity.api.rackspacecloud.com'
class AliyunXmlResponse(XmlResponse):
    """XML response wrapper for the Aliyun API."""

    namespace = None

    def success(self):
        """Any 2xx status code is treated as success."""
        return self.status >= 200 and self.status < 300

    def parse_body(self):
        """
        Each response from Aliyun contains a request id and a host id.
        The response body is in utf-8 encoding.
        """
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body

        try:
            if PY3:
                parser = ET.XMLParser(encoding='utf-8')
                body = ET.XML(self.body.encode('utf-8'), parser=parser)
            else:
                try:
                    body = ET.XML(self.body)
                except ValueError:
                    body = ET.XML(self.body.encode('utf-8'))
        except Exception:
            # Narrowed from a bare "except:", which also swallowed
            # SystemExit and KeyboardInterrupt.
            raise MalformedResponseError('Failed to parse XML',
                                         body=self.body,
                                         driver=self.connection.driver)
        self.request_id = findtext(element=body, xpath='RequestId',
                                   namespace=self.namespace)
        self.host_id = findtext(element=body, xpath='HostId',
                                namespace=self.namespace)
        return body

    def parse_error(self):
        """
        Parse error responses from Aliyun.
        """
        body = super(AliyunXmlResponse, self).parse_error()
        code, message = self._parse_error_details(element=body)
        request_id = findtext(element=body, xpath='RequestId',
                              namespace=self.namespace)
        host_id = findtext(element=body, xpath='HostId',
                           namespace=self.namespace)
        error = {'code': code,
                 'message': message,
                 'request_id': request_id,
                 'host_id': host_id}
        return u(error)

    def _parse_error_details(self, element):
        """
        Parse error code and message from the provided error element.

        :return: ``tuple`` with two elements: (code, message)
        :rtype: ``tuple``
        """
        code = findtext(element=element, xpath='Code',
                        namespace=self.namespace)
        message = findtext(element=element, xpath='Message',
                           namespace=self.namespace)

        return (code, message)
def _percent_encode(encode_str):
    """
    Encode string to utf8, quote for url and replace '+' with %20,
    '*' with %2A and keep '~' not converted.

    :param encode_str: Value to encode; ``bytes`` input is decoded with
        the stdin encoding (default cp936) first.
    :return: ``str`` represents the encoded result
    :rtype: ``str``
    """
    encoding = sys.stdin.encoding or 'cp936'
    decoded = str(encode_str)
    if PY3:
        if isinstance(encode_str, bytes):
            decoded = encode_str.decode(encoding)
        else:
            # Bug fix: "str" has no decode() method on Python 3 --
            # text input is already unicode and is used as-is.  The old
            # code called str(encode_str).decode(encoding) and raised
            # AttributeError for every non-bytes value.
            decoded = str(encode_str)

    res = urlquote(
        decoded.encode('utf8'), '')
    res = res.replace('+', '%20')
    res = res.replace('*', '%2A')
    res = res.replace('%7E', '~')
    return res
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.common.types import InvalidCredsError

__all__ = [
    'API_HOST',
    'API_ROOT',
    'LinodeException',
    'LinodeResponse',
    'LinodeConnection'
]

# Endpoint for the Linode API
API_HOST = 'api.linode.com'
API_ROOT = '/'

# Constants that map a RAM figure to a PlanID (updated 2014-08-25)
LINODE_PLAN_IDS = {1024: '1',
                   2048: '2',
                   4096: '4',
                   8192: '6',
                   16384: '7',
                   32768: '8',
                   49152: '9',
                   65536: '10',
                   98304: '12'}

# Available filesystems for disk creation
LINODE_DISK_FILESYSTEMS = ['ext3', 'ext4', 'swap', 'raw']


class LinodeException(Exception):
    """Error originating from the Linode API

    This class wraps a Linode API error, a list of which is available in the
    API documentation.  All Linode API errors are a numeric code and a
    human-readable description.
    """

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return "(%u) %s" % (self.code, self.message)

    def __repr__(self):
        # FIX: the format string here was garbled to "" in this copy, which
        # made repr() raise "TypeError: not all arguments converted" instead
        # of returning a representation.
        return "<LinodeException code %u message %s>" % (self.code,
                                                         self.message)


class LinodeResponse(JsonResponse):
    """
    Linode API response

    Wraps the HTTP response returned by the Linode API.

    libcloud does not take advantage of batching, so a response will always
    reflect the above format.  A few weird quirks are caught here as well.
    """

    # Parsed DATA payloads; populated per-instance in __init__.
    objects = None

    def __init__(self, response, connection):
        """Instantiate a LinodeResponse from the HTTP response

        :keyword response: The raw response returned by urllib
        :return: parsed :class:`LinodeResponse`"""
        self.errors = []
        super(LinodeResponse, self).__init__(response, connection)

        self.invalid = LinodeException(0xFF,
                                       "Invalid JSON received from server")

        # Move parse_body() to here; we can't be sure of failure until we've
        # parsed the body into JSON.
        self.objects, self.errors = self.parse_body()

        if not self.success():
            # Raise the first error, as there will usually only be one
            raise self.errors[0]

    def parse_body(self):
        """Parse the body of the response into JSON objects

        If the response chokes the parser, action and data will be returned as
        None and errorarray will indicate an invalid JSON exception.

        :return: ``list`` of objects and ``list`` of errors"""
        js = super(LinodeResponse, self).parse_body()

        try:
            if isinstance(js, dict):
                # solitary response - promote to list
                js = [js]
            ret = []
            errs = []
            for obj in js:
                if ("DATA" not in obj or "ERRORARRAY" not in obj or
                        "ACTION" not in obj):
                    ret.append(None)
                    errs.append(self.invalid)
                    continue
                ret.append(obj["DATA"])
                errs.extend(self._make_excp(e) for e in obj["ERRORARRAY"])
            return (ret, errs)
        except Exception:
            # FIX: was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception, same fallback value.
            return (None, [self.invalid])

    def success(self):
        """Check the response for success

        The way we determine success is by the presence of an error in
        ERRORARRAY.  If one is there, we assume the whole request failed.

        :return: ``bool`` indicating a successful request"""
        return len(self.errors) == 0

    def _make_excp(self, error):
        """Convert an API error to a LinodeException instance

        :keyword error: JSON object containing ``ERRORCODE`` and
            ``ERRORMESSAGE``
        :type error: dict"""
        if "ERRORCODE" not in error or "ERRORMESSAGE" not in error:
            return None
        # Error code 4 is "authentication failed" -> map it to the generic
        # libcloud credentials error so drivers handle it uniformly.
        if error["ERRORCODE"] == 4:
            return InvalidCredsError(error["ERRORMESSAGE"])
        return LinodeException(error["ERRORCODE"], error["ERRORMESSAGE"])


class LinodeConnection(ConnectionKey):
    """
    A connection to the Linode API

    Wraps SSL connections to the Linode API, automagically injecting the
    parameters that the API needs for each request.
    """
    host = API_HOST
    responseCls = LinodeResponse

    def add_default_params(self, params):
        """
        Add parameters that are necessary for every request

        This method adds ``api_key`` and ``api_responseFormat`` to
        the request.
        """
        params["api_key"] = self.key
        # Be explicit about this in case the default changes.
        params["api_responseFormat"] = "json"
        return params


# ==== apache-libcloud-2.2.1/libcloud/common/openstack_identity.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Common / shared code for handling authentication against OpenStack identity
service (Keystone).
"""

import sys
import datetime

from libcloud.utils.py3 import httplib
from libcloud.utils.iso8601 import parse_date

from libcloud.common.base import (ConnectionUserAndKey, Response,
                                  CertificateConnection)
from libcloud.compute.types import (LibcloudError, InvalidCredsError,
                                    MalformedResponseError)

try:
    import simplejson as json
except ImportError:
    import json

AUTH_API_VERSION = '1.1'

# Auth versions which contain token expiration information.
AUTH_VERSIONS_WITH_EXPIRES = [
    '1.1',
    '2.0',
    '2.0_apikey',
    '2.0_password',
    '2.0_voms',
    '3.0',
    '3.x_password',
    '3.x_oidc_access_token'
]

# How many seconds to subtract from the auth token expiration time before
# testing if the token is still valid.
# The time is subtracted to account for the HTTP request latency and prevent
# user from getting "InvalidCredsError" if token is about to expire.
AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5 __all__ = [ 'OpenStackIdentityVersion', 'OpenStackIdentityDomain', 'OpenStackIdentityProject', 'OpenStackIdentityUser', 'OpenStackIdentityRole', 'OpenStackServiceCatalog', 'OpenStackServiceCatalogEntry', 'OpenStackServiceCatalogEntryEndpoint', 'OpenStackIdentityEndpointType', 'OpenStackIdentityConnection', 'OpenStackIdentity_1_0_Connection', 'OpenStackIdentity_1_1_Connection', 'OpenStackIdentity_2_0_Connection', 'OpenStackIdentity_2_0_Connection_VOMS', 'OpenStackIdentity_3_0_Connection', 'OpenStackIdentity_3_0_Connection_OIDC_access_token', 'get_class_for_auth_version' ] class OpenStackIdentityEndpointType(object): """ Enum class for openstack identity endpoint type. """ INTERNAL = 'internal' EXTERNAL = 'external' ADMIN = 'admin' class OpenStackIdentityTokenScope(object): """ Enum class for openstack identity token scope. """ PROJECT = 'project' DOMAIN = 'domain' UNSCOPED = 'unscoped' class OpenStackIdentityVersion(object): def __init__(self, version, status, updated, url): self.version = version self.status = status self.updated = updated self.url = url def __repr__(self): return (('' % (self.version, self.status, self.updated, self.url))) class OpenStackIdentityDomain(object): def __init__(self, id, name, enabled): self.id = id self.name = name self.enabled = enabled def __repr__(self): return (('' % (self.id, self.name, self.enabled))) class OpenStackIdentityProject(object): def __init__(self, id, name, description, enabled, domain_id=None): self.id = id self.name = name self.description = description self.enabled = enabled self.domain_id = domain_id def __repr__(self): return (('' % (self.id, self.domain_id, self.name, self.enabled))) class OpenStackIdentityRole(object): def __init__(self, id, name, description, enabled): self.id = id self.name = name self.description = description self.enabled = enabled def __repr__(self): return (('' % (self.id, self.name, self.description, self.enabled))) class 
OpenStackIdentityUser(object): def __init__(self, id, domain_id, name, email, description, enabled): self.id = id self.domain_id = domain_id self.name = name self.email = email self.description = description self.enabled = enabled def __repr__(self): return (('' % (self.id, self.domain_id, self.name, self.email, self.enabled))) class OpenStackServiceCatalog(object): """ http://docs.openstack.org/api/openstack-identity-service/2.0/content/ This class should be instantiated with the contents of the 'serviceCatalog' in the auth response. This will do the work of figuring out which services actually exist in the catalog as well as split them up by type, name, and region if available """ _auth_version = None _service_catalog = None def __init__(self, service_catalog, auth_version=AUTH_API_VERSION): self._auth_version = auth_version # Check this way because there are a couple of different 2.0_* # auth types. if '3.x' in self._auth_version: entries = self._parse_service_catalog_auth_v3( service_catalog=service_catalog) elif '2.0' in self._auth_version: entries = self._parse_service_catalog_auth_v2( service_catalog=service_catalog) elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): entries = self._parse_service_catalog_auth_v1( service_catalog=service_catalog) else: raise LibcloudError('auth version "%s" not supported' % (self._auth_version)) # Force consistent ordering by sorting the entries entries = sorted(entries, key=lambda x: x.service_type + (x.service_name or '')) self._entries = entries # stories all the service catalog entries def get_entries(self): """ Return all the entries for this service catalog. :rtype: ``list`` of :class:`.OpenStackServiceCatalogEntry` """ return self._entries def get_catalog(self): """ Deprecated in the favor of ``get_entries`` method. """ return self.get_entries() def get_public_urls(self, service_type=None, name=None): """ Retrieve all the available public (external) URLs for the provided service type and name. 
""" endpoints = self.get_endpoints(service_type=service_type, name=name) result = [] for endpoint in endpoints: endpoint_type = endpoint.endpoint_type if endpoint_type == OpenStackIdentityEndpointType.EXTERNAL: result.append(endpoint.url) return result def get_endpoints(self, service_type=None, name=None): """ Retrieve all the endpoints for the provided service type and name. :rtype: ``list`` of :class:`.OpenStackServiceCatalogEntryEndpoint` """ endpoints = [] for entry in self._entries: # Note: "if XXX and YYY != XXX" comparison is used to support # partial lookups. # This allows user to pass in only one argument to the method (only # service_type or name), both of them or neither. if service_type and entry.service_type != service_type: continue if name and entry.service_name != name: continue for endpoint in entry.endpoints: endpoints.append(endpoint) return endpoints def get_endpoint(self, service_type=None, name=None, region=None, endpoint_type=OpenStackIdentityEndpointType.EXTERNAL): """ Retrieve a single endpoint using the provided criteria. Note: If no or more than one matching endpoint is found, an exception is thrown. """ endpoints = [] for entry in self._entries: if service_type and entry.service_type != service_type: continue if name and entry.service_name != name: continue for endpoint in entry.endpoints: if region and endpoint.region != region: continue if endpoint_type and endpoint.endpoint_type != endpoint_type: continue endpoints.append(endpoint) if len(endpoints) == 1: return endpoints[0] elif len(endpoints) > 1: raise ValueError('Found more than 1 matching endpoint') else: raise LibcloudError('Could not find specified endpoint') def get_regions(self, service_type=None): """ Retrieve a list of all the available regions. :param service_type: If specified, only return regions for this service type. 
:type service_type: ``str`` :rtype: ``list`` of ``str`` """ regions = set() for entry in self._entries: if service_type and entry.service_type != service_type: continue for endpoint in entry.endpoints: if endpoint.region: regions.add(endpoint.region) return sorted(list(regions)) def get_service_types(self, region=None): """ Retrieve all the available service types. :param region: Optional region to retrieve service types for. :type region: ``str`` :rtype: ``list`` of ``str`` """ service_types = set() for entry in self._entries: include = True for endpoint in entry.endpoints: if region and endpoint.region != region: include = False break if include: service_types.add(entry.service_type) return sorted(list(service_types)) def get_service_names(self, service_type=None, region=None): """ Retrieve list of service names that match service type and region. :type service_type: ``str`` :type region: ``str`` :rtype: ``list`` of ``str`` """ names = set() if '2.0' not in self._auth_version: raise ValueError('Unsupported version: %s' % (self._auth_version)) for entry in self._entries: if service_type and entry.service_type != service_type: continue include = True for endpoint in entry.endpoints: if region and endpoint.region != region: include = False break if include and entry.service_name: names.add(entry.service_name) return sorted(list(names)) def _parse_service_catalog_auth_v1(self, service_catalog): entries = [] for service, endpoints in service_catalog.items(): entry_endpoints = [] for endpoint in endpoints: region = endpoint.get('region', None) public_url = endpoint.get('publicURL', None) private_url = endpoint.get('internalURL', None) if public_url: entry_endpoint = OpenStackServiceCatalogEntryEndpoint( region=region, url=public_url, endpoint_type=OpenStackIdentityEndpointType.EXTERNAL) entry_endpoints.append(entry_endpoint) if private_url: entry_endpoint = OpenStackServiceCatalogEntryEndpoint( region=region, url=private_url, 
endpoint_type=OpenStackIdentityEndpointType.INTERNAL) entry_endpoints.append(entry_endpoint) entry = OpenStackServiceCatalogEntry(service_type=service, endpoints=entry_endpoints) entries.append(entry) return entries def _parse_service_catalog_auth_v2(self, service_catalog): entries = [] for service in service_catalog: service_type = service['type'] service_name = service.get('name', None) entry_endpoints = [] for endpoint in service.get('endpoints', []): region = endpoint.get('region', None) public_url = endpoint.get('publicURL', None) private_url = endpoint.get('internalURL', None) if public_url: entry_endpoint = OpenStackServiceCatalogEntryEndpoint( region=region, url=public_url, endpoint_type=OpenStackIdentityEndpointType.EXTERNAL) entry_endpoints.append(entry_endpoint) if private_url: entry_endpoint = OpenStackServiceCatalogEntryEndpoint( region=region, url=private_url, endpoint_type=OpenStackIdentityEndpointType.INTERNAL) entry_endpoints.append(entry_endpoint) entry = OpenStackServiceCatalogEntry(service_type=service_type, endpoints=entry_endpoints, service_name=service_name) entries.append(entry) return entries def _parse_service_catalog_auth_v3(self, service_catalog): entries = [] for item in service_catalog: service_type = item['type'] service_name = item.get('name', None) entry_endpoints = [] for endpoint in item['endpoints']: region = endpoint.get('region', None) url = endpoint['url'] endpoint_type = endpoint['interface'] if endpoint_type == 'internal': endpoint_type = OpenStackIdentityEndpointType.INTERNAL elif endpoint_type == 'public': endpoint_type = OpenStackIdentityEndpointType.EXTERNAL elif endpoint_type == 'admin': endpoint_type = OpenStackIdentityEndpointType.ADMIN entry_endpoint = OpenStackServiceCatalogEntryEndpoint( region=region, url=url, endpoint_type=endpoint_type) entry_endpoints.append(entry_endpoint) entry = OpenStackServiceCatalogEntry(service_type=service_type, service_name=service_name, endpoints=entry_endpoints) entries.append(entry) 
return entries class OpenStackServiceCatalogEntry(object): def __init__(self, service_type, endpoints=None, service_name=None): """ :param service_type: Service type. :type service_type: ``str`` :param endpoints: Endpoints belonging to this entry. :type endpoints: ``list`` :param service_name: Optional service name. :type service_name: ``str`` """ self.service_type = service_type self.endpoints = endpoints or [] self.service_name = service_name # For consistency, sort the endpoints self.endpoints = sorted(self.endpoints, key=lambda x: x.url or '') def __eq__(self, other): return (self.service_type == other.service_type and self.endpoints == other.endpoints and other.service_name == self.service_name) def __ne__(self, other): return not self.__eq__(other=other) def __repr__(self): return (('CodeName to a specific # exception class that is raised immediately. # If a custom exception class is not defined, errors are accumulated and # returned from the parse_error method. exceptions = {} def success(self): return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED] def parse_error(self): context = self.connection.context status = int(self.status) # FIXME: Probably ditch this as the forbidden message will have # corresponding XML. 
if status == httplib.FORBIDDEN: if not self.body: raise InvalidCredsError(str(self.status) + ': ' + self.error) else: raise InvalidCredsError(self.body) try: body = ET.XML(self.body) except Exception: raise MalformedResponseError('Failed to parse XML', body=self.body, driver=self.connection.driver) if self.xpath: errs = findall(element=body, xpath=self.xpath, namespace=self.namespace) else: errs = [body] msgs = [] for err in errs: code, message = self._parse_error_details(element=err) exceptionCls = self.exceptions.get(code, None) if exceptionCls is None: msgs.append('%s: %s' % (code, message)) continue # Custom exception class is defined, immediately throw an exception params = {} if hasattr(exceptionCls, 'kwargs'): for key in exceptionCls.kwargs: if key in context: params[key] = context[key] raise exceptionCls(value=message, driver=self.connection.driver, **params) return "\n".join(msgs) class AWSTokenConnection(ConnectionUserAndKey): def __init__(self, user_id, key, secure=True, host=None, port=None, url=None, timeout=None, proxy_url=None, token=None, retry_delay=None, backoff=None): self.token = token super(AWSTokenConnection, self).__init__(user_id, key, secure=secure, host=host, port=port, url=url, timeout=timeout, retry_delay=retry_delay, backoff=backoff, proxy_url=proxy_url) def add_default_params(self, params): # Even though we are adding it to the headers, we need it here too # so that the token is added to the signature. if self.token: params['x-amz-security-token'] = self.token return super(AWSTokenConnection, self).add_default_params(params) def add_default_headers(self, headers): if self.token: headers['x-amz-security-token'] = self.token return super(AWSTokenConnection, self).add_default_headers(headers) class AWSRequestSigner(object): """ Class which handles signing the outgoing AWS requests. """ def __init__(self, access_key, access_secret, version, connection): """ :param access_key: Access key. 
:type access_key: ``str`` :param access_secret: Access secret. :type access_secret: ``str`` :param version: API version. :type version: ``str`` :param connection: Connection instance. :type connection: :class:`Connection` """ self.access_key = access_key self.access_secret = access_secret self.version = version # TODO: Remove cycling dependency between connection and signer self.connection = connection def get_request_params(self, params, method='GET', path='/'): return params def get_request_headers(self, params, headers, method='GET', path='/', data=None): return params, headers class AWSRequestSignerAlgorithmV2(AWSRequestSigner): def get_request_params(self, params, method='GET', path='/'): params['SignatureVersion'] = '2' params['SignatureMethod'] = 'HmacSHA256' params['AWSAccessKeyId'] = self.access_key params['Version'] = self.version params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) params['Signature'] = self._get_aws_auth_param( params=params, secret_key=self.access_secret, path=path) return params def _get_aws_auth_param(self, params, secret_key, path='/'): """ Creates the signature required for AWS, per http://bit.ly/aR7GaQ [docs.amazonwebservices.com]: StringToSign = HTTPVerb + "\n" + ValueOfHostHeaderInLowercase + "\n" + HTTPRequestURI + "\n" + CanonicalizedQueryString """ connection = self.connection keys = list(params.keys()) keys.sort() pairs = [] for key in keys: value = str(params[key]) pairs.append(urlquote(key, safe='') + '=' + urlquote(value, safe='-_~')) qs = '&'.join(pairs) hostname = connection.host if (connection.secure and connection.port != 443) or \ (not connection.secure and connection.port != 80): hostname += ':' + str(connection.port) string_to_sign = '\n'.join(('GET', hostname, path, qs)) b64_hmac = base64.b64encode( hmac.new(b(secret_key), b(string_to_sign), digestmod=sha256).digest() ) return b64_hmac.decode('utf-8') class AWSRequestSignerAlgorithmV4(AWSRequestSigner): def get_request_params(self, params, 
method='GET', path='/'): if method == 'GET': params['Version'] = self.version return params def get_request_headers(self, params, headers, method='GET', path='/', data=None): now = datetime.utcnow() headers['X-AMZ-Date'] = now.strftime('%Y%m%dT%H%M%SZ') headers['X-AMZ-Content-SHA256'] = self._get_payload_hash(method, data) headers['Authorization'] = \ self._get_authorization_v4_header(params=params, headers=headers, dt=now, method=method, path=path, data=data) return params, headers def _get_authorization_v4_header(self, params, headers, dt, method='GET', path='/', data=None): credentials_scope = self._get_credential_scope(dt=dt) signed_headers = self._get_signed_headers(headers=headers) signature = self._get_signature(params=params, headers=headers, dt=dt, method=method, path=path, data=data) return 'AWS4-HMAC-SHA256 Credential=%(u)s/%(c)s, ' \ 'SignedHeaders=%(sh)s, Signature=%(s)s' % { 'u': self.access_key, 'c': credentials_scope, 'sh': signed_headers, 's': signature } def _get_signature(self, params, headers, dt, method, path, data): key = self._get_key_to_sign_with(dt) string_to_sign = self._get_string_to_sign(params=params, headers=headers, dt=dt, method=method, path=path, data=data) return _sign(key=key, msg=string_to_sign, hex=True) def _get_key_to_sign_with(self, dt): return _sign( _sign( _sign( _sign(('AWS4' + self.access_secret), dt.strftime('%Y%m%d')), self.connection.driver.region_name), self.connection.service_name), 'aws4_request') def _get_string_to_sign(self, params, headers, dt, method, path, data): canonical_request = self._get_canonical_request(params=params, headers=headers, method=method, path=path, data=data) return '\n'.join(['AWS4-HMAC-SHA256', dt.strftime('%Y%m%dT%H%M%SZ'), self._get_credential_scope(dt), _hash(canonical_request)]) def _get_credential_scope(self, dt): return '/'.join([dt.strftime('%Y%m%d'), self.connection.driver.region_name, self.connection.service_name, 'aws4_request']) def _get_signed_headers(self, headers): return 
';'.join([k.lower() for k in sorted(headers.keys())]) def _get_canonical_headers(self, headers): return '\n'.join([':'.join([k.lower(), str(v).strip()]) for k, v in sorted(headers.items())]) + '\n' def _get_payload_hash(self, method, data=None): if method in ('POST', 'PUT'): if data: if hasattr(data, 'next') or hasattr(data, '__next__'): # File upload; don't try to read the entire payload return UNSIGNED_PAYLOAD return _hash(data) else: return UNSIGNED_PAYLOAD else: return _hash('') def _get_request_params(self, params): # For self.method == GET return '&'.join(["%s=%s" % (urlquote(k, safe=''), urlquote(str(v), safe='~')) for k, v in sorted(params.items())]) def _get_canonical_request(self, params, headers, method, path, data): return '\n'.join([ method, path, self._get_request_params(params), self._get_canonical_headers(headers), self._get_signed_headers(headers), self._get_payload_hash(method, data) ]) class SignedAWSConnection(AWSTokenConnection): version = None def __init__(self, user_id, key, secure=True, host=None, port=None, url=None, timeout=None, proxy_url=None, token=None, retry_delay=None, backoff=None, signature_version=DEFAULT_SIGNATURE_VERSION): super(SignedAWSConnection, self).__init__(user_id=user_id, key=key, secure=secure, host=host, port=port, url=url, timeout=timeout, token=token, retry_delay=retry_delay, backoff=backoff, proxy_url=proxy_url) self.signature_version = str(signature_version) if self.signature_version == '2': signer_cls = AWSRequestSignerAlgorithmV2 elif self.signature_version == '4': signer_cls = AWSRequestSignerAlgorithmV4 else: raise ValueError('Unsupported signature_version: %s' % (signature_version)) self.signer = signer_cls(access_key=self.user_id, access_secret=self.key, version=self.version, connection=self) def add_default_params(self, params): params = self.signer.get_request_params(params=params, method=self.method, path=self.action) return params def pre_connect_hook(self, params, headers): params, headers = 
self.signer.get_request_headers(params=params, headers=headers, method=self.method, path=self.action, data=self.data) return params, headers class AWSJsonResponse(JsonResponse): """ Amazon ECS response class. ECS API uses JSON unlike the s3, elb drivers """ def parse_error(self): response = json.loads(self.body) code = response['__type'] message = response.get('Message', response['message']) return ('%s: %s' % (code, message)) def _sign(key, msg, hex=False): if hex: return hmac.new(b(key), b(msg), hashlib.sha256).hexdigest() else: return hmac.new(b(key), b(msg), hashlib.sha256).digest() def _hash(msg): return hashlib.sha256(b(msg)).hexdigest() class AWSDriver(BaseDriver): def __init__(self, key, secret=None, secure=True, host=None, port=None, api_version=None, region=None, token=None, **kwargs): self.token = token super(AWSDriver, self).__init__(key, secret=secret, secure=secure, host=host, port=port, api_version=api_version, region=region, token=token, **kwargs) def _ex_connection_class_kwargs(self): kwargs = super(AWSDriver, self)._ex_connection_class_kwargs() kwargs['token'] = self.token return kwargs apache-libcloud-2.2.1/libcloud/common/zonomi.py0000664000175000017500000001053612705475361021407 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License.You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.common.base import XmlResponse
from libcloud.common.base import ConnectionKey

__all__ = [
    'ZonomiException',
    'ZonomiResponse',
    'ZonomiConnection'
]

# Endpoint for Zonomi API.
API_HOST = 'zonomi.com'

SPECIAL_ERRORS = [
    'Not found.',
    'ERROR: This zone is already in your zone list.',
    'Record not deleted.'
]


class ZonomiException(Exception):
    """
    Exception wrapping a Zonomi API error code and message.
    """

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return "%s %s" % (self.code, self.message)

    def __repr__(self):
        return "ZonomiException %s %s" % (self.code, self.message)


class ZonomiResponse(XmlResponse):
    """
    Response wrapper for the Zonomi XML API.

    Parses the body into (objects, errors) at construction time and raises a
    :class:`ZonomiException` as soon as an error is found.
    """
    errors = None
    objects = None

    def __init__(self, response, connection):
        self.errors = []
        super(ZonomiResponse, self).__init__(response=response,
                                             connection=connection)
        self.objects, self.errors = self.parse_body_and_errors()
        if self.errors:
            raise self._make_excp(self.errors[0])

    def parse_body_and_errors(self):
        """
        Parse the XML body into data objects and error dictionaries.

        :return: ``tuple`` of (``list`` of zone/record attribute dicts,
            ``list`` of dicts with ``ERRORCODE`` and ``ERRORMESSAGE`` keys)
        """
        error_dict = {}
        actions = None
        result_counts = None
        action_childrens = None
        data = []
        errors = []
        xml_body = super(ZonomiResponse, self).parse_body()
        # pylint: disable=no-member
        # Error handling
        if xml_body.text is not None and xml_body.tag == 'error':
            error_dict['ERRORCODE'] = self.status
            if xml_body.text.startswith('ERROR: No zone found for'):
                error_dict['ERRORCODE'] = '404'
                error_dict['ERRORMESSAGE'] = 'Not found.'
            else:
                error_dict['ERRORMESSAGE'] = xml_body.text
            errors.append(error_dict)

        # Data handling
        # FIX: Element.getchildren() was deprecated since Python 3.2 and
        # removed in Python 3.9; list(element) is the supported equivalent.
        childrens = list(xml_body)
        if len(childrens) == 3:
            result_counts = childrens[1]
            actions = childrens[2]

        if actions is not None:
            actions_childrens = list(actions)
            action = actions_childrens[0]
            action_childrens = list(action)

        if action_childrens is not None:
            for child in action_childrens:
                if child.tag == 'zone' or child.tag == 'record':
                    data.append(child.attrib)

        if result_counts is not None and \
           result_counts.attrib.get('deleted') == '1':
            data.append('DELETED')

        if result_counts is not None and \
           result_counts.attrib.get('deleted') == '0' and \
           action.get('action') == 'DELETE':
            error_dict['ERRORCODE'] = self.status
            error_dict['ERRORMESSAGE'] = 'Record not deleted.'
            errors.append(error_dict)

        return (data, errors)

    def success(self):
        """:return: ``bool`` -- True when no errors were parsed."""
        return (len(self.errors) == 0)

    def _make_excp(self, error):
        """
        :param error: contains error code and error message
        :type error: dict
        """
        return ZonomiException(error['ERRORCODE'], error['ERRORMESSAGE'])


class ZonomiConnection(ConnectionKey):
    host = API_HOST
    responseCls = ZonomiResponse

    def add_default_params(self, params):
        """
        Adds default parameters to perform a request, such as api_key.
        """
        params['api_key'] = self.key
        return params

    def add_default_headers(self, headers):
        """
        Adds default headers needed to perform a successful request such as
        Content-Type, User-Agent.
        """
        headers['Content-Type'] = 'text/xml;charset=UTF-8'
        return headers


# ==== apache-libcloud-2.2.1/libcloud/common/nsone.py ====
from libcloud.common.base import ConnectionKey, JsonResponse

__all__ = [
    'API_HOST',
    'NsOneException',
    'NsOneResponse',
    'NsOneConnection'
]

# Endpoint for nsone api
API_HOST = 'api.nsone.net'


class NsOneResponse(JsonResponse):
    # Kept for backwards compatibility; the working values are bound
    # per-instance in __init__ below.
    errors = []
    objects = []

    def __init__(self, response, connection):
        # FIX: errors/objects used to exist only as mutable *class*
        # attributes, so parse_body_and_errors() appended into lists shared
        # by every response instance and stale errors leaked between
        # requests.  Bind fresh per-instance lists before parsing.
        self.errors = []
        self.objects = []
        super(NsOneResponse, self).__init__(response=response,
                                            connection=connection)
        self.errors, self.objects = self.parse_body_and_errors()
        if not self.success():
            raise NsOneException(code=self.status,
                                 message=self.errors.pop()['message'])

    def parse_body_and_errors(self):
        """Split the parsed JSON body into (errors, objects) lists."""
        js = super(NsOneResponse, self).parse_body()
        # NS1 error payloads carry a top-level 'message' key.
        if 'message' in js:
            self.errors.append(js)
        else:
            self.objects.append(js)

        return self.errors, self.objects

    def success(self):
        return len(self.errors) == 0


class NsOneConnection(ConnectionKey):
    host = API_HOST
    responseCls = NsOneResponse

    def add_default_headers(self, headers):
        """Attach the JSON content type and the NS1 API key header."""
        headers['Content-Type'] = 'application/json'
        headers['X-NSONE-KEY'] = self.key
        return headers


class NsOneException(Exception):
    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return "%s %s" % (self.code, self.message)

    def __repr__(self):
        return "NsOneException %s %s" % (self.code, self.message)


# ==== apache-libcloud-2.2.1/libcloud/common/worldwidedns.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.base import Response
from libcloud.common.types import ProviderError

# The WorldWideDNS API reports status as bare numeric codes, one per line,
# in the response body rather than via HTTP status codes.
OK_CODES = ['200', '211', '212', '213']
ERROR_CODES = ['401', '403', '405', '406', '407', '408', '409', '410',
               '411', '412', '413', '414', '450', '451']

# Lines may be separated by LF or CRLF; compile the separator once instead
# of recompiling/splitting ad hoc on every parse.
_LINE_SPLIT_RE = re.compile(r'\r?\n')


class WorldWideDNSException(ProviderError):
    """Base exception carrying the WorldWideDNS numeric error ``code``."""

    def __init__(self, value, http_code, code, driver=None):
        self.code = code
        super(WorldWideDNSException, self).__init__(value, http_code, driver)


class SuspendedAccount(WorldWideDNSException):
    """Code 401: account suspended and needs renewal."""

    def __init__(self, http_code, driver=None):
        value = "Login ID you supplied is SUSPENDED, you need to renew" + \
                " your account"
        super(SuspendedAccount, self).__init__(value, http_code,
                                               401, driver)


class LoginOrPasswordNotMatch(WorldWideDNSException):
    """Code 403: invalid credentials."""

    def __init__(self, http_code, driver=None):
        value = "Login ID and/or Password you supplied is not on file or" + \
                " does not match"
        super(LoginOrPasswordNotMatch, self).__init__(value, http_code,
                                                      403, driver)


class NonExistentDomain(WorldWideDNSException):
    """Code 405: domain not present in the account."""

    def __init__(self, http_code, driver=None):
        value = "Domain name supplied is not in your account"
        super(NonExistentDomain, self).__init__(value, http_code,
                                                405, driver)


class CouldntRemoveDomain(WorldWideDNSException):
    """Code 406: server-side failure while removing a domain."""

    def __init__(self, http_code, driver=None):
        value = "Error occured removing domain from name server, try again"
        super(CouldntRemoveDomain, self).__init__(value, http_code,
                                                  406, driver)


class LimitExceeded(WorldWideDNSException):
    """Code 407: account limit exceeded."""

    def __init__(self, http_code, driver=None):
        value = "Your limit was exceeded, you need to upgrade your account"
        super(LimitExceeded, self).__init__(value, http_code,
                                            407, driver)


class ExistentDomain(WorldWideDNSException):
    """Code 408: domain already registered on the servers."""

    def __init__(self, http_code, driver=None):
        value = "Domain already exists on our servers"
        super(ExistentDomain, self).__init__(value, http_code,
                                             408, driver)


class DomainBanned(WorldWideDNSException):
    """Code 409: domain is listed in a DNSBL."""

    def __init__(self, http_code, driver=None):
        value = "Domain is listed in DNSBL and is banned from our servers"
        super(DomainBanned, self).__init__(value, http_code,
                                           409, driver)


class InvalidDomainName(WorldWideDNSException):
    """Code 410: malformed domain name."""

    def __init__(self, http_code, driver=None):
        value = "Invalid domain name"
        super(InvalidDomainName, self).__init__(value, http_code,
                                                410, driver)


class ErrorOnReloadInNameServer(WorldWideDNSException):
    """Codes 411-413: a specific name server failed to reload the zone."""

    def __init__(self, server, http_code, driver=None):
        # The error code encodes which of the three name servers failed.
        if server == 1:
            value = "Name server #1 kicked an error on reload, contact support"
            code = 411
        elif server == 2:
            value = "Name server #2 kicked an error on reload, contact support"
            code = 412
        elif server == 3:
            value = "Name server #3 kicked an error on reload, contact support"
            code = 413
        super(ErrorOnReloadInNameServer, self).__init__(value, http_code,
                                                        code, driver)


class NewUserNotValid(WorldWideDNSException):
    """Code 414: proposed new user id rejected."""

    def __init__(self, http_code, driver=None):
        value = "New userid is not valid"
        super(NewUserNotValid, self).__init__(value, http_code,
                                              414, driver)


class CouldntReachNameServer(WorldWideDNSException):
    """Code 450: name server unreachable."""

    def __init__(self, http_code, driver=None):
        value = "Couldn't reach the name server, try again later"
        super(CouldntReachNameServer, self).__init__(value, http_code,
                                                     450, driver)


class NoZoneFile(WorldWideDNSException):
    """Code 451: queried name server holds no zone file."""

    def __init__(self, http_code, driver=None):
        value = "No zone file in the name server queried"
        super(NoZoneFile, self).__init__(value, http_code,
                                         451, driver)


ERROR_CODE_TO_EXCEPTION_CLS = {
    '401': SuspendedAccount,
    '403': LoginOrPasswordNotMatch,
    '405': NonExistentDomain,
    '406': CouldntRemoveDomain,
    '407': LimitExceeded,
    '408': ExistentDomain,
    '409': DomainBanned,
    '410': InvalidDomainName,
    '411': ErrorOnReloadInNameServer,
    '412': ErrorOnReloadInNameServer,
    '413': ErrorOnReloadInNameServer,
    '414': NewUserNotValid,
    '450': CouldntReachNameServer,
    '451': NoZoneFile,
}


class WorldWideDNSResponse(Response):

    def parse_body(self):
        """
        Parse response body.

        If the body looks like a list of status codes, raise the exception
        mapped to the first error code found; otherwise return the body
        unchanged.

        :return: Parsed body.
        :rtype: ``str``

        :raises WorldWideDNSException: subclass matching the error code.
        """
        if self._code_response(self.body):
            codes = _LINE_SPLIT_RE.split(self.body)
            for code in codes:
                if code in OK_CODES:
                    continue
                elif code in ERROR_CODES:
                    exception = ERROR_CODE_TO_EXCEPTION_CLS.get(code)
                    if code in ['411', '412', '413']:
                        # Third digit of the code identifies the name server.
                        server = int(code[2])
                        raise exception(server, self.status)
                    raise exception(self.status)
        return self.body

    def _code_response(self, body):
        """
        Checks if the response body contains code status.

        :rtype: ``bool``
        """
        # The body is code-based iff its first line is a known status code.
        first_line = _LINE_SPLIT_RE.split(body)[0]
        return first_line in OK_CODES or first_line in ERROR_CODES


class WorldWideDNSConnection(ConnectionUserAndKey):
    host = 'www.worldwidedns.net'
    responseCls = WorldWideDNSResponse

    def add_default_params(self, params):
        """
        Add parameters that are necessary for every request

        This method adds ``NAME`` and ``PASSWORD`` to
        the request.
        """
        params["NAME"] = self.user_id
        params["PASSWORD"] = self.key
        # Resellers additionally pass their customer ID, if one was set on
        # the driver.
        reseller_id = getattr(self, 'reseller_id', None)
        if reseller_id:
            params["ID"] = reseller_id
        return params
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.base import ConnectionKey, JsonResponse

__all__ = [
    'API_HOST',
    'DNSPodException',
    'DNSPodResponse',
    'DNSPodConnection'
]

# Endpoint for dnspod api
API_HOST = 'api.dnspod.com'


class DNSPodResponse(JsonResponse):
    """
    Response class for the DNSPod API.

    The API signals errors through a ``status`` object in the JSON body
    whose ``code`` is ``'1'`` on success.
    """
    # Kept for backward compatibility with code that reads these off the
    # class; real state lives in the per-instance lists set in __init__.
    errors = []
    objects = []

    def __init__(self, response, connection):
        # Use per-instance lists.  The previous class-level lists were shared
        # by every response object, so an error from one request leaked into
        # all subsequent responses and broke success().
        self.errors = []
        self.objects = []
        super(DNSPodResponse, self).__init__(response=response,
                                             connection=connection)
        self.errors, self.objects = self.parse_body_and_errors()
        if not self.success():
            raise DNSPodException(code=self.status,
                                  message=self.errors.pop()
                                  ['status']['message'])

    def parse_body_and_errors(self):
        """
        Parse the JSON body and sort it into the error or object bucket.

        :return: Tuple of (errors, objects) lists.
        :rtype: ``tuple``
        """
        js = super(DNSPodResponse, self).parse_body()
        # status.code != '1' marks an API-level error.
        if 'status' in js and js['status']['code'] != '1':
            self.errors.append(js)
        else:
            self.objects.append(js)

        return self.errors, self.objects

    def success(self):
        # Successful iff no error payloads were collected.
        return len(self.errors) == 0


class DNSPodConnection(ConnectionKey):
    """Connection class for the DNSPod API (form-encoded requests)."""
    host = API_HOST
    responseCls = DNSPodResponse

    def add_default_headers(self, headers):
        headers['Content-Type'] = 'application/x-www-form-urlencoded'
        headers['Accept'] = 'text/json'
        return headers


class DNSPodException(Exception):
    """Exception raised for DNSPod API errors; carries status and message."""

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return "%s %s" % (self.code, self.message)

    def __repr__(self):
        return "DNSPodException %s %s" % (self.code, self.message)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common settings and connection objects for DigitalOcean Cloud
"""

from libcloud.utils.py3 import httplib, parse_qs, urlparse

from libcloud.common.base import BaseDriver
from libcloud.common.base import ConnectionKey
from libcloud.common.base import JsonResponse
from libcloud.common.types import LibcloudError, InvalidCredsError

__all__ = [
    'DigitalOcean_v2_Response',
    'DigitalOcean_v2_Connection',
    'DigitalOceanBaseDriver'
]


class DigitalOcean_v1_Error(LibcloudError):
    """
    Exception for when attempting to use version 1
    of the DigitalOcean API which is no longer
    supported.
    """

    def __init__(self,
                 value=('Driver no longer supported: Version 1 of the '
                        'DigitalOcean API reached end of life on November 9, '
                        '2015. Use the v2 driver. Please visit: '
                        'https://developers.digitalocean.com/documentation/changelog/api-v1/sunsetting-api-v1/'),  # noqa: E501
                 driver=None):
        super(DigitalOcean_v1_Error, self).__init__(value, driver=driver)


class DigitalOcean_v2_Response(JsonResponse):
    """Response class for v2 of the DigitalOcean API."""
    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_error(self):
        if self.status == httplib.UNAUTHORIZED:
            body = self.parse_body()
            raise InvalidCredsError(body['message'])
        else:
            body = self.parse_body()
            if 'message' in body:
                error = '%s (code: %s)' % (body['message'], self.status)
            else:
                # No structured message; surface the raw body.
                error = body
            return error

    def success(self):
        return self.status in self.valid_response_codes


class DigitalOcean_v2_Connection(ConnectionKey):
    """
    Connection class for the DigitalOcean (v2) driver.
    """
    host = 'api.digitalocean.com'
    responseCls = DigitalOcean_v2_Response

    def add_default_headers(self, headers):
        """
        Add headers that are necessary for every request

        This method adds ``token`` to the request.
        """
        headers['Authorization'] = 'Bearer %s' % (self.key)
        headers['Content-Type'] = 'application/json'
        return headers

    def add_default_params(self, params):
        """
        Add parameters that are necessary for every request

        This method adds ``per_page`` to the request to reduce the total
        number of paginated requests to the API.
        """
        params['per_page'] = self.driver.ex_per_page
        return params


class DigitalOceanConnection(DigitalOcean_v2_Connection):
    """
    Connection class for the DigitalOcean driver.
    """
    pass


class DigitalOceanResponse(DigitalOcean_v2_Response):
    pass


class DigitalOceanBaseDriver(BaseDriver):
    """
    DigitalOcean BaseDriver
    """
    name = 'DigitalOcean'
    website = 'https://www.digitalocean.com'

    def __new__(cls, key, secret=None, api_version='v2', **kwargs):
        # Dispatch to the versioned driver class; v1 is refused outright.
        if cls is DigitalOceanBaseDriver:
            if api_version == 'v1' or secret is not None:
                raise DigitalOcean_v1_Error()
            elif api_version == 'v2':
                cls = DigitalOcean_v2_BaseDriver
            else:
                raise NotImplementedError('Unsupported API version: %s' %
                                          (api_version))
        return super(DigitalOceanBaseDriver, cls).__new__(cls, **kwargs)

    def ex_account_info(self):
        raise NotImplementedError(
            'ex_account_info not implemented for this driver')

    def ex_list_events(self):
        raise NotImplementedError(
            'ex_list_events not implemented for this driver')

    def ex_get_event(self, event_id):
        raise NotImplementedError(
            'ex_get_event not implemented for this driver')

    def _paginated_request(self, url, obj):
        raise NotImplementedError(
            '_paginated_requests not implemented for this driver')


class DigitalOcean_v2_BaseDriver(DigitalOceanBaseDriver):
    """
    DigitalOcean BaseDriver using v2 of the API.

    Supports `ex_per_page` ``int`` value keyword parameter to adjust
    per page requests against the API.
    """
    connectionCls = DigitalOcean_v2_Connection

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=None, ex_per_page=200, **kwargs):
        self.ex_per_page = ex_per_page
        super(DigitalOcean_v2_BaseDriver, self).__init__(key, **kwargs)

    def ex_account_info(self):
        return self.connection.request('/v2/account').object['account']

    def ex_list_events(self):
        return self._paginated_request('/v2/actions', 'actions')

    def ex_get_event(self, event_id):
        """
        Get an event object

        :param event_id: Event id (required)
        :type event_id: ``str``
        """
        params = {}
        return self.connection.request('/v2/actions/%s' % event_id,
                                       params=params).object['action']

    def _paginated_request(self, url, obj):
        """
        Perform multiple calls in order to have a full list of elements when
        the API responses are paginated.

        :param url: API endpoint
        :type url: ``str``

        :param obj: Result object key
        :type obj: ``str``

        :return: ``list`` of API response objects
        :rtype: ``list``
        """
        params = {}
        data = self.connection.request(url)
        try:
            query = urlparse.urlparse(data.object['links']['pages']['last'])
            # The query[4] references the query parameters from the url
            pages = parse_qs(query[4])['page'][0]
            values = data.object[obj]
            for page in range(2, int(pages) + 1):
                params.update({'page': page})
                new_data = self.connection.request(url, params=params)
                # Accumulate each page's results in one pass.
                values.extend(new_data.object[obj])
            data = values
        except KeyError:  # No pages.
            data = data.object[obj]
        return data
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for Google Connection and Authentication classes.

Information about setting up your Google OAUTH2 credentials:

For libcloud, there are two basic methods for authenticating to Google using
OAUTH2: Service Accounts and Client IDs for Installed Applications.

Both are initially set up from the Cloud Console Console -
https://cloud.google.com/console

Setting up Service Account authentication (note that you need the PyCrypto
package installed to use this):

- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Click on "Create New Client ID..."
- Select "Service account" and click on "Create Client ID"
- Download the Private Key (should happen automatically).  The key you
  download is in JSON format.
- Move the .json file to a safe location.
- Optionally, you may choose to Generate a PKCS12 key from the Console.
  It needs to be converted to the PEM format.  Please note, the PKCS12 format
  is deprecated and may be removed in a future release.
  - Convert the key using OpenSSL (the default password is 'notasecret').
  - Move the .pem file to a safe location.
- To Authenticate, you will need to pass the Service Account's "Email
  address" in as the user_id and the path to the .pem file as the key.

Setting up Installed Application authentication:

- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Select "Installed application" and "Other" then click on
  "Create Client ID"
- To Authenticate, pass in the "Client ID" as the user_id and the "Client
  secret" as the key
- The first time that you do this, the libcloud will give you a URL to
  visit.  Copy and paste the URL into a browser.
- When you go to the URL it will ask you to log in (if you aren't already)
  and ask you if you want to allow the project access to your account.
- Click on Accept and you will be given a code.
- Paste that code at the prompt given to you by the Google libcloud
  connection.
- At that point, a token & refresh token will be stored in your home
  directory and will be used for authentication.

Please remember to secure your keys and access tokens.
"""
from __future__ import with_statement

try:
    import simplejson as json
except ImportError:
    import json

import logging
import base64
import errno
import time
import datetime
import os
import socket
import sys

from libcloud.utils.connection import get_response_object
from libcloud.utils.py3 import b, httplib, urlencode, urlparse, PY3
from libcloud.common.base import (ConnectionUserAndKey, JsonResponse,
                                  PollingConnection)
from libcloud.common.types import (ProviderError,
                                   LibcloudError)

try:
    from Crypto.Hash import SHA256
    from Crypto.PublicKey import RSA
    from Crypto.Signature import PKCS1_v1_5
    import Crypto.Random
    Crypto.Random.atfork()
except ImportError:
    # The pycrypto library is unavailable
    SHA256 = None
    RSA = None
    PKCS1_v1_5 = None

UTC_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

LOG = logging.getLogger(__name__)


def _utcnow():
    """
    Mocked in libcloud.test.common.google.GoogleTestCase.
    """
    return datetime.datetime.utcnow()


def _utc_timestamp(datetime_obj):
    """
    Return string of datetime_obj in the UTC Timestamp Format
    """
    return datetime_obj.strftime(UTC_TIMESTAMP_FORMAT)


def _from_utc_timestamp(timestamp):
    """
    Return datetime obj where date and time are pulled from timestamp string.
    """
    return datetime.datetime.strptime(timestamp, UTC_TIMESTAMP_FORMAT)


def _get_gce_metadata(path=''):
    """
    Query the GCE internal metadata server.

    :return: Tuple of (status, error string, body); status is -1 on failure.
    :rtype: ``tuple``
    """
    try:
        url = 'http://metadata/computeMetadata/v1/' + path.lstrip('/')
        headers = {'Metadata-Flavor': 'Google'}
        response = get_response_object(url, headers=headers)
        return response.status, '', response.body
    except Exception as e:
        return -1, str(e), None


class GoogleAuthError(LibcloudError):
    """Generic Error class for various authentication errors."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return repr(self.value)


class GoogleBaseError(ProviderError):
    def __init__(self, value, http_code, code, driver=None):
        self.code = code
        super(GoogleBaseError, self).__init__(value, http_code, driver)


class InvalidRequestError(GoogleBaseError):
    pass


class JsonParseError(GoogleBaseError):
    pass


class ResourceNotFoundError(GoogleBaseError):
    def __init__(self, value, http_code, code, driver=None):
        self.code = code
        # A "not found" on a project path is very often a credentials/project
        # mismatch; append a hint so users don't chase the wrong error.
        if isinstance(value, dict) and 'message' in value and \
                value['message'].count('/') == 1 and \
                value['message'].count('projects/') == 1:
            value['message'] = value['message'] + ". A missing project " \
                "error may be an authentication issue. " \
                "Please ensure your auth credentials match " \
                "your project. "
        super(ResourceNotFoundError, self).__init__(value, http_code, driver)


class QuotaExceededError(GoogleBaseError):
    pass


class ResourceExistsError(GoogleBaseError):
    pass


class ResourceInUseError(GoogleBaseError):
    pass


class GoogleResponse(JsonResponse):
    """
    Google Base Response class.
    """

    def success(self):
        """
        Determine if the request was successful.

        For the Google response class, tag all responses as successful and
        raise appropriate Exceptions from parse_body.

        :return: C{True}
        """
        return True

    def _get_error(self, body):
        """
        Get the error code and message from a JSON response.

        Return just the first error if there are multiple errors.

        :param body: The body of the JSON response dictionary
        :type  body: ``dict``

        :return: Tuple containing error code and message
        :rtype: ``tuple`` of ``str`` or ``int``
        """
        if 'errors' in body['error']:
            err = body['error']['errors'][0]
        else:
            err = body['error']

        if 'code' in err:
            code = err.get('code')
            message = err.get('message')
        else:
            code = None
            if 'reason' in err:
                code = err.get('reason')
            message = body.get('error_description', err)

        return (code, message)

    def parse_body(self):
        """
        Parse the JSON response body, or raise exceptions as appropriate.

        :return:  JSON dictionary
        :rtype:   ``dict``
        """
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body

        json_error = False
        try:
            body = json.loads(self.body)
        except Exception:
            # If there is both a JSON parsing error and an unsuccessful http
            # response (like a 404), we want to raise the http error and not
            # the JSON one, so don't raise JsonParseError here.
            # (Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed.)
            body = self.body
            json_error = True

        valid_http_codes = [
            httplib.OK,
            httplib.CREATED,
            httplib.ACCEPTED,
            httplib.CONFLICT,
        ]
        if self.status in valid_http_codes:
            if json_error:
                raise JsonParseError(body, self.status, None)
            elif 'error' in body:
                (code, message) = self._get_error(body)
                if code == 'QUOTA_EXCEEDED':
                    raise QuotaExceededError(message, self.status, code)
                elif code == 'RESOURCE_ALREADY_EXISTS':
                    raise ResourceExistsError(message, self.status, code)
                elif code == 'alreadyExists':
                    raise ResourceExistsError(message, self.status, code)
                elif code is not None and code.startswith('RESOURCE_IN_USE'):
                    # Guard against code being None (no 'code'/'reason' in
                    # the error), which previously raised AttributeError.
                    raise ResourceInUseError(message, self.status, code)
                else:
                    raise GoogleBaseError(message, self.status, code)
            else:
                return body

        elif self.status == httplib.NOT_FOUND:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise ResourceNotFoundError(message, self.status, code)

        elif self.status == httplib.BAD_REQUEST:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise InvalidRequestError(message, self.status, code)

        else:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise GoogleBaseError(message, self.status, code)
class GoogleBaseDriver(object):
    # Minimal driver stub used as the default `driver` for auth connections.
    name = "Google API"


class GoogleBaseAuthConnection(ConnectionUserAndKey):
    """
    Base class for Google Authentication.

    Should be subclassed for specific types of authentication.
    """
    driver = GoogleBaseDriver
    responseCls = GoogleResponse
    name = 'Google Auth'
    host = 'accounts.google.com'
    auth_path = '/o/oauth2/auth'

    def __init__(self, user_id, key=None, scopes=None,
                 redirect_uri='urn:ietf:wg:oauth:2.0:oob',
                 login_hint=None, **kwargs):
        """
        :param  user_id: The email address (for service accounts) or Client ID
                         (for installed apps) to be used for authentication.
        :type   user_id: ``str``

        :param  key: The RSA Key (for service accounts) or file path containing
                     key or Client Secret (for installed apps) to be used for
                     authentication.
        :type   key: ``str``

        :param  scopes: A list of urls defining the scope of authentication
                        to grant.
        :type   scopes: ``list``

        :keyword  redirect_uri: The Redirect URI for the authentication
                                request.  See Google OAUTH2 documentation for
                                more info.
        :type     redirect_uri: ``str``

        :keyword  login_hint: Login hint for authentication request.  Useful
                              for Installed Application authentication.
        :type     login_hint: ``str``
        """
        scopes = scopes or []
        # The OAuth2 endpoint expects scopes as one space-separated string.
        self.scopes = " ".join(scopes)
        self.redirect_uri = redirect_uri
        self.login_hint = login_hint

        super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs)

    def add_default_headers(self, headers):
        """
        Add defaults for 'Content-Type' and 'Host' headers.
        """
        headers['Content-Type'] = "application/x-www-form-urlencoded"
        headers['Host'] = self.host
        return headers

    def _token_request(self, request_body):
        """
        Return an updated token from a token request body.

        :param  request_body: A dictionary of values to send in the body of the
                              token request.
        :type   request_body: ``dict``

        :return: A dictionary with updated token information
        :rtype: ``dict``
        """
        data = urlencode(request_body)
        try:
            response = self.request('/o/oauth2/token', method='POST',
                                    data=data)
        except AttributeError:
            # An AttributeError here typically means the response could not
            # be parsed as expected (e.g. rejected credentials or clock skew
            # invalidating the assertion).
            raise GoogleAuthError('Invalid authorization response, please '
                                  'check your credentials and time drift.')
        token_info = response.object
        if 'expires_in' in token_info:
            # Convert the relative 'expires_in' into an absolute UTC
            # 'expire_time' so expiry can be checked later without tracking
            # when the token was fetched.
            expire_time = _utcnow() + datetime.timedelta(
                seconds=token_info['expires_in'])
            token_info['expire_time'] = _utc_timestamp(expire_time)
        return token_info

    def refresh_token(self, token_info):
        """
        Refresh the current token.

        Fetch an updated refresh token from internal metadata service.

        :param token_info: Dictionary containing token information.
                           (Not used, but here for compatibility)
        :type  token_info: ``dict``

        :return: A dictionary containing updated token information.
        :rtype: ``dict``
        """
        # get_new_token is defined by subclasses, hence the pylint pragma.
        # pylint: disable=no-member
        return self.get_new_token()


class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection):
    """Authentication connection for "Installed Application" authentication."""

    def get_code(self):
        """
        Give the user a URL that they can visit to authenticate and obtain a
        code.  This method will ask for that code that the user can paste in.

        Mocked in libcloud.test.common.google.GoogleTestCase.

        :return:  Code supplied by the user after authenticating
        :rtype:   ``str``
        """
        auth_params = {'response_type': 'code',
                       'client_id': self.user_id,
                       'redirect_uri': self.redirect_uri,
                       'scope': self.scopes,
                       'state': 'Libcloud Request'}
        if self.login_hint:
            auth_params['login_hint'] = self.login_hint

        data = urlencode(auth_params)

        url = 'https://%s%s?%s' % (self.host, self.auth_path, data)
        print('\nPlease Go to the following URL and sign in:')
        print(url)
        # raw_input was renamed to input in Python 3.
        if PY3:
            code = input('Enter Code: ')
        else:
            code = raw_input('Enter Code: ')
        return code

    def get_new_token(self):
        """
        Get a new token. Generally used when no previous token exists or there
        is no refresh token

        :return:  Dictionary containing token information
        :rtype:   ``dict``
        """
        # Ask the user for a code
        code = self.get_code()

        token_request = {'code': code,
                         'client_id': self.user_id,
                         'client_secret': self.key,
                         'redirect_uri': self.redirect_uri,
                         'grant_type': 'authorization_code'}

        return self._token_request(token_request)

    def refresh_token(self, token_info):
        """
        Use the refresh token supplied in the token info to get a new token.

        :param  token_info: Dictionary containing current token information
        :type   token_info: ``dict``

        :return:  A dictionary containing updated token information.
        :rtype:   ``dict``
        """
        # Without a refresh token we must run the full interactive flow again.
        if 'refresh_token' not in token_info:
            return self.get_new_token()
        refresh_request = {'refresh_token': token_info['refresh_token'],
                           'client_id': self.user_id,
                           'client_secret': self.key,
                           'grant_type': 'refresh_token'}

        new_token = self._token_request(refresh_request)
        # The token endpoint does not echo the refresh token back; carry the
        # old one forward so later refreshes keep working.
        if 'refresh_token' not in new_token:
            new_token['refresh_token'] = token_info['refresh_token']
        return new_token
class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):
    """Authentication class for "Service Account" authentication."""

    def __init__(self, user_id, key, *args, **kwargs):
        """
        Check to see if PyCrypto is available, and convert key file path into a
        key string if the key is in a file.

        :param  user_id: Email address to be used for Service Account
                authentication.
        :type   user_id: ``str``

        :param  key: The RSA Key or path to file containing the key.
        :type   key: ``str``
        """
        if SHA256 is None:
            raise GoogleAuthError('PyCrypto library required for '
                                  'Service Account Authentication.')
        # Check to see if 'key' is a file and read the file if it is.
        if key.find("PRIVATE KEY---") == -1:
            # key is a file
            keypath = os.path.expanduser(key)
            is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
            if not is_file_path:
                raise ValueError("Missing (or not readable) key "
                                 "file: '%s'" % key)
            with open(keypath, 'r') as f:
                contents = f.read()
            try:
                # Prefer the JSON credential format; fall back to treating
                # the file contents as a raw PEM key if it isn't JSON.
                key = json.loads(contents)
                key = key['private_key']
            except ValueError:
                key = contents

        super(GoogleServiceAcctAuthConnection, self).__init__(
            user_id, key, *args, **kwargs)

    def get_new_token(self):
        """
        Get a new token using the email address and RSA Key.

        :return:  Dictionary containing token information
        :rtype:   ``dict``
        """
        # Build a signed JWT assertion (RS256) per the OAuth2 service
        # account flow, then exchange it for an access token.
        # The header is always the same
        header = {'alg': 'RS256', 'typ': 'JWT'}
        header_enc = base64.urlsafe_b64encode(b(json.dumps(header)))

        # Construct a claim set
        claim_set = {'iss': self.user_id,
                     'scope': self.scopes,
                     'aud': 'https://accounts.google.com/o/oauth2/token',
                     'exp': int(time.time()) + 3600,
                     'iat': int(time.time())}
        claim_set_enc = base64.urlsafe_b64encode(b(json.dumps(claim_set)))

        # The message contains both the header and claim set
        message = b'.'.join((header_enc, claim_set_enc))
        # Then the message is signed using the key supplied
        key = RSA.importKey(self.key)
        hash_func = SHA256.new(message)
        signer = PKCS1_v1_5.new(key)
        signature = base64.urlsafe_b64encode(signer.sign(hash_func))

        # Finally the message and signature are sent to get a token
        jwt = b'.'.join((message, signature))
        request = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
                   'assertion': jwt}

        return self._token_request(request)


class GoogleGCEServiceAcctAuthConnection(GoogleBaseAuthConnection):
    """Authentication class for self-authentication when used with a GCE
    instance that supports serviceAccounts.
    """

    def get_new_token(self):
        """
        Get a new token from the internal metadata service.

        :return:  Dictionary containing token information
        :rtype:   ``dict``
        """
        path = '/instance/service-accounts/default/token'
        http_code, http_reason, token_info = _get_gce_metadata(path)
        if http_code == httplib.NOT_FOUND:
            raise ValueError("Service Accounts are not enabled for this "
                             "GCE instance.")
        if http_code != httplib.OK:
            raise ValueError("Internal GCE Authorization failed: "
                             "'%s'" % str(http_reason))
        token_info = json.loads(token_info)
        if 'expires_in' in token_info:
            # Convert the relative lifetime into an absolute UTC expiry.
            expire_time = _utcnow() + datetime.timedelta(
                seconds=token_info['expires_in'])
            token_info['expire_time'] = _utc_timestamp(expire_time)
        return token_info


class GoogleAuthType(object):
    """
    SA (Service Account),
    IA (Installed Application),
    GCE (Auth from a GCE instance with service account enabled)
    GCS_S3 (Cloud Storage S3 interoperability authentication)
    """
    SA = 'SA'
    IA = 'IA'
    GCE = 'GCE'
    GCS_S3 = 'GCS_S3'

    ALL_TYPES = [SA, IA, GCE, GCS_S3]
    OAUTH2_TYPES = [SA, IA, GCE]

    @classmethod
    def guess_type(cls, user_id):
        # Guess the auth type from the shape of the user id; IA is the
        # fallback when nothing else matches.
        if cls._is_sa(user_id):
            return cls.SA
        elif cls._is_gcs_s3(user_id):
            return cls.GCS_S3
        elif cls._is_gce():
            return cls.GCE
        else:
            return cls.IA

    @classmethod
    def is_oauth2(cls, auth_type):
        return auth_type in cls.OAUTH2_TYPES

    @staticmethod
    def _is_gce():
        """
        Checks if we can access the GCE metadata server.

        Mocked in libcloud.test.common.google.GoogleTestCase.
        """
        http_code, http_reason, body = _get_gce_metadata()
        if http_code == httplib.OK and body:
            return True
        return False

    @staticmethod
    def _is_gcs_s3(user_id):
        """
        Checks S3 key format: 20 alphanumeric chars starting with GOOG.
        """
        return len(user_id) == 20 and user_id.startswith('GOOG')

    @staticmethod
    def _is_sa(user_id):
        # Service account emails end with this well-known domain suffix.
        return user_id.endswith('.gserviceaccount.com')
""" return len(user_id) == 20 and user_id.startswith('GOOG') @staticmethod def _is_sa(user_id): return user_id.endswith('.gserviceaccount.com') class GoogleOAuth2Credential(object): default_credential_file = '~/.google_libcloud_auth' def __init__(self, user_id, key, auth_type=None, credential_file=None, scopes=None, **kwargs): self.auth_type = auth_type or GoogleAuthType.guess_type(user_id) if self.auth_type not in GoogleAuthType.ALL_TYPES: raise GoogleAuthError('Invalid auth type: %s' % self.auth_type) if not GoogleAuthType.is_oauth2(self.auth_type): raise GoogleAuthError(('Auth type %s cannot be used with OAuth2' % self.auth_type)) self.user_id = user_id self.key = key default_credential_file = '.'.join([self.default_credential_file, user_id]) self.credential_file = credential_file or default_credential_file # Default scopes to read/write for compute, storage, and dns. self.scopes = scopes or [ 'https://www.googleapis.com/auth/compute', 'https://www.googleapis.com/auth/devstorage.full_control', 'https://www.googleapis.com/auth/ndev.clouddns.readwrite', ] self.token = self._get_token_from_file() if self.auth_type == GoogleAuthType.GCE: self.oauth2_conn = GoogleGCEServiceAcctAuthConnection( self.user_id, self.scopes, **kwargs) elif self.auth_type == GoogleAuthType.SA: self.oauth2_conn = GoogleServiceAcctAuthConnection( self.user_id, self.key, self.scopes, **kwargs) elif self.auth_type == GoogleAuthType.IA: self.oauth2_conn = GoogleInstalledAppAuthConnection( self.user_id, self.key, self.scopes, **kwargs) else: raise GoogleAuthError('Invalid auth_type: %s' % str(self.auth_type)) if self.token is None: self.token = self.oauth2_conn.get_new_token() self._write_token_to_file() @property def access_token(self): if self.token_expire_utc_datetime < _utcnow(): self._refresh_token() return self.token['access_token'] @property def token_expire_utc_datetime(self): return _from_utc_timestamp(self.token['expire_time']) def _refresh_token(self): self.token = 
self.oauth2_conn.refresh_token(self.token) self._write_token_to_file() def _get_token_from_file(self): """ Read credential file and return token information. Mocked in libcloud.test.common.google.GoogleTestCase. :return: Token information dictionary, or None :rtype: ``dict`` or ``None`` """ token = None filename = os.path.realpath(os.path.expanduser(self.credential_file)) try: with open(filename, 'r') as f: data = f.read() token = json.loads(data) except (IOError, ValueError): # Note: File related errors (IOError) and errors related to json # parsing of the data (ValueError) are not fatal. e = sys.exc_info()[1] LOG.info('Failed to read cached auth token from file "%s": %s', filename, str(e)) return token def _write_token_to_file(self): """ Write token to credential file. Mocked in libcloud.test.common.google.GoogleTestCase. """ filename = os.path.expanduser(self.credential_file) filename = os.path.realpath(filename) try: data = json.dumps(self.token) write_flags = os.O_CREAT | os.O_WRONLY | os.O_TRUNC with os.fdopen(os.open(filename, write_flags, int('600', 8)), 'w') as f: f.write(data) except: # Note: Failure to write (cache) token in a file is not fatal. It # simply means degraded performance since we will need to acquire a # new token each time script runs. e = sys.exc_info()[1] LOG.info('Failed to write auth token to file "%s": %s', filename, str(e)) class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection): """Base connection class for interacting with Google APIs.""" driver = GoogleBaseDriver responseCls = GoogleResponse host = 'www.googleapis.com' poll_interval = 2.0 timeout = 180 def __init__(self, user_id, key=None, auth_type=None, credential_file=None, scopes=None, **kwargs): """ Determine authentication type, set up appropriate authentication connection and get initial authentication information. :param user_id: The email address (for service accounts) or Client ID (for installed apps) to be used for authentication. 
class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection):
    """Base connection class for interacting with Google APIs."""
    driver = GoogleBaseDriver
    responseCls = GoogleResponse
    host = 'www.googleapis.com'
    # Seconds between polls when waiting on an asynchronous operation.
    poll_interval = 2.0
    timeout = 180

    def __init__(self, user_id, key=None, auth_type=None,
                 credential_file=None, scopes=None, **kwargs):
        """
        Determine authentication type, set up appropriate authentication
        connection and get initial authentication information.

        :param user_id: The email address (for service accounts) or Client ID
                        (for installed apps) to be used for authentication.
        :type user_id: ``str``

        :param key: The RSA Key (for service accounts) or file path containing
                    key or Client Secret (for installed apps) to be used for
                    authentication.
        :type key: ``str``

        :keyword auth_type: See GoogleAuthType class for list and description
                            of accepted values.
                            If not supplied, auth_type will be guessed based
                            on value of user_id or if the code is running
                            on a GCE instance.
        :type auth_type: ``str``

        :keyword credential_file: Path to file for caching authentication
                                  information.
        :type credential_file: ``str``

        :keyword scopes: List of OAuth2 scope URLs. The empty default sets
                         read/write access to Compute, Storage, and DNS.
        :type scopes: ``list``
        """
        super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs)

        self.oauth2_credential = GoogleOAuth2Credential(
            user_id, key, auth_type, credential_file, scopes, **kwargs)

        # Advertise interpreter version/platform in the User-Agent.
        python_ver = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1],
                                   sys.version_info[2])
        ver_platform = 'Python %s/%s' % (python_ver, sys.platform)
        self.user_agent_append(ver_platform)

    def add_default_headers(self, headers):
        """
        @inherits: :class:`Connection.add_default_headers`
        """
        headers['Content-Type'] = 'application/json'
        headers['Host'] = self.host
        return headers

    def pre_connect_hook(self, params, headers):
        """
        Check to make sure that token hasn't expired.  If it has, get an
        updated token.  Also, add the token to the headers.

        @inherits: :class:`Connection.pre_connect_hook`
        """
        # access_token transparently refreshes the token when expired.
        headers['Authorization'] = ('Bearer ' +
                                    self.oauth2_credential.access_token)
        return params, headers

    def encode_data(self, data):
        """Encode data to JSON"""
        return json.dumps(data)

    def request(self, *args, **kwargs):
        """
        @inherits: :class:`Connection.request`
        """
        # Adds some retry logic for the occasional
        # "Connection Reset by peer" error.
        retries = 4
        tries = 0
        while tries < (retries - 1):
            try:
                return super(GoogleBaseConnection, self).request(
                    *args, **kwargs)
            except socket.error:
                e = sys.exc_info()[1]
                # Only ECONNRESET is retried; any other socket error
                # propagates to the caller immediately.
                if e.errno == errno.ECONNRESET:
                    tries = tries + 1
                else:
                    raise e
        # One more time, then give up.
        return super(GoogleBaseConnection, self).request(*args, **kwargs)

    def has_completed(self, response):
        """
        Determine if operation has completed based on response.

        :param response: JSON response
        :type response: I{responseCls}

        :return: True if complete, False otherwise
        :rtype: ``bool``
        """
        if response.object['status'] == 'DONE':
            return True
        else:
            return False

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        """
        @inherits: :class:`PollingConnection.get_poll_request_kwargs`
        """
        # Google operations carry a 'selfLink' URL to poll for status.
        return {'action': response.object['selfLink']}

    def morph_action_hook(self, action):
        """
        Update action to correct request path.

        In many places, the Google API returns a full URL to a resource.
        This will strip the scheme and host off of the path and just return
        the request.  Otherwise, it will prepend the base request_path to
        the action.

        :param action: The action to be called in the http request
        :type action: ``str``

        :return: The modified request based on the action
        :rtype: ``str``
        """
        if action.startswith('https://'):
            u = urlparse.urlsplit(action)
            request = urlparse.urlunsplit(('', '', u[2], u[3], u[4]))
        else:
            request = self.request_path + action
        return request
def get_href(element, rel):
    """
    Search a RESTLink element in the :class:`AbiquoResponse`.

    Abiquo is a self-discovering REST API: each entity carries ``link``
    children whose ``rel`` attribute names the relation and whose ``href``
    attribute points at the related resource. This helper finds the first
    link with the requested relation and returns its path relative to the
    API root (i.e. the part after ``/api``, keeping the leading slash).

    For instance, for a link with
    href='http://10.60.12.7:80/api/admin/datacenters/1' and
    rel='datacenter', the returned value is '/admin/datacenters/1'.

    :type element: :class:`xml.etree.ElementTree`
    :param element: Xml Entity returned by Abiquo API (required)

    :type rel: ``str``
    :param rel: relation link name

    :rtype: ``str``
    :return: the 'href' value according to the 'rel' input parameter
             (``None`` when no link with that relation exists)
    """
    needle = '/api/'
    for link in element.findall('link'):
        if link.attrib['rel'] != rel:
            continue
        # href is something like 'http://localhost:80/api/admin/enterprises';
        # only the '/admin/enterprises' part is of interest.
        url_path = urlparse.urlparse(link.attrib['href']).path
        index = url_path.find(needle)
        return url_path[index + len(needle) - 1:]
""" if self.status == httplib.UNAUTHORIZED: raise InvalidCredsError(driver=self.connection.driver) elif self.status == httplib.FORBIDDEN: raise ForbiddenError(self.connection.driver) elif self.status == httplib.NOT_ACCEPTABLE: raise LibcloudError('Not Acceptable') else: parsebody = self.parse_body() if parsebody is not None and hasattr(parsebody, 'findall'): errors = self.parse_body().findall('error') # Most of the exceptions only have one error raise LibcloudError(errors[0].findtext('message')) else: raise LibcloudError(self.body) def success(self): """ Determine if the request was successful. Any of the 2XX HTTP response codes are accepted as successful requests :rtype: ``bool`` :return: successful request or not. """ return self.status in [httplib.OK, httplib.CREATED, httplib.NO_CONTENT, httplib.ACCEPTED] def async_success(self): """ Determinate if async request was successful. An async_request retrieves for a task object that can be successfully retrieved (self.status == OK), but the asynchronous task (the body of the HTTP response) which we are asking for has finished with an error. So this method checks if the status code is 'OK' and if the task has finished successfully. :rtype: ``bool`` :return: successful asynchronous request or not """ if self.success(): # So we have a 'task' object in the body task = self.parse_body() return task.findtext('state') == 'FINISHED_SUCCESSFULLY' else: return False class AbiquoConnection(ConnectionUserAndKey, PollingConnection): """ A Connection to Abiquo API. Basic :class:`ConnectionUserAndKey` connection with :class:`PollingConnection` features for asynchronous tasks. 
""" responseCls = AbiquoResponse def __init__(self, user_id, key, secure=True, host=None, port=None, url=None, timeout=None, retry_delay=None, backoff=None, proxy_url=None): super(AbiquoConnection, self).__init__(user_id=user_id, key=key, secure=secure, host=host, port=port, url=url, timeout=timeout, retry_delay=retry_delay, backoff=backoff, proxy_url=proxy_url) # This attribute stores data cached across multiple request self.cache = {} def add_default_headers(self, headers): """ Add Basic Authentication header to all the requests. It injects the 'Authorization: Basic Base64String===' header in each request :type headers: ``dict`` :param headers: Default input headers :rtype: ``dict`` :return: Default input headers with the 'Authorization' header """ b64string = b('%s:%s' % (self.user_id, self.key)) encoded = base64.b64encode(b64string).decode('utf-8') authorization = 'Basic ' + encoded headers['Authorization'] = authorization return headers def get_poll_request_kwargs(self, response, context, request_kwargs): """ Manage polling request arguments. Return keyword arguments which are passed to the :class:`NodeDriver.request` method when polling for the job status. The Abiquo Asynchronous Response returns and 'acceptedrequest' XmlElement as the following:: You can follow the progress in the link We need to extract the href URI to poll. :type response: :class:`xml.etree.ElementTree` :keyword response: Object returned by poll request. 
:type request_kwargs: ``dict`` :keyword request_kwargs: Default request arguments and headers :rtype: ``dict`` :return: Modified keyword arguments """ accepted_request_obj = response.object link_poll = get_href(accepted_request_obj, 'status') hdr_poll = {'Accept': 'application/vnd.abiquo.task+xml'} # Override the 'action', 'method' and 'headers' # keys of the previous dict request_kwargs['action'] = link_poll request_kwargs['method'] = 'GET' request_kwargs['headers'] = hdr_poll return request_kwargs def has_completed(self, response): """ Decide if the asynchronous job has ended. :type response: :class:`xml.etree.ElementTree` :param response: Response object returned by poll request :rtype: ``bool`` :return: Whether the job has completed """ task = response.object task_state = task.findtext('state') return task_state in ['FINISHED_SUCCESSFULLY', 'ABORTED', 'FINISHED_UNSUCCESSFULLY'] class ForbiddenError(LibcloudError): """ Exception used when credentials are ok but user has not permissions. """ def __init__(self, driver): message = 'User has not permission to perform this task.' 
__all__ = [
    'API_HOST',
    'LuadnsException',
    'LuadnsResponse',
    'LuadnsConnection'
]

# Endpoint for luadns api
API_HOST = 'api.luadns.com'


class LuadnsResponse(JsonResponse):
    """
    JSON response wrapper which buckets the parsed body into errors and
    objects and raises :class:`LuadnsException` on failure.
    """
    # NOTE: kept as class attributes for backward compatibility only;
    # per-response state lives on the instance (see __init__).
    errors = []
    objects = []

    def __init__(self, response, connection):
        super(LuadnsResponse, self).__init__(response=response,
                                             connection=connection)
        # Bug fix: parse_body_and_errors() previously appended to the
        # *class-level* lists above, so errors and objects leaked across
        # responses and one error made every subsequent response in the
        # process appear failed. Use fresh per-instance lists instead.
        self.errors = []
        self.objects = []
        self.errors, self.objects = self.parse_body_and_errors()
        if not self.success():
            raise LuadnsException(code=self.status,
                                  message=self.errors.pop()['message'])

    def parse_body_and_errors(self):
        """Split the parsed JSON body into error and object buckets."""
        js = super(LuadnsResponse, self).parse_body()
        # Luadns error payloads carry a 'message' key.
        if 'message' in js:
            self.errors.append(js)
        else:
            self.objects.append(js)

        return self.errors, self.objects

    def success(self):
        """A request succeeded iff no error payloads were collected."""
        return len(self.errors) == 0


class LuadnsConnection(ConnectionUserAndKey):
    host = API_HOST
    responseCls = LuadnsResponse

    def add_default_headers(self, headers):
        """Attach HTTP Basic auth and JSON content negotiation headers."""
        b64string = b('%s:%s' % (self.user_id, self.key))
        encoded = base64.b64encode(b64string).decode('utf-8')

        authorization = 'Basic ' + encoded

        headers['Accept'] = 'application/json'
        headers['Authorization'] = authorization

        return headers


class LuadnsException(Exception):
    """Error raised for unsuccessful Luadns API responses."""

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return "%s %s" % (self.code, self.message)

    def __repr__(self):
        return "Luadns %s %s" % (self.code, self.message)
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.utils.py3 import httplib from libcloud.common.base import ConnectionUserAndKey from libcloud.common.base import JsonResponse class DNSimpleDNSResponse(JsonResponse): def success(self): """ Determine if our request was successful. The meaning of this can be arbitrary; did we receive OK status? Did the node get created? Were we authenticated? :rtype: ``bool`` :return: ``True`` or ``False`` """ # response.success() only checks for 200 and 201 codes. Should we # add 204? return self.status in [httplib.OK, httplib.CREATED, httplib.NO_CONTENT] class DNSimpleDNSConnection(ConnectionUserAndKey): host = 'api.dnsimple.com' responseCls = DNSimpleDNSResponse def add_default_headers(self, headers): """ Add headers that are necessary for every request This method adds ``token`` to the request. """ # TODO: fijarse sobre que info se paso como parametro y en base # a esto, fijar el header headers['X-DNSimple-Token'] = '%s:%s' % (self.user_id, self.key) headers['Accept'] = 'application/json' headers['Content-Type'] = 'application/json' return headers apache-libcloud-2.2.1/libcloud/common/gogrid.py0000664000175000017500000001437312705475361021352 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
HOST = 'api.gogrid.com'
PORTS_BY_SECURITY = {True: 443, False: 80}
API_VERSION = '1.8'

__all__ = [
    "GoGridResponse",
    "GoGridConnection",
    "GoGridIpAddress",
    "BaseGoGridDriver",
]


class GoGridResponse(JsonResponse):
    """JSON response wrapper that maps GoGrid status codes to errors."""

    def __init__(self, *args, **kwargs):
        self.driver = BaseGoGridDriver
        super(GoGridResponse, self).__init__(*args, **kwargs)

    def success(self):
        """
        Determine whether the request succeeded.

        :raises InvalidCredsError: on HTTP 403 (invalid credentials) or
            HTTP 401 (API key lacks rights).
        :raises MalformedResponseError: when the body is not valid JSON.
        :rtype: ``bool`` or ``None`` (when the response has no body)
        """
        if self.status == 403:
            raise InvalidCredsError('Invalid credentials', self.driver)
        if self.status == 401:
            raise InvalidCredsError('API Key has insufficient rights',
                                    self.driver)
        if not self.body:
            return None
        try:
            return self.parse_body()['status'] == 'success'
        except ValueError:
            raise MalformedResponseError('Malformed reply',
                                         body=self.body,
                                         driver=self.driver)

    def parse_error(self):
        """Return the provider's error message, or None if unparsable."""
        try:
            return self.parse_body()["list"][0]["message"]
        except (ValueError, KeyError):
            return None


class GoGridConnection(ConnectionUserAndKey):
    """
    Connection class for the GoGrid driver
    """

    host = HOST
    responseCls = GoGridResponse

    def add_default_params(self, params):
        # Every GoGrid call carries the API key, API version, response
        # format and a time-based signature as query parameters.
        params["api_key"] = self.user_id
        params["v"] = API_VERSION
        params["format"] = 'json'
        params["sig"] = self.get_signature(self.user_id, self.key)

        return params

    def get_signature(self, key, secret):
        """ create sig from md5 of key + secret + time """
        # MD5 over key+secret+unix-time is the signing scheme used by the
        # GoGrid API, not a local security choice.
        m = hashlib.md5(b(key + secret + str(int(time.time()))))
        return m.hexdigest()

    def request(self, action, params=None, data='', headers=None,
                method='GET', raw=False):
        # Pure delegation; kept so the default arguments are explicit.
        return super(GoGridConnection, self).request(action, params, data,
                                                     headers, method, raw)


class GoGridIpAddress(object):
    """
    IP Address
    """

    def __init__(self, id, ip, public, state, subnet):
        self.id = id
        self.ip = ip
        self.public = public
        self.state = state
        self.subnet = subnet


class BaseGoGridDriver(object):
    """GoGrid has common object model for services they provide, like
    locations and IP, so keep handling of these things in a single place."""

    name = "GoGrid"

    def _get_ip(self, element):
        # element is a dict-like API object carrying a nested 'ip' record.
        return element.get('ip').get('ip')

    def _to_ip(self, element):
        """Build a :class:`GoGridIpAddress` from an API 'ip' record."""
        ip = GoGridIpAddress(id=element['id'],
                             ip=element['ip'],
                             public=element['public'],
                             subnet=element['subnet'],
                             state=element["state"]["name"])
        ip.location = self._to_location(element['datacenter'])
        return ip

    def _to_ips(self, object):
        return [self._to_ip(el) for el in object['list']]

    def _to_location(self, element):
        # pylint: disable=no-member
        location = NodeLocation(id=element['id'],
                                name=element['name'],
                                country="US",
                                driver=self.connection.driver)
        return location

    def _to_locations(self, object):
        return [self._to_location(el) for el in object['list']]

    def ex_list_ips(self, **kwargs):
        """Return list of IP addresses assigned to
        the account.

        :keyword    public: set to True to list only
                    public IPs or False to list only
                    private IPs. Set to None or not specify
                    at all not to filter by type
        :type       public: ``bool``

        :keyword    assigned: set to True to list only addresses
                    assigned to servers, False to list unassigned
                    addresses and set to None or don't set at all
                    not to filter by state
        :type       assigned: ``bool``

        :keyword    location: filter IP addresses by location
        :type       location: :class:`NodeLocation`

        :rtype: ``list`` of :class:`GoGridIpAddress`
        """

        params = {}

        if "public" in kwargs and kwargs["public"] is not None:
            params["ip.type"] = {True: "Public",
                                 False: "Private"}[kwargs["public"]]
        if "assigned" in kwargs and kwargs["assigned"] is not None:
            params["ip.state"] = {True: "Assigned",
                                  False: "Unassigned"}[kwargs["assigned"]]
        if "location" in kwargs and kwargs['location'] is not None:
            params['datacenter'] = kwargs['location'].id

        # pylint: disable=no-member
        response = self.connection.request('/api/grid/ip/list',
                                           params=params)
        ips = self._to_ips(response.object)
        return ips

    def _get_first_ip(self, location=None):
        # Grab the first public, unassigned IP in the given location.
        ips = self.ex_list_ips(public=True, assigned=False,
                               location=location)
        try:
            return ips[0].ip
        except IndexError:
            # pylint: disable=no-member
            raise LibcloudError('No public unassigned IPs left',
                                self.driver)
class CloudStackResponse(JsonResponse):
    """JSON response wrapper converting CloudStack errors to exceptions."""

    def parse_error(self):
        """
        Extract the provider error message and raise it.

        :raises InvalidCredsError: on HTTP 401
        :raises ProviderError: for all other error statuses
        """
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Invalid provider credentials')

        value = None
        body = self.parse_body()
        if hasattr(body, 'values'):
            # CloudStack wraps the payload in a single top-level key.
            values = list(body.values())[0]
            if 'errortext' in values:
                value = values['errortext']
        if value is None:
            value = self.body

        if not value:
            value = 'WARNING: error message text sent by provider was empty.'

        error = ProviderError(value=value, http_code=self.status,
                              driver=self.connection.driver)
        raise error


class CloudStackConnection(ConnectionUserAndKey, PollingConnection):
    responseCls = CloudStackResponse
    poll_interval = 1
    request_method = '_sync_request'
    timeout = 600

    # Job status codes reported by queryAsyncJobResult.
    ASYNC_PENDING = 0
    ASYNC_SUCCESS = 1
    ASYNC_FAILURE = 2

    def encode_data(self, data):
        """
        Most of the data is sent as part of query params (eeww),
        but in newer versions, userdata argument can be sent as a
        urlencoded data in the request body.
        """
        if data:
            data = urlencode(data)

        return data

    def _make_signature(self, params):
        """
        Build the base64-encoded HMAC-SHA1 request signature over the
        lowercased, sorted, urlencoded query parameters, keyed with the
        API secret.
        """
        signature = [(k.lower(), v) for k, v in list(params.items())]
        signature.sort(key=lambda x: x[0])

        pairs = []
        for pair in signature:
            key = urlquote(str(pair[0]), safe='[]')
            value = urlquote(str(pair[1]), safe='[]*')
            item = '%s=%s' % (key, value)
            pairs.append(item)

        signature = '&'.join(pairs)

        signature = signature.lower().replace('+', '%20')

        signature = hmac.new(b(self.key), msg=b(signature),
                             digestmod=hashlib.sha1)
        return base64.b64encode(b(signature.digest()))

    def add_default_params(self, params):
        params['apiKey'] = self.user_id
        params['response'] = 'json'

        return params

    def pre_connect_hook(self, params, headers):
        # The signature must be computed over the final parameter set,
        # hence it is added last, right before the request is sent.
        params['signature'] = self._make_signature(params)
        return params, headers

    def _async_request(self, command, action=None, params=None, data=None,
                       headers=None, method='GET', context=None):
        """
        Issue an asynchronous CloudStack command and poll until the job
        completes, returning its 'jobresult' payload.
        """
        if params:
            context = copy.deepcopy(params)
        else:
            context = {}

        # Command is specified as part of GET call
        context['command'] = command
        result = super(CloudStackConnection, self).async_request(
            action=action, params=params, data=data, headers=headers,
            method=method, context=context)
        return result['jobresult']

    def get_request_kwargs(self, action, params=None, data='', headers=None,
                           method='GET', context=None):
        command = context['command']
        request_kwargs = {'command': command, 'action': action,
                          'params': params, 'data': data,
                          'headers': headers, 'method': method}
        return request_kwargs

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        # Poll the job by id via queryAsyncJobResult.
        job_id = response['jobid']
        params = {'jobid': job_id}
        kwargs = {'command': 'queryAsyncJobResult',
                  'params': params}
        return kwargs

    def has_completed(self, response):
        """
        Return True when the async job has finished successfully.

        :raises Exception: with the provider's errortext when the job
            reports failure.
        """
        status = response.get('jobstatus', self.ASYNC_PENDING)

        if status == self.ASYNC_FAILURE:
            msg = response.get('jobresult', {}).get('errortext', status)
            raise Exception(msg)

        return status == self.ASYNC_SUCCESS

    def _sync_request(self, command, action=None, params=None, data=None,
                      headers=None, method='GET'):
        """
        This method handles synchronous calls which are generally fast
        information retrieval requests and thus return 'quickly'.
        """
        # command is always sent as part of "command" query parameter
        if params:
            params = copy.deepcopy(params)
        else:
            params = {}

        params['command'] = command
        result = self.request(action=self.driver.path, params=params,
                              data=data, headers=headers, method=method)

        command = command.lower()

        # Work around for older versions which don't return "response"
        # suffix in delete ingress rule response command name
        if (command == 'revokesecuritygroupingress' and
                'revokesecuritygroupingressresponse' not in result.object):
            command = command
        else:
            command = command + 'response'

        if command not in result.object:
            raise MalformedResponseError(
                "Unknown response format",
                body=result.body,
                driver=self.driver)
        result = result.object[command]
        return result


class CloudStackDriverMixIn(object):
    # Subclasses provide the endpoint host and path.
    host = None
    path = None

    connectionCls = CloudStackConnection

    def __init__(self, key, secret=None, secure=True, host=None, port=None):
        host = host or self.host
        super(CloudStackDriverMixIn, self).__init__(key, secret, secure,
                                                    host, port)

    def _sync_request(self, command, action=None, params=None, data=None,
                      headers=None, method='GET'):
        """Delegate a synchronous command to the connection."""
        return self.connection._sync_request(command=command, action=action,
                                             params=params, data=data,
                                             headers=headers, method=method)

    def _async_request(self, command, action=None, params=None, data=None,
                       headers=None, method='GET', context=None):
        """Delegate an asynchronous command to the connection."""
        return self.connection._async_request(command=command, action=action,
                                              params=params, data=data,
                                              headers=headers, method=method,
                                              context=context)
__all__ = [
    'BaseHTTPError',
    'RateLimitReachedError',

    'exception_from_message'
]


class BaseHTTPError(Exception):
    """
    The base exception class for all HTTP related exceptions.
    """

    def __init__(self, code, message, headers=None):
        """
        :param code: HTTP status code of the error response.
        :type code: ``int``

        :param message: Error message sent by the provider.
        :type message: ``str``

        :param headers: Response headers, if available.
        :type headers: ``dict`` or ``None``
        """
        self.code = code
        self.message = message
        self.headers = headers
        # preserve old exception behavior for tests that
        # look for e.args[0]
        super(BaseHTTPError, self).__init__(message)

    def __str__(self):
        return self.message


class RateLimitReachedError(BaseHTTPError):
    """
    HTTP 429 - Rate limit: you've sent too many requests for this time period.
    """
    code = 429
    message = '%s Rate limit exceeded' % (code)

    def __init__(self, *args, **kwargs):
        """
        Accepts the same (code, message, headers) arguments as
        :class:`BaseHTTPError` (positionally or as keywords, defaulting to
        the class-level values) plus an optional ``retry_after`` keyword.
        """
        self.retry_after = int(kwargs.pop('retry_after', 0))
        # Bug fix: the parent constructor was never invoked, so ``headers``
        # was never set (AttributeError on access) and the ``code`` and
        # ``message`` passed in by ``exception_from_message`` were silently
        # discarded in favor of the class-level defaults.
        code = args[0] if len(args) > 0 else kwargs.pop('code', self.code)
        message = args[1] if len(args) > 1 else kwargs.pop('message',
                                                           self.message)
        headers = args[2] if len(args) > 2 else kwargs.pop('headers', None)
        super(RateLimitReachedError, self).__init__(code, message, headers)


# Maps HTTP status codes to their specialized exception class (if any).
_error_classes = [RateLimitReachedError]
_code_map = dict((c.code, c) for c in _error_classes)


def exception_from_message(code, message, headers=None):
    """
    Return an instance of BaseHTTPException or subclass based on response
    code.

    Usage::
        raise exception_from_message(code=self.status,
                                     message=self.parse_error())
    """
    kwargs = {
        'code': code,
        'message': message,
        'headers': headers
    }

    if headers and 'retry_after' in headers:
        kwargs['retry_after'] = headers['retry_after']

    cls = _code_map.get(code, BaseHTTPError)
    return cls(**kwargs)

<h3>something</h3>

class MalformedResponseError(LibcloudError):
    """
    Exception for the cases when a provider returns a malformed response,
    e.g. you request JSON and the provider returns '<h3>something</h3>'
    due to some error on their side.
    """
    def __init__(self, value, body=None, driver=None):
        self.value = value
        self.driver = driver
        self.body = body

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Bug fix: the angle-bracketed portion of this repr had been
        # stripped, leaving only '": " + repr(self.body)'. Restore an
        # informative representation with driver, value and body.
        return ('<MalformedResponseException in ' + repr(self.driver) +
                ' ' + repr(self.value) + '>: ' + repr(self.body))


class ProviderError(LibcloudError):
    """
    Exception used when provider gives back
    error response (HTTP 4xx, 5xx) for a request.

    Specific sub types can be derived for errors like
    HTTP 401 : InvalidCredsError
    HTTP 404 : NodeNotFoundError, ContainerDoesNotExistError
    """

    def __init__(self, value, http_code, driver=None):
        super(ProviderError, self).__init__(value=value, driver=driver)
        self.http_code = http_code

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return repr(self.value)


class InvalidCredsError(ProviderError):
    """Exception used when invalid credentials are used on a provider."""

    def __init__(self, value='Invalid credentials with the provider',
                 driver=None):
        super(InvalidCredsError, self).__init__(
            value,
            http_code=httplib.UNAUTHORIZED,
            driver=driver)


# Deprecated alias of :class:`InvalidCredsError`
InvalidCredsException = InvalidCredsError


class ServiceUnavailableError(ProviderError):
    """Exception used when a provider returns 503 Service Unavailable."""

    def __init__(self, value='Service unavailable at provider', driver=None):
        super(ServiceUnavailableError, self).__init__(
            value,
            http_code=httplib.SERVICE_UNAVAILABLE,
            driver=driver
        )
class LazyList(object):
    """
    A sequence that fetches its contents on demand through a ``get_more``
    callable and caches them once fully loaded.

    ``get_more`` is invoked as ``get_more(last_key=..., value_dict=...)``
    and must return a ``(items, last_key, exhausted)`` triple.
    """

    def __init__(self, get_more, value_dict=None):
        self._data = []
        self._last_key = None
        self._exhausted = False
        self._all_loaded = False
        self._get_more = get_more
        self._value_dict = value_dict or {}

    def __iter__(self):
        # Materialize everything before yielding, matching list semantics.
        if not self._all_loaded:
            self._load_all()
        for item in self._data:
            yield item

    def __getitem__(self, index):
        if index >= len(self._data) and not self._all_loaded:
            self._load_all()
        return self._data[index]

    def __len__(self):
        self._load_all()
        return len(self._data)

    def __repr__(self):
        self._load_all()
        return '[%s]' % (', '.join(repr(item) for item in self._data))

    def _load_all(self):
        # Keep requesting chunks until the producer reports exhaustion.
        while not self._exhausted:
            chunk, self._last_key, self._exhausted = self._get_more(
                last_key=self._last_key, value_dict=self._value_dict)
            self._data.extend(chunk)
        self._all_loaded = True
AUTH_VERSIONS_WITH_EXPIRES = [ '1.1', '2.0', '2.0_apikey', '2.0_password', '3.x', '3.x_password' ] __all__ = [ 'OpenStackBaseConnection', 'OpenStackResponse', 'OpenStackException', 'OpenStackDriverMixin' ] class OpenStackBaseConnection(ConnectionUserAndKey): """ Base class for OpenStack connections. :param user_id: User name to use when authenticating :type user_id: ``str`` :param key: Secret to use when authenticating. :type key: ``str`` :param secure: Use HTTPS? (True by default.) :type secure: ``bool`` :param ex_force_base_url: Base URL for connection requests. If not specified, this will be determined by authenticating. :type ex_force_base_url: ``str`` :param ex_force_auth_url: Base URL for authentication requests. :type ex_force_auth_url: ``str`` :param ex_force_auth_version: Authentication version to use. If not specified, defaults to AUTH_API_VERSION. :type ex_force_auth_version: ``str`` :param ex_force_auth_token: Authentication token to use for connection requests. If specified, the connection will not attempt to authenticate, and the value of ex_force_base_url will be used to determine the base request URL. If ex_force_auth_token is passed in, ex_force_base_url must also be provided. :type ex_force_auth_token: ``str`` :param token_scope: Whether to scope a token to a "project", a "domain" or "unscoped". :type token_scope: ``str`` :param ex_domain_name: When authenticating, provide this domain name to the identity service. A scoped token will be returned. Some cloud providers require the domain name to be provided at authentication time. Others will use a default domain if none is provided. :type ex_domain_name: ``str`` :param ex_tenant_name: When authenticating, provide this tenant name to the identity service. A scoped token will be returned. Some cloud providers require the tenant name to be provided at authentication time. Others will use a default tenant if none is provided. 
:type ex_tenant_name: ``str`` :param ex_force_service_type: Service type to use when selecting an service. If not specified, a provider specific default will be used. :type ex_force_service_type: ``str`` :param ex_force_service_name: Service name to use when selecting an service. If not specified, a provider specific default will be used. :type ex_force_service_name: ``str`` :param ex_force_service_region: Region to use when selecting an service. If not specified, a provider specific default will be used. :type ex_force_service_region: ``str`` """ auth_url = None auth_token = None auth_token_expires = None auth_user_info = None service_catalog = None service_type = None service_name = None service_region = None accept_format = None _auth_version = None def __init__(self, user_id, key, secure=True, host=None, port=None, timeout=None, proxy_url=None, ex_force_base_url=None, ex_force_auth_url=None, ex_force_auth_version=None, ex_force_auth_token=None, ex_token_scope=OpenStackIdentityTokenScope.PROJECT, ex_domain_name='Default', ex_tenant_name=None, ex_force_service_type=None, ex_force_service_name=None, ex_force_service_region=None, retry_delay=None, backoff=None): super(OpenStackBaseConnection, self).__init__( user_id, key, secure=secure, timeout=timeout, retry_delay=retry_delay, backoff=backoff, proxy_url=proxy_url) if ex_force_auth_version: self._auth_version = ex_force_auth_version self._ex_force_base_url = ex_force_base_url self._ex_force_auth_url = ex_force_auth_url self._ex_force_auth_token = ex_force_auth_token self._ex_token_scope = ex_token_scope self._ex_domain_name = ex_domain_name self._ex_tenant_name = ex_tenant_name self._ex_force_service_type = ex_force_service_type self._ex_force_service_name = ex_force_service_name self._ex_force_service_region = ex_force_service_region self._osa = None if ex_force_auth_token and not ex_force_base_url: raise LibcloudError( 'Must also provide ex_force_base_url when specifying ' 'ex_force_auth_token.') if 
ex_force_auth_token: self.auth_token = ex_force_auth_token if not self._auth_version: self._auth_version = AUTH_API_VERSION auth_url = self._get_auth_url() if not auth_url: raise LibcloudError('OpenStack instance must ' + 'have auth_url set') def get_auth_class(self): """ Retrieve identity / authentication class instance. :rtype: :class:`OpenStackIdentityConnection` """ if not self._osa: auth_url = self._get_auth_url() cls = get_class_for_auth_version(auth_version=self._auth_version) self._osa = cls(auth_url=auth_url, user_id=self.user_id, key=self.key, tenant_name=self._ex_tenant_name, domain_name=self._ex_domain_name, token_scope=self._ex_token_scope, timeout=self.timeout, parent_conn=self) return self._osa def request(self, action, params=None, data='', headers=None, method='GET', raw=False): headers = headers or {} params = params or {} # Include default content-type for POST and PUT request (if available) default_content_type = getattr(self, 'default_content_type', None) if method.upper() in ['POST', 'PUT'] and default_content_type: headers = {'Content-Type': default_content_type} return super(OpenStackBaseConnection, self).request(action=action, params=params, data=data, method=method, headers=headers, raw=raw) def _get_auth_url(self): """ Retrieve auth url for this instance using either "ex_force_auth_url" constructor kwarg of "auth_url" class variable. """ auth_url = self.auth_url if self._ex_force_auth_url is not None: auth_url = self._ex_force_auth_url return auth_url def get_service_catalog(self): if self.service_catalog is None: self._populate_hosts_and_request_paths() return self.service_catalog def get_service_name(self): """ Gets the service name used to look up the endpoint in the service catalog. 
:return: name of the service in the catalog """ if self._ex_force_service_name: return self._ex_force_service_name return self.service_name def get_endpoint(self): """ Selects the endpoint to use based on provider specific values, or overrides passed in by the user when setting up the driver. :returns: url of the relevant endpoint for the driver """ service_type = self.service_type service_name = self.service_name service_region = self.service_region if self._ex_force_service_type: service_type = self._ex_force_service_type if self._ex_force_service_name: service_name = self._ex_force_service_name if self._ex_force_service_region: service_region = self._ex_force_service_region endpoint = self.service_catalog.get_endpoint(service_type=service_type, name=service_name, region=service_region) url = endpoint.url if not url: raise LibcloudError('Could not find specified endpoint') return url def add_default_headers(self, headers): headers['X-Auth-Token'] = self.auth_token headers['Accept'] = self.accept_format return headers def morph_action_hook(self, action): self._populate_hosts_and_request_paths() return super(OpenStackBaseConnection, self).morph_action_hook(action) def _set_up_connection_info(self, url): result = self._tuple_from_url(url) (self.host, self.port, self.secure, self.request_path) = result self.connect() def _populate_hosts_and_request_paths(self): """ OpenStack uses a separate host for API calls which is only provided after an initial authentication request. """ osa = self.get_auth_class() if self._ex_force_auth_token: # If ex_force_auth_token is provided we always hit the api directly # and never try to authenticate. # # Note: When ex_force_auth_token is provided, ex_force_base_url # must be provided as well. self._set_up_connection_info(url=self._ex_force_base_url) return if not osa.is_token_valid(): # Token is not available or it has expired. Need to retrieve a # new one. 
if self._auth_version == '2.0_apikey': kwargs = {'auth_type': 'api_key'} elif self._auth_version == '2.0_password': kwargs = {'auth_type': 'password'} else: kwargs = {} osa = osa.authenticate(**kwargs) # may throw InvalidCreds self.auth_token = osa.auth_token self.auth_token_expires = osa.auth_token_expires self.auth_user_info = osa.auth_user_info # Pull out and parse the service catalog osc = OpenStackServiceCatalog(service_catalog=osa.urls, auth_version=self._auth_version) self.service_catalog = osc url = self._ex_force_base_url or self.get_endpoint() self._set_up_connection_info(url=url) class OpenStackException(ProviderError): pass class OpenStackResponse(Response): node_driver = None def success(self): i = int(self.status) return i >= 200 and i <= 299 def has_content_type(self, content_type): content_type_value = self.headers.get('content-type') or '' content_type_value = content_type_value.lower() return content_type_value.find(content_type.lower()) > -1 def parse_body(self): if self.status == httplib.NO_CONTENT or not self.body: return None if self.has_content_type('application/xml'): try: return ET.XML(self.body) except: raise MalformedResponseError( 'Failed to parse XML', body=self.body, driver=self.node_driver) elif self.has_content_type('application/json'): try: return json.loads(self.body) except: raise MalformedResponseError( 'Failed to parse JSON', body=self.body, driver=self.node_driver) else: return self.body def parse_error(self): text = None body = self.parse_body() if self.has_content_type('application/xml'): text = '; '.join([err.text or '' for err in body.getiterator() if err.text]) elif self.has_content_type('application/json'): values = list(body.values()) context = self.connection.context driver = self.connection.driver key_pair_name = context.get('key_pair_name', None) if len(values) > 0 and values[0]['code'] == 404 and key_pair_name: raise KeyPairDoesNotExistError(name=key_pair_name, driver=driver) elif len(values) > 0 and 'message' in 
values[0]: text = ';'.join([fault_data['message'] for fault_data in values]) else: text = body else: # while we hope a response is always one of xml or json, we have # seen html or text in the past, its not clear we can really do # something to make it more readable here, so we will just pass # it along as the whole response body in the text variable. text = body return '%s %s %s' % (self.status, self.error, text) class OpenStackDriverMixin(object): def __init__(self, ex_force_base_url=None, ex_force_auth_url=None, ex_force_auth_version=None, ex_force_auth_token=None, ex_token_scope=OpenStackIdentityTokenScope.PROJECT, ex_domain_name='Default', ex_tenant_name=None, ex_force_service_type=None, ex_force_service_name=None, ex_force_service_region=None, *args, **kwargs): self._ex_force_base_url = ex_force_base_url self._ex_force_auth_url = ex_force_auth_url self._ex_force_auth_version = ex_force_auth_version self._ex_force_auth_token = ex_force_auth_token self._ex_token_scope = ex_token_scope self._ex_domain_name = ex_domain_name self._ex_tenant_name = ex_tenant_name self._ex_force_service_type = ex_force_service_type self._ex_force_service_name = ex_force_service_name self._ex_force_service_region = ex_force_service_region def openstack_connection_kwargs(self): """ :rtype: ``dict`` """ rv = {} if self._ex_force_base_url: rv['ex_force_base_url'] = self._ex_force_base_url if self._ex_force_auth_token: rv['ex_force_auth_token'] = self._ex_force_auth_token if self._ex_force_auth_url: rv['ex_force_auth_url'] = self._ex_force_auth_url if self._ex_force_auth_version: rv['ex_force_auth_version'] = self._ex_force_auth_version if self._ex_token_scope: rv['ex_token_scope'] = self._ex_token_scope if self._ex_domain_name: rv['ex_domain_name'] = self._ex_domain_name if self._ex_tenant_name: rv['ex_tenant_name'] = self._ex_tenant_name if self._ex_force_service_type: rv['ex_force_service_type'] = self._ex_force_service_type if self._ex_force_service_name: rv['ex_force_service_name'] 
= self._ex_force_service_name if self._ex_force_service_region: rv['ex_force_service_region'] = self._ex_force_service_region return rv apache-libcloud-2.2.1/libcloud/common/base.py0000664000175000017500000007762013160236032020777 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import sys import ssl import socket import copy import binascii import time from libcloud.utils.py3 import ET try: import simplejson as json except: import json import requests import libcloud from libcloud.utils.py3 import PY25 from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import urlencode from libcloud.utils.misc import lowercase_keys, retry from libcloud.common.exceptions import exception_from_message from libcloud.common.types import LibcloudError, MalformedResponseError from libcloud.http import LibcloudConnection, HttpLibResponseProxy __all__ = [ 'RETRY_FAILED_HTTP_REQUESTS', 'BaseDriver', 'Connection', 'PollingConnection', 'ConnectionKey', 'ConnectionUserAndKey', 'CertificateConnection', 'Response', 'HTTPResponse', 'JsonResponse', 'XmlResponse', 'RawResponse' ] # Module level variable indicates if the failed HTTP requests should be retried RETRY_FAILED_HTTP_REQUESTS = False class LazyObject(object): """An object that doesn't get initialized until accessed.""" @classmethod def _proxy(cls, *lazy_init_args, **lazy_init_kwargs): class Proxy(cls, object): _lazy_obj = None def __init__(self): # Must override the lazy_cls __init__ pass def __getattribute__(self, attr): lazy_obj = object.__getattribute__(self, '_get_lazy_obj')() return getattr(lazy_obj, attr) def __setattr__(self, attr, value): lazy_obj = object.__getattribute__(self, '_get_lazy_obj')() setattr(lazy_obj, attr, value) def _get_lazy_obj(self): lazy_obj = object.__getattribute__(self, '_lazy_obj') if lazy_obj is None: lazy_obj = cls(*lazy_init_args, **lazy_init_kwargs) object.__setattr__(self, '_lazy_obj', lazy_obj) return lazy_obj return Proxy() @classmethod def lazy(cls, *lazy_init_args, **lazy_init_kwargs): """Create a lazily instantiated instance of the subclass, cls.""" return cls._proxy(*lazy_init_args, **lazy_init_kwargs) class HTTPResponse(httplib.HTTPResponse): # On python 2.6 some calls can hang because HEAD isn't quite 
properly # supported. # In particular this happens on S3 when calls are made to get_object to # objects that don't exist. # This applies the behaviour from 2.7, fixing the hangs. def read(self, amt=None): if self.fp is None: return '' if self._method == 'HEAD': self.close() return '' return httplib.HTTPResponse.read(self, amt) class Response(object): """ A base Response class to derive from. """ status = httplib.OK # Response status code headers = {} # Response headers body = None # Raw response body object = None # Parsed response body error = None # Reason returned by the server. connection = None # Parent connection class parse_zero_length_body = False def __init__(self, response, connection): """ :param response: HTTP response object. (optional) :type response: :class:`httplib.HTTPResponse` :param connection: Parent connection object. :type connection: :class:`.Connection` """ self.connection = connection # http.client In Python 3 doesn't automatically lowercase the header # names self.headers = lowercase_keys(dict(response.headers)) self.error = response.reason self.status = response.status_code self.request = response.request self.iter_content = response.iter_content self.body = response.text.strip() \ if response.text is not None and hasattr(response.text, 'strip') \ else '' if not self.success(): raise exception_from_message(code=self.status, message=self.parse_error()) self.object = self.parse_body() def parse_body(self): """ Parse response body. Override in a provider's subclass. :return: Parsed body. :rtype: ``str`` """ return self.body if self.body is not None else '' def parse_error(self): """ Parse the error messages. Override in a provider's subclass. :return: Parsed error. :rtype: ``str`` """ return self.body def success(self): """ Determine if our request was successful. The meaning of this can be arbitrary; did we receive OK status? Did the node get created? Were we authenticated? 
:rtype: ``bool`` :return: ``True`` or ``False`` """ # pylint: disable=E1101 return self.status in [requests.codes.ok, requests.codes.created, httplib.OK, httplib.CREATED, httplib.ACCEPTED] class JsonResponse(Response): """ A Base JSON Response class to derive from. """ def parse_body(self): if len(self.body) == 0 and not self.parse_zero_length_body: return self.body try: body = json.loads(self.body) except: raise MalformedResponseError( 'Failed to parse JSON', body=self.body, driver=self.connection.driver) return body parse_error = parse_body class XmlResponse(Response): """ A Base XML Response class to derive from. """ def parse_body(self): if len(self.body) == 0 and not self.parse_zero_length_body: return self.body try: try: body = ET.XML(self.body) except ValueError: # lxml wants a bytes and tests are basically hard-coded to str body = ET.XML(self.body.encode('utf-8')) except: raise MalformedResponseError('Failed to parse XML', body=self.body, driver=self.connection.driver) return body parse_error = parse_body class RawResponse(Response): def __init__(self, connection, response=None): """ :param connection: Parent connection object. :type connection: :class:`.Connection` """ self._status = None self._response = None self._headers = {} self._error = None self._reason = None self.connection = connection if response is not None: self.headers = lowercase_keys(dict(response.headers)) self.error = response.reason self.status = response.status_code self.request = response.request self.iter_content = response.iter_content def success(self): """ Determine if our request was successful. The meaning of this can be arbitrary; did we receive OK status? Did the node get created? Were we authenticated? 
:rtype: ``bool`` :return: ``True`` or ``False`` """ # pylint: disable=E1101 return self.status in [requests.codes.ok, requests.codes.created, httplib.OK, httplib.CREATED, httplib.ACCEPTED] @property def response(self): if not self._response: response = self.connection.connection.getresponse() self._response = HttpLibResponseProxy(response) self.body = response.content if not self.success(): self.parse_error() return self._response @property def reason(self): if not self._reason: self._reason = self.response.reason return self._reason class Connection(object): """ A Base Connection class to derive from. """ conn_class = LibcloudConnection responseCls = Response rawResponseCls = RawResponse connection = None host = '127.0.0.1' port = 443 timeout = None secure = 1 driver = None action = None cache_busting = False backoff = None retry_delay = None allow_insecure = True def __init__(self, secure=True, host=None, port=None, url=None, timeout=None, proxy_url=None, retry_delay=None, backoff=None): self.secure = secure and 1 or 0 self.ua = [] self.context = {} if not self.allow_insecure and not secure: # TODO: We should eventually switch to whitelist instead of # blacklist approach raise ValueError('Non https connections are not allowed (use ' 'secure=True)') self.request_path = '' if host: self.host = host if port is not None: self.port = port else: if self.secure == 1: self.port = 443 else: self.port = 80 if url: (self.host, self.port, self.secure, self.request_path) = self._tuple_from_url(url) self.timeout = timeout or self.timeout self.retry_delay = retry_delay self.backoff = backoff self.proxy_url = proxy_url def set_http_proxy(self, proxy_url): """ Set a HTTP proxy which will be used with this connection. :param proxy_url: Proxy URL (e.g. http://: without authentication and http://:@: for basic auth authentication information. 
:type proxy_url: ``str`` """ self.proxy_url = proxy_url def set_context(self, context): if not isinstance(context, dict): raise TypeError('context needs to be a dictionary') self.context = context def reset_context(self): self.context = {} def _tuple_from_url(self, url): secure = 1 port = None (scheme, netloc, request_path, param, query, fragment) = urlparse.urlparse(url) if scheme not in ['http', 'https']: raise LibcloudError('Invalid scheme: %s in url %s' % (scheme, url)) if scheme == "http": secure = 0 if ":" in netloc: netloc, port = netloc.rsplit(":") port = int(port) if not port: if scheme == "http": port = 80 else: port = 443 host = netloc port = int(port) return (host, port, secure, request_path) def connect(self, host=None, port=None, base_url=None, **kwargs): """ Establish a connection with the API server. :type host: ``str`` :param host: Optional host to override our default :type port: ``int`` :param port: Optional port to override our default :returns: A connection """ # prefer the attribute base_url if its set or sent connection = None secure = self.secure if getattr(self, 'base_url', None) and base_url is None: (host, port, secure, request_path) = \ self._tuple_from_url(getattr(self, 'base_url')) elif base_url is not None: (host, port, secure, request_path) = self._tuple_from_url(base_url) else: host = host or self.host port = port or self.port # Make sure port is an int port = int(port) if not hasattr(kwargs, 'host'): kwargs.update({'host': host}) if not hasattr(kwargs, 'port'): kwargs.update({'port': port}) if not hasattr(kwargs, 'secure'): kwargs.update({'secure': self.secure}) if not hasattr(kwargs, 'key_file') and hasattr(self, 'key_file'): kwargs.update({'key_file': getattr(self, 'key_file')}) if not hasattr(kwargs, 'cert_file') and hasattr(self, 'cert_file'): kwargs.update({'cert_file': getattr(self, 'cert_file')}) # Timeout is only supported in Python 2.6 and later # http://docs.python.org/library/httplib.html#httplib.HTTPConnection if 
self.timeout and not PY25: kwargs.update({'timeout': self.timeout}) if self.proxy_url: kwargs.update({'proxy_url': self.proxy_url}) connection = self.conn_class(**kwargs) # You can uncoment this line, if you setup a reverse proxy server # which proxies to your endpoint, and lets you easily capture # connections in cleartext when you setup the proxy to do SSL # for you # connection = self.conn_class("127.0.0.1", 8080) self.connection = connection def _user_agent(self): user_agent_suffix = ' '.join(['(%s)' % x for x in self.ua]) if self.driver: user_agent = 'libcloud/%s (%s) %s' % ( libcloud.__version__, self.driver.name, user_agent_suffix) else: user_agent = 'libcloud/%s %s' % ( libcloud.__version__, user_agent_suffix) return user_agent def user_agent_append(self, token): """ Append a token to a user agent string. Users of the library should call this to uniquely identify their requests to a provider. :type token: ``str`` :param token: Token to add to the user agent. """ self.ua.append(token) def request(self, action, params=None, data=None, headers=None, method='GET', raw=False, stream=False): """ Request a given `action`. Basically a wrapper around the connection object's `request` that does some helpful pre-processing. :type action: ``str`` :param action: A path. This can include arguments. If included, any extra parameters are appended to the existing ones. :type params: ``dict`` :param params: Optional mapping of additional parameters to send. If None, leave as an empty ``dict``. :type data: ``unicode`` :param data: A body of data to send with the request. :type headers: ``dict`` :param headers: Extra headers to add to the request None, leave as an empty ``dict``. :type method: ``str`` :param method: An HTTP method such as "GET" or "POST". :type raw: ``bool`` :param raw: True to perform a "raw" request aka only send the headers and use the rawResponseCls class. This is used with storage API when uploading a file. 
:type stream: ``bool`` :param stream: True to return an iterator in Response.iter_content and allow streaming of the response data (for downloading large files) :return: An :class:`Response` instance. :rtype: :class:`Response` instance """ if params is None: params = {} else: params = copy.copy(params) if headers is None: headers = {} else: headers = copy.copy(headers) retry_enabled = os.environ.get('LIBCLOUD_RETRY_FAILED_HTTP_REQUESTS', False) or RETRY_FAILED_HTTP_REQUESTS action = self.morph_action_hook(action) self.action = action self.method = method self.data = data # Extend default parameters params = self.add_default_params(params) # Add cache busting parameters (if enabled) if self.cache_busting and method == 'GET': params = self._add_cache_busting_to_params(params=params) # Extend default headers headers = self.add_default_headers(headers) # We always send a user-agent header headers.update({'User-Agent': self._user_agent()}) # Indicate that we support gzip and deflate compression headers.update({'Accept-Encoding': 'gzip,deflate'}) port = int(self.port) if port not in (80, 443): headers.update({'Host': "%s:%d" % (self.host, port)}) else: headers.update({'Host': self.host}) if data: data = self.encode_data(data) params, headers = self.pre_connect_hook(params, headers) if params: if '?' in action: url = '&'.join((action, urlencode(params, doseq=True))) else: url = '?'.join((action, urlencode(params, doseq=True))) else: url = action # IF connection has not yet been established if self.connection is None: self.connect() try: # @TODO: Should we just pass File object as body to request method # instead of dealing with splitting and sending the file ourselves? 
if raw: self.connection.prepared_request( method=method, url=url, body=data, headers=headers, stream=stream) else: if retry_enabled: retry_request = retry(timeout=self.timeout, retry_delay=self.retry_delay, backoff=self.backoff) retry_request(self.connection.request)(method=method, url=url, body=data, headers=headers, stream=stream) else: self.connection.request(method=method, url=url, body=data, headers=headers, stream=stream) except socket.gaierror: e = sys.exc_info()[1] message = str(e) errno = getattr(e, 'errno', None) if errno == -5: # Throw a more-friendly exception on "no address associated # with hostname" error. This error could simpli indicate that # "host" Connection class attribute is set to an incorrect # value class_name = self.__class__.__name__ msg = ('%s. Perhaps "host" Connection class attribute ' '(%s.connection) is set to an invalid, non-hostname ' 'value (%s)?' % (message, class_name, self.host)) raise socket.gaierror(msg) self.reset_context() raise e except ssl.SSLError: e = sys.exc_info()[1] self.reset_context() raise ssl.SSLError(str(e)) if raw: responseCls = self.rawResponseCls kwargs = {'connection': self, 'response': self.connection.getresponse()} else: responseCls = self.responseCls kwargs = {'connection': self, 'response': self.connection.getresponse()} try: response = responseCls(**kwargs) finally: # Always reset the context after the request has completed self.reset_context() return response def morph_action_hook(self, action): url = urlparse.urljoin(self.request_path.lstrip('/').rstrip('/') + '/', action.lstrip('/')) if not url.startswith('/'): return '/' + url else: return url def add_default_params(self, params): """ Adds default parameters (such as API key, version, etc.) to the passed `params` Should return a dictionary. """ return params def add_default_headers(self, headers): """ Adds default headers (such as Authorization, X-Foo-Bar) to the passed `headers` Should return a dictionary. 
""" return headers def pre_connect_hook(self, params, headers): """ A hook which is called before connecting to the remote server. This hook can perform a final manipulation on the params, headers and url parameters. :type params: ``dict`` :param params: Request parameters. :type headers: ``dict`` :param headers: Request headers. """ return params, headers def encode_data(self, data): """ Encode body data. Override in a provider's subclass. """ return data def _add_cache_busting_to_params(self, params): """ Add cache busting parameter to the query parameters of a GET request. Parameters are only added if "cache_busting" class attribute is set to True. Note: This should only be used with *naughty* providers which use excessive caching of responses. """ cache_busting_value = binascii.hexlify(os.urandom(8)).decode('ascii') if isinstance(params, dict): params['cache-busting'] = cache_busting_value else: params.append(('cache-busting', cache_busting_value)) return params class PollingConnection(Connection): """ Connection class which can also work with the async APIs. After initial requests, this class periodically polls for jobs status and waits until the job has finished. If job doesn't finish in timeout seconds, an Exception thrown. """ poll_interval = 0.5 timeout = 200 request_method = 'request' def async_request(self, action, params=None, data=None, headers=None, method='GET', context=None): """ Perform an 'async' request to the specified path. Keep in mind that this function is *blocking* and 'async' in this case means that the hit URL only returns a job ID which is the periodically polled until the job has completed. This function works like this: - Perform a request to the specified path. Response should contain a 'job_id'. - Returned 'job_id' is then used to construct a URL which is used for retrieving job status. 
        Constructed URL is then periodically polled until the response
        indicates that the job has completed or the timeout of
        'self.timeout' seconds has been reached.

        :type action: ``str``
        :param action: A path

        :type params: ``dict``
        :param params: Optional mapping of additional parameters to send. If
            None, leave as an empty ``dict``.

        :type data: ``unicode``
        :param data: A body of data to send with the request.

        :type headers: ``dict``
        :param headers: Extra headers to add to the request. If
            None, leave as an empty ``dict``.

        :type method: ``str``
        :param method: An HTTP method such as "GET" or "POST".

        :type context: ``dict``
        :param context: Context dictionary which is passed to the functions
            which construct initial and poll URL.

        :return: An :class:`Response` instance.
        :rtype: :class:`Response` instance
        """
        request = getattr(self, self.request_method)

        # Initial request which kicks off the asynchronous job on the
        # provider side.
        kwargs = self.get_request_kwargs(action=action, params=params,
                                         data=data, headers=headers,
                                         method=method,
                                         context=context)
        response = request(**kwargs)

        # Subclasses derive the poll URL / arguments from the initial
        # response (e.g. a job id returned by the provider).
        kwargs = self.get_poll_request_kwargs(response=response,
                                              context=context,
                                              request_kwargs=kwargs)

        end = time.time() + self.timeout
        completed = False
        while time.time() < end and not completed:
            response = request(**kwargs)
            completed = self.has_completed(response=response)
            if not completed:
                # Back off between polls so we don't hammer the API.
                time.sleep(self.poll_interval)

        if not completed:
            raise LibcloudError('Job did not complete in %s seconds' %
                                (self.timeout))

        return response

    def get_request_kwargs(self, action, params=None, data=None, headers=None,
                           method='GET', context=None):
        """
        Arguments which are passed to the initial request() call inside
        async_request.
        """
        kwargs = {'action': action, 'params': params, 'data': data,
                  'headers': headers, 'method': method}
        return kwargs

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        """
        Return keyword arguments which are passed to the request() method when
        polling for the job status.

        :param response: Response object returned by poll request.
        :type response: :class:`HTTPResponse`

        :param request_kwargs: Kwargs previously used to initiate the poll
                               request.
        :type request_kwargs: ``dict``

        :return ``dict`` Keyword arguments
        """
        raise NotImplementedError('get_poll_request_kwargs not implemented')

    def has_completed(self, response):
        """
        Return job completion status.

        :param response: Response object returned by poll request.
        :type response: :class:`HTTPResponse`

        :return ``bool`` True if the job has completed, False otherwise.
        """
        raise NotImplementedError('has_completed not implemented')


class ConnectionKey(Connection):
    """
    Base connection class which accepts a single ``key`` argument.
    """

    def __init__(self, key, secure=True, host=None, port=None, url=None,
                 timeout=None, proxy_url=None, backoff=None,
                 retry_delay=None):
        """
        Initialize `user_id` and `key`; set `secure` to an ``int`` based on
        passed value.
        """
        super(ConnectionKey, self).__init__(secure=secure, host=host,
                                            port=port, url=url,
                                            timeout=timeout,
                                            proxy_url=proxy_url,
                                            backoff=backoff,
                                            retry_delay=retry_delay)
        self.key = key


class CertificateConnection(Connection):
    """
    Base connection class which accepts a single ``cert_file`` argument.
    """

    def __init__(self, cert_file, secure=True, host=None, port=None, url=None,
                 proxy_url=None, timeout=None, backoff=None,
                 retry_delay=None):
        """
        Initialize `cert_file`; set `secure` to an ``int`` based on passed
        value.
        """
        super(CertificateConnection, self).__init__(secure=secure, host=host,
                                                    port=port, url=url,
                                                    timeout=timeout,
                                                    backoff=backoff,
                                                    retry_delay=retry_delay,
                                                    proxy_url=proxy_url)

        self.cert_file = cert_file


class KeyCertificateConnection(CertificateConnection):
    """
    Base connection class which accepts both ``key_file`` and ``cert_file``
    argument.
    """

    # Path to the private key file; set in the constructor.
    key_file = None

    def __init__(self, key_file, cert_file, secure=True, host=None, port=None,
                 url=None, proxy_url=None, timeout=None, backoff=None,
                 retry_delay=None):
        """
        Initialize `cert_file`; set `secure` to an ``int`` based on passed
        value.
        """
        super(KeyCertificateConnection, self).__init__(cert_file,
                                                       secure=secure,
                                                       host=host, port=port,
                                                       url=url,
                                                       timeout=timeout,
                                                       backoff=backoff,
                                                       retry_delay=retry_delay,
                                                       proxy_url=proxy_url)

        self.key_file = key_file


class ConnectionUserAndKey(ConnectionKey):
    """
    Base connection class which accepts a ``user_id`` and ``key`` argument.
    """

    # Account / username component of the credentials; set in the
    # constructor.
    user_id = None

    def __init__(self, user_id, key, secure=True, host=None, port=None,
                 url=None, timeout=None, proxy_url=None,
                 backoff=None, retry_delay=None):
        super(ConnectionUserAndKey, self).__init__(key, secure=secure,
                                                   host=host, port=port,
                                                   url=url, timeout=timeout,
                                                   backoff=backoff,
                                                   retry_delay=retry_delay,
                                                   proxy_url=proxy_url)
        self.user_id = user_id


class BaseDriver(object):
    """
    Base driver class from which other classes can inherit from.
    """

    # Connection class instantiated in the constructor; subclasses override
    # this with their provider-specific connection class.
    connectionCls = ConnectionKey

    def __init__(self, key, secret=None, secure=True, host=None,
                 port=None, api_version=None, region=None, **kwargs):
        """
        :param    key: API key or username to be used (required)
        :type     key: ``str``

        :param    secret: Secret password to be used (required)
        :type     secret: ``str``

        :param    secure: Whether to use HTTPS or HTTP. Note: Some providers
                          only support HTTPS, and it is on by default.
        :type     secure: ``bool``

        :param    host: Override hostname used for connections.
        :type     host: ``str``

        :param    port: Override port used for connections.
        :type     port: ``int``

        :param    api_version: Optional API version. Only used by drivers
                               which support multiple API versions.
        :type     api_version: ``str``

        :param region: Optional driver region. Only used by drivers which
                       support multiple regions.
        :type region: ``str``

        :rtype: ``None``
        """
        self.key = key
        self.secret = secret
        self.secure = secure

        # Build the positional argument list for the connection class.
        # NOTE: the order (key, secret, secure, host, port) must match the
        # signature of self.connectionCls.__init__ — optional values are only
        # appended when provided.
        args = [self.key]

        if self.secret is not None:
            args.append(self.secret)

        args.append(secure)

        if host is not None:
            args.append(host)

        if port is not None:
            args.append(port)

        self.api_version = api_version
        self.region = region

        # Extra keyword arguments a subclass wants forwarded to the
        # connection class constructor.
        conn_kwargs = self._ex_connection_class_kwargs()
        conn_kwargs.update({'timeout': kwargs.pop('timeout', None),
                            'retry_delay': kwargs.pop('retry_delay', None),
                            'backoff': kwargs.pop('backoff', None),
                            'proxy_url': kwargs.pop('proxy_url', None)})
        self.connection = self.connectionCls(*args, **conn_kwargs)

        self.connection.driver = self
        self.connection.connect()

    def _ex_connection_class_kwargs(self):
        """
        Return extra connection keyword arguments which are passed to the
        Connection class constructor.
        """
        return {}
apache-libcloud-2.2.1/libcloud/common/softlayer.py0000664000175000017500000000557212705462564022105 0ustar  kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Softlayer connection """ from libcloud.common.base import ConnectionUserAndKey from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection from libcloud.common.types import InvalidCredsError, LibcloudError class SoftLayerException(LibcloudError): """ Exception class for SoftLayer driver """ pass class SoftLayerObjectDoesntExist(LibcloudError): """ Exception class for SoftLayer driver object doesnt exist """ pass class SoftLayerResponse(XMLRPCResponse): defaultExceptionCls = SoftLayerException exceptions = { 'SoftLayer_Account': InvalidCredsError, 'SoftLayer_Exception_ObjectNotFound': SoftLayerObjectDoesntExist } class SoftLayerConnection(XMLRPCConnection, ConnectionUserAndKey): responseCls = SoftLayerResponse host = 'api.softlayer.com' endpoint = '/xmlrpc/v3' def request(self, service, method, *args, **kwargs): headers = {} headers.update(self._get_auth_headers()) headers.update(self._get_init_params(service, kwargs.get('id'))) headers.update( self._get_object_mask(service, kwargs.get('object_mask'))) headers.update( self._get_object_mask(service, kwargs.get('object_mask'))) args = ({'headers': headers}, ) + args endpoint = '%s/%s' % (self.endpoint, service) return super(SoftLayerConnection, self).request(method, *args, **{'endpoint': endpoint}) def _get_auth_headers(self): return { 'authenticate': { 'username': self.user_id, 'apiKey': self.key } } def _get_init_params(self, service, id): if id is not None: return { '%sInitParameters' % service: {'id': id} } else: return {} def _get_object_mask(self, service, mask): if mask is not None: return { '%sObjectMask' % service: {'mask': mask} } else: return {} apache-libcloud-2.2.1/libcloud/common/buddyns.py0000664000175000017500000000454012703467102021532 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.common.base import ConnectionKey, JsonResponse __all__ = [ 'API_HOST', 'BuddyNSException', 'BuddyNSResponse', 'BuddyNSConnection' ] # Endpoint for buddyns api API_HOST = 'www.buddyns.com' class BuddyNSResponse(JsonResponse): errors = [] objects = [] def __init__(self, response, connection): super(BuddyNSResponse, self).__init__(response=response, connection=connection) self.errors, self.objects = self.parse_body_and_errors() if not self.success(): raise BuddyNSException(code=self.status, message=self.errors.pop()['detail']) def parse_body_and_errors(self): js = super(BuddyNSResponse, self).parse_body() if 'detail' in js: self.errors.append(js) else: self.objects.append(js) return self.errors, self.objects def success(self): return len(self.errors) == 0 class BuddyNSConnection(ConnectionKey): host = API_HOST responseCls = BuddyNSResponse def add_default_headers(self, headers): headers['content-type'] = 'application/json' headers['Authorization'] = 'Token' + ' ' + self.key return headers class BuddyNSException(Exception): def __init__(self, code, message): self.code = code self.message = message self.args = (code, message) def __str__(self): return "%s %s" % (self.code, self.message) def __repr__(self): return "BuddyNSException %s %s" % (self.code, self.message) apache-libcloud-2.2.1/libcloud/common/brightbox.py0000664000175000017500000000652512701023453022052 0ustar kamikami00000000000000# Licensed 
to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 from libcloud.common.base import ConnectionUserAndKey, JsonResponse from libcloud.compute.types import InvalidCredsError from libcloud.utils.py3 import b from libcloud.utils.py3 import httplib try: import simplejson as json except ImportError: import json class BrightboxResponse(JsonResponse): def success(self): return self.status >= httplib.OK and self.status < httplib.BAD_REQUEST def parse_body(self): if self.headers['content-type'].split(';')[0] == 'application/json': return super(BrightboxResponse, self).parse_body() else: return self.body def parse_error(self): response = super(BrightboxResponse, self).parse_body() if 'error' in response: if response['error'] in ['invalid_client', 'unauthorized_client']: raise InvalidCredsError(response['error']) return response['error'] elif 'error_name' in response: return '%s: %s' % (response['error_name'], response['errors'][0]) return self.body class BrightboxConnection(ConnectionUserAndKey): """ Connection class for the Brightbox driver """ host = 'api.gb1.brightbox.com' responseCls = BrightboxResponse def _fetch_oauth_token(self): body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'}) authorization = 'Basic ' + 
str(base64.encodestring(b('%s:%s' % (self.user_id, self.key)))).rstrip() self.connect() headers = { 'Host': self.host, 'User-Agent': self._user_agent(), 'Authorization': authorization, 'Content-Type': 'application/json', 'Content-Length': str(len(body)) } response = self.connection.request(method='POST', url='/token', body=body, headers=headers) response = self.connection.getresponse() if response.status == httplib.OK: return json.loads(response.read())['access_token'] else: responseCls = BrightboxResponse(response=response, connection=self) message = responseCls.parse_error() raise InvalidCredsError(message) def add_default_headers(self, headers): try: headers['Authorization'] = 'OAuth ' + self.token except AttributeError: self.token = self._fetch_oauth_token() headers['Authorization'] = 'OAuth ' + self.token return headers def encode_data(self, data): return json.dumps(data) apache-libcloud-2.2.1/libcloud/common/azure_arm.py0000664000175000017500000002336313153541406022053 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
try: import simplejson as json except ImportError: import json import time from libcloud.utils.py3 import urlparse from libcloud.common.base import (ConnectionUserAndKey, JsonResponse, RawResponse) from libcloud.http import LibcloudConnection from libcloud.utils.py3 import basestring, urlencode class AzureBaseDriver(object): name = "Microsoft Azure Resource Management API" class AzureJsonResponse(JsonResponse): def parse_error(self): b = self.parse_body() if isinstance(b, basestring): return b elif isinstance(b, dict) and "error" in b: return "[%s] %s" % (b["error"].get("code"), b["error"].get("message")) else: return str(b) class AzureAuthJsonResponse(JsonResponse): def parse_error(self): b = self.parse_body() if isinstance(b, basestring): return b elif isinstance(b, dict) and "error_description" in b: return b["error_description"] else: return str(b) # Based on # https://github.com/Azure/azure-xplat-cli/blob/master/lib/util/profile/environment.js publicEnvironments = { "default": { 'name': 'default', 'portalUrl': 'http://go.microsoft.com/fwlink/?LinkId=254433', 'publishingProfileUrl': 'http://go.microsoft.com/fwlink/?LinkId=254432', 'managementEndpointUrl': 'https://management.core.windows.net', 'resourceManagerEndpointUrl': 'https://management.azure.com/', 'sqlManagementEndpointUrl': 'https://management.core.windows.net:8443/', 'sqlServerHostnameSuffix': '.database.windows.net', 'galleryEndpointUrl': 'https://gallery.azure.com/', 'activeDirectoryEndpointUrl': 'https://login.microsoftonline.com', 'activeDirectoryResourceId': 'https://management.core.windows.net/', 'activeDirectoryGraphResourceId': 'https://graph.windows.net/', 'activeDirectoryGraphApiVersion': '2013-04-05', 'storageEndpointSuffix': '.core.windows.net', 'keyVaultDnsSuffix': '.vault.azure.net', 'azureDataLakeStoreFileSystemEndpointSuffix': 'azuredatalakestore.net', 'azureDataLakeAnalyticsCatalogAndJobEndpointSuffix': 'azuredatalakeanalytics.net' }, "AzureChinaCloud": { 'name': 'AzureChinaCloud', 
'portalUrl': 'http://go.microsoft.com/fwlink/?LinkId=301902', 'publishingProfileUrl': 'http://go.microsoft.com/fwlink/?LinkID=301774', 'managementEndpointUrl': 'https://management.core.chinacloudapi.cn', 'resourceManagerEndpointUrl': 'https://management.chinacloudapi.cn', 'sqlManagementEndpointUrl': 'https://management.core.chinacloudapi.cn:8443/', 'sqlServerHostnameSuffix': '.database.chinacloudapi.cn', 'galleryEndpointUrl': 'https://gallery.chinacloudapi.cn/', 'activeDirectoryEndpointUrl': 'https://login.chinacloudapi.cn', 'activeDirectoryResourceId': 'https://management.core.chinacloudapi.cn/', 'activeDirectoryGraphResourceId': 'https://graph.chinacloudapi.cn/', 'activeDirectoryGraphApiVersion': '2013-04-05', 'storageEndpointSuffix': '.core.chinacloudapi.cn', 'keyVaultDnsSuffix': '.vault.azure.cn', 'azureDataLakeStoreFileSystemEndpointSuffix': 'N/A', 'azureDataLakeAnalyticsCatalogAndJobEndpointSuffix': 'N/A' }, "AzureUSGovernment": { 'name': 'AzureUSGovernment', 'portalUrl': 'https://manage.windowsazure.us', 'publishingProfileUrl': 'https://manage.windowsazure.us/publishsettings/index', 'managementEndpointUrl': 'https://management.core.usgovcloudapi.net', 'resourceManagerEndpointUrl': 'https://management.usgovcloudapi.net', 'sqlManagementEndpointUrl': 'https://management.core.usgovcloudapi.net:8443/', 'sqlServerHostnameSuffix': '.database.usgovcloudapi.net', 'galleryEndpointUrl': 'https://gallery.usgovcloudapi.net/', 'activeDirectoryEndpointUrl': 'https://login-us.microsoftonline.com', 'activeDirectoryResourceId': 'https://management.core.usgovcloudapi.net/', 'activeDirectoryGraphResourceId': 'https://graph.windows.net/', 'activeDirectoryGraphApiVersion': '2013-04-05', 'storageEndpointSuffix': '.core.usgovcloudapi.net', 'keyVaultDnsSuffix': '.vault.usgovcloudapi.net', 'azureDataLakeStoreFileSystemEndpointSuffix': 'N/A', 'azureDataLakeAnalyticsCatalogAndJobEndpointSuffix': 'N/A' }, "AzureGermanCloud": { 'name': 'AzureGermanCloud', 'portalUrl': 
'http://portal.microsoftazure.de/', 'publishingProfileUrl': 'https://manage.microsoftazure.de/publishsettings/index', 'managementEndpointUrl': 'https://management.core.cloudapi.de', 'resourceManagerEndpointUrl': 'https://management.microsoftazure.de', 'sqlManagementEndpointUrl': 'https://management.core.cloudapi.de:8443/', 'sqlServerHostnameSuffix': '.database.cloudapi.de', 'galleryEndpointUrl': 'https://gallery.cloudapi.de/', 'activeDirectoryEndpointUrl': 'https://login.microsoftonline.de', 'activeDirectoryResourceId': 'https://management.core.cloudapi.de/', 'activeDirectoryGraphResourceId': 'https://graph.cloudapi.de/', 'activeDirectoryGraphApiVersion': '2013-04-05', 'storageEndpointSuffix': '.core.cloudapi.de', 'keyVaultDnsSuffix': '.vault.microsoftazure.de', 'azureDataLakeStoreFileSystemEndpointSuffix': 'N/A', 'azureDataLakeAnalyticsCatalogAndJobEndpointSuffix': 'N/A' } } class AzureResourceManagementConnection(ConnectionUserAndKey): """ Represents a single connection to Azure """ conn_class = LibcloudConnection driver = AzureBaseDriver name = 'Azure AD Auth' responseCls = AzureJsonResponse rawResponseCls = RawResponse def __init__(self, key, secret, secure=True, tenant_id=None, subscription_id=None, cloud_environment=None, **kwargs): super(AzureResourceManagementConnection, self) \ .__init__(key, secret, **kwargs) if not cloud_environment: cloud_environment = "default" if isinstance(cloud_environment, basestring): cloud_environment = publicEnvironments[cloud_environment] if not isinstance(cloud_environment, dict): raise Exception("cloud_environment must be one of '%s' or a dict " "containing keys 'resourceManagerEndpointUrl', " "'activeDirectoryEndpointUrl', " "'activeDirectoryResourceId', " "'storageEndpointSuffix'" % ( "', '".join(publicEnvironments.keys()))) self.host = urlparse.urlparse( cloud_environment['resourceManagerEndpointUrl']).hostname self.login_host = urlparse.urlparse( cloud_environment['activeDirectoryEndpointUrl']).hostname 
self.login_resource = cloud_environment['activeDirectoryResourceId'] self.storage_suffix = cloud_environment['storageEndpointSuffix'] self.tenant_id = tenant_id self.subscription_id = subscription_id def add_default_headers(self, headers): headers['Content-Type'] = "application/json" headers['Authorization'] = "Bearer %s" % self.access_token return headers def encode_data(self, data): """Encode data to JSON""" return json.dumps(data) def get_token_from_credentials(self): """ Log in and get bearer token used to authorize API requests. """ conn = self.conn_class(self.login_host, 443) conn.connect() params = urlencode({ "grant_type": "client_credentials", "client_id": self.user_id, "client_secret": self.key, "resource": self.login_resource }) headers = {"Content-type": "application/x-www-form-urlencoded"} conn.request("POST", "/%s/oauth2/token" % self.tenant_id, params, headers) js = AzureAuthJsonResponse(conn.getresponse(), conn) self.access_token = js.object["access_token"] self.expires_on = js.object["expires_on"] def connect(self, **kwargs): self.get_token_from_credentials() return super(AzureResourceManagementConnection, self).connect(**kwargs) def request(self, action, params=None, data=None, headers=None, method='GET', raw=False): # Log in again if the token has expired or is going to expire soon # (next 5 minutes). if (time.time() + 300) >= int(self.expires_on): self.get_token_from_credentials() return super(AzureResourceManagementConnection, self) \ .request(action, params=params, data=data, headers=headers, method=method, raw=raw) apache-libcloud-2.2.1/libcloud/common/azure.py0000664000175000017500000002265113153541406021213 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import os
import time
import base64
import hmac

from hashlib import sha256

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.utils.xml import fixxpath
from libcloud.utils.py3 import ET
from libcloud.common.types import InvalidCredsError
from libcloud.common.types import LibcloudError, MalformedResponseError
from libcloud.common.base import ConnectionUserAndKey, RawResponse
from libcloud.common.base import CertificateConnection
from libcloud.common.base import XmlResponse

# Azure API version
API_VERSION = '2012-02-12'

# The time format for headers in Azure requests
AZURE_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'


class AzureRedirectException(Exception):

    def __init__(self, response):
        # Target URL Azure redirected the request to.
        self.location = response.headers['location']


class AzureResponse(XmlResponse):
    # Statuses treated as "valid" even though they are not 2xx, because the
    # API uses them to communicate meaningful conditions.
    valid_response_codes = [
        httplib.NOT_FOUND,
        httplib.CONFLICT,
        httplib.BAD_REQUEST,
        httplib.TEMPORARY_REDIRECT
        # added TEMPORARY_REDIRECT as this can sometimes be
        # sent by azure instead of a success or fail response
    ]

    def success(self):
        i = int(self.status)
        return 200 <= i <= 299 or i in self.valid_response_codes

    def parse_error(self, msg=None):
        error_msg = 'Unknown error'

        try:
            # Azure does give some meaningful errors, but is inconsistent
            # Some APIs respond with an XML error.  Others just dump HTML
            body = self.parse_body()  # pylint: disable=no-member

            if type(body) == ET.Element:
                code = body.findtext(fixxpath(xpath='Code'))
                message = body.findtext(fixxpath(xpath='Message'))
                # Only the first line of the message is kept.
                message = message.split('\n')[0]
                error_msg = '%s: %s' % (code, message)
        except MalformedResponseError:
            pass

        if msg:
            error_msg = '%s - %s' % (msg, error_msg)

        if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
            raise InvalidCredsError(error_msg)

        raise LibcloudError(
            '%s Status code: %d.' % (error_msg, self.status),
            driver=self
        )

    def parse_body(self):
        # Temporary redirects are surfaced to the driver as an exception so
        # it can re-issue the request against the new location.
        is_redirect = int(self.status) == httplib.TEMPORARY_REDIRECT

        if is_redirect and self.connection.driver.follow_redirects:
            raise AzureRedirectException(self)
        else:
            return super(AzureResponse, self).parse_body()


class AzureRawResponse(RawResponse):
    pass


class AzureConnection(ConnectionUserAndKey):
    """
    Represents a single connection to Azure
    """

    responseCls = AzureResponse
    rawResponseCls = AzureRawResponse

    skip_host = False
    skip_accept_encoding = False

    def add_default_params(self, params):
        return params

    def pre_connect_hook(self, params, headers):
        # Work on a copy so the caller's headers dict is not mutated.
        headers = copy.deepcopy(headers)

        # We have to add a date header in GMT
        headers['x-ms-date'] = time.strftime(AZURE_TIME_FORMAT, time.gmtime())
        headers['x-ms-version'] = API_VERSION

        # Add the authorization header
        headers['Authorization'] = self._get_azure_auth_signature(
            method=self.method, headers=headers, params=params,
            account=self.user_id, secret_key=self.key, path=self.action
        )

        # Azure cribs about this in 'raw' connections
        headers.pop('Host', None)

        return params, headers

    def _get_azure_auth_signature(self, method, headers, params,
                                  account, secret_key, path='/'):
        """
        Build the SharedKey authorization header value.

        Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID,
                            UTF-8-Encoding-Of( StringToSign ) ) ) );

        StringToSign = HTTP-VERB + "\n" +
            Content-Encoding + "\n" +
            Content-Language + "\n" +
            Content-Length + "\n" +
            Content-MD5 + "\n" +
            Content-Type + "\n" +
            Date + "\n" +
            If-Modified-Since + "\n" +
            If-Match + "\n" +
            If-None-Match + "\n" +
            If-Unmodified-Since + "\n" +
            Range + "\n" +
            CanonicalizedHeaders +
            CanonicalizedResource;
        """
        special_header_values = []
        xms_header_values = []
        param_list = []
        # Headers which must appear in the string-to-sign in exactly this
        # order (empty string when the header is absent).
        special_header_keys = [
            'content-encoding', 'content-language', 'content-length',
            'content-md5', 'content-type', 'date', 'if-modified-since',
            'if-match', 'if-none-match', 'if-unmodified-since', 'range'
        ]

        # Split the x-ms headers and normal headers and make everything
        # lower case
        headers_copy = {}
        for header, value in headers.items():
            header = header.lower()
            value = str(value).strip()
            if header.startswith('x-ms-'):
                xms_header_values.append((header, value))
            else:
                headers_copy[header] = value

        # Get the values for the headers in the specific order
        for header in special_header_keys:
            header = header.lower()  # Just for safety
            if header in headers_copy:
                special_header_values.append(headers_copy[header])
            else:
                special_header_values.append('')

        # Prepare the first section of the string to be signed
        values_to_sign = [method] + special_header_values
        # string_to_sign = '\n'.join([method] + special_header_values)

        # The x-ms-* headers have to be in lower case and sorted
        xms_header_values.sort()

        for header, value in xms_header_values:
            values_to_sign.append('%s:%s' % (header, value))

        # Add the canonicalized path
        values_to_sign.append('/%s%s' % (account, path))

        # URL query parameters (sorted and lower case)
        for key, value in params.items():
            param_list.append((key.lower(), str(value).strip()))

        param_list.sort()

        for key, value in param_list:
            values_to_sign.append('%s:%s' % (key, value))

        string_to_sign = b('\n'.join(values_to_sign))
        secret_key = b(secret_key)
        b64_hmac = base64.b64encode(
            hmac.new(secret_key, string_to_sign, digestmod=sha256).digest()
        )

        return 'SharedKey %s:%s' % (self.user_id, b64_hmac.decode('utf-8'))


class AzureBaseDriver(object):
    name = "Microsoft Azure Service Management API"


class AzureServiceManagementConnection(CertificateConnection):
    # This needs the following approach -
    # 1. Make request using LibcloudHTTPSConnection which is a overloaded
    # class which takes in a client certificate
    # 2. Depending on the type of operation use a PollingConnection
    # when the response id is returned
    # 3. The Response can be used in an AzureServiceManagementResponse

    """
    Authentication class for "Service Account" authentication.
    """

    driver = AzureBaseDriver
    responseCls = AzureResponse
    rawResponseCls = AzureRawResponse
    name = 'Azure Service Management API Connection'
    host = 'management.core.windows.net'
    # Expanded path to the PEM key file; set in the constructor.
    keyfile = ""

    def __init__(self, subscription_id, key_file, *args, **kwargs):
        """
        Check to see if PyCrypto is available, and convert key file path into
        a key string if the key is in a file.

        :param  subscription_id: Azure subscription ID.
        :type   subscription_id: ``str``

        :param  key_file: The PEM file used to authenticate with the service.
        :type   key_file: ``str``
        """
        super(AzureServiceManagementConnection, self).__init__(
            key_file,
            *args,
            **kwargs
        )

        self.subscription_id = subscription_id

        keypath = os.path.expanduser(key_file)
        self.keyfile = keypath
        is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
        if not is_file_path:
            raise InvalidCredsError(
                'You need an certificate PEM file to authenticate with '
                'Microsoft Azure. This can be found in the portal.'
            )
        self.key_file = key_file

    def add_default_headers(self, headers):
        """
        @inherits: :class:`Connection.add_default_headers`
        TODO: move to constant..
""" headers['x-ms-version'] = "2014-05-01" headers['x-ms-date'] = time.strftime(AZURE_TIME_FORMAT, time.gmtime()) # headers['host'] = self.host return headers apache-libcloud-2.2.1/libcloud/common/onapp.py0000664000175000017500000000250612701023453021172 0ustar kamikami00000000000000from base64 import b64encode from libcloud.utils.py3 import b from libcloud.utils.py3 import httplib from libcloud.common.base import ConnectionUserAndKey, JsonResponse class OnAppResponse(JsonResponse): """ OnApp response class """ def success(self): """ Determine if our request was successful. The meaning of this can be arbitrary; did we receive OK status? Did the node get created? Were we authenticated? :rtype: ``bool`` :return: ``True`` or ``False`` """ return self.status in [httplib.OK, httplib.CREATED, httplib.NO_CONTENT] class OnAppConnection(ConnectionUserAndKey): """ OnApp connection class """ responseCls = OnAppResponse def add_default_headers(self, headers): """ Add Basic Authentication header to all the requests. It injects the "Authorization: Basic Base64String===" header in each request :type headers: ``dict`` :param headers: Default input headers :rtype: ``dict`` :return: Default input headers with the "Authorization" header. """ b64string = b("%s:%s" % (self.user_id, self.key)) encoded = b64encode(b64string).decode("utf-8") headers["Authorization"] = "Basic " + encoded return headers apache-libcloud-2.2.1/libcloud/common/vultr.py0000664000175000017500000000640212701023453021230 0ustar kamikami00000000000000from libcloud.common.base import ConnectionKey, JsonResponse __all__ = [ 'API_HOST', 'VultrConnection', 'VultrException', 'VultrResponse', ] # Endpoint for the Vultr API API_HOST = 'api.vultr.com' class VultrResponse(JsonResponse): objects = None error_dict = {} errors = None ERROR_CODE_MAP = { 400: "Invalid API location. Check the URL that you are using.", 403: "Invalid or missing API key. 
Check that your API key is present" + " and matches your assigned key.", 405: "Invalid HTTP method. Check that the method (POST|GET) matches" + " what the documentation indicates.", 412: "Request failed. Check the response body for a more detailed" + " description.", 500: "Internal server error. Try again at a later time.", 503: "Rate limit hit. API requests are limited to an average of 1/s." + " Try your request again later.", } def __init__(self, response, connection): self.errors = [] super(VultrResponse, self).__init__(response=response, connection=connection) self.objects, self.errors = self.parse_body_and_errors() if not self.success(): raise self._make_excp(self.errors[0]) def parse_body_and_errors(self): """ Returns JSON data in a python list. """ json_objects = [] errors = [] if self.status in self.ERROR_CODE_MAP: self.error_dict['ERRORCODE'] = self.status self.error_dict['ERRORMESSAGE'] = self.ERROR_CODE_MAP[self.status] errors.append(self.error_dict) js = super(VultrResponse, self).parse_body() if isinstance(js, dict): js = [js] json_objects.append(js) return (json_objects, errors) def _make_excp(self, error): """ Convert API error to a VultrException instance """ return VultrException(error['ERRORCODE'], error['ERRORMESSAGE']) def success(self): return len(self.errors) == 0 class VultrConnection(ConnectionKey): """ A connection to the Vultr API """ host = API_HOST responseCls = VultrResponse def add_default_params(self, params): """ Returns default params such as api_key which is needed to perform an action.Returns a dictionary. Example:/v1/server/upgrade_plan?api_key=self.key """ params['api_key'] = self.key return params def add_default_headers(self, headers): """ Returns default headers such as content-type. Returns a dictionary. 
""" headers["Content-Type"] = "application/x-www-form-urlencoded" headers["Accept"] = "text/plain" return headers def set_path(self): self.path = '/v/' return self.path class VultrException(Exception): """ Error originating from the Vultr API """ def __init__(self, code, message): self.code = code self.message = message self.args = (code, message) def __str__(self): return "(%u) %s" % (self.code, self.message) def __repr__(self): return "VultrException code %u '%s'" % (self.code, self.message) apache-libcloud-2.2.1/libcloud/common/nfsn.py0000664000175000017500000001005112701023453021013 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import hashlib import random import string import time from libcloud.common.base import ConnectionUserAndKey from libcloud.common.base import JsonResponse from libcloud.common.types import InvalidCredsError, ProviderError from libcloud.utils.py3 import basestring, httplib, urlencode SALT_CHARACTERS = string.ascii_letters + string.digits class NFSNException(ProviderError): def __init__(self, value, http_code, code, driver=None): self.code = code super(NFSNException, self).__init__(value, http_code, driver) class NFSNResponse(JsonResponse): def parse_error(self): if self.status == httplib.UNAUTHORIZED: raise InvalidCredsError('Invalid provider credentials') body = self.parse_body() if isinstance(body, basestring): return body + ' (HTTP Code: %d)' % self.status error = body.get('error', None) debug = body.get('debug', None) # If we only have one of "error" or "debug", use the one that we have. # If we have both, use both, with a space character in between them. value = 'No message specified' if error is not None: value = error if debug is not None: value = debug if error is not None and value is not None: value = error + ' ' + value value = value + ' (HTTP Code: %d)' % self.status return value class NFSNConnection(ConnectionUserAndKey): host = 'api.nearlyfreespeech.net' responseCls = NFSNResponse allow_insecure = False def _header(self, action, data): """ Build the contents of the X-NFSN-Authentication HTTP header. See https://members.nearlyfreespeech.net/wiki/API/Introduction for more explanation. 
""" login = self.user_id timestamp = self._timestamp() salt = self._salt() api_key = self.key data = urlencode(data) data_hash = hashlib.sha1(data.encode('utf-8')).hexdigest() string = ';'.join((login, timestamp, salt, api_key, action, data_hash)) string_hash = hashlib.sha1(string.encode('utf-8')).hexdigest() return ';'.join((login, timestamp, salt, string_hash)) def request(self, action, params=None, data='', headers=None, method='GET'): """ Add the X-NFSN-Authentication header to an HTTP request. """ if not headers: headers = {} if not params: params = {} header = self._header(action, data) headers['X-NFSN-Authentication'] = header if method == 'POST': headers['Content-Type'] = 'application/x-www-form-urlencoded' return ConnectionUserAndKey.request(self, action, params, data, headers, method) def encode_data(self, data): """ NFSN expects the body to be regular key-value pairs that are not JSON-encoded. """ if data: data = urlencode(data) return data def _salt(self): """ Return a 16-character alphanumeric string. """ r = random.SystemRandom() return ''.join(r.choice(SALT_CHARACTERS) for _ in range(16)) def _timestamp(self): """ Return the current number of seconds since the Unix epoch, as a string. """ return str(int(time.time())) apache-libcloud-2.2.1/libcloud/common/durabledns.py0000664000175000017500000002467412705475361022227 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from xml.etree import ElementTree as ET  # noqa

from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.base import XmlResponse

# API HOST to connect
API_HOST = 'durabledns.com'


def _schema_builder(urn_nid, method, attributes):
    """
    Return a xml schema used to do an API request.

    :param urn_nid: API urn namespace id.
    :type urn_nid: type: ``str``

    :param method: API method.
    :type method: type: ``str``

    :param attributes: List of attributes to include.
    :type attributes: ``list`` of ``str``

    rtype: :class:`Element`
    """
    # SOAP body element whose "m" namespace points at the per-method WSDL.
    soap = ET.Element(
        'soap:Body',
        {'xmlns:m': "https://durabledns.com/services/dns/%s" % method}
    )
    urn = ET.SubElement(soap, 'urn:%s:%s' % (urn_nid, method))
    # Attributes specification
    for attribute in attributes:
        ET.SubElement(urn, 'urn:%s:%s' % (urn_nid, attribute))
    return soap


# Per-API-call recipe (namespace id, method name, request attributes)
# consumed by _schema_builder when constructing request envelopes.
SCHEMA_BUILDER_MAP = {
    'list_zones': {
        'urn_nid': 'listZoneswsdl',
        'method': 'listZones',
        'attributes': ['apiuser', 'apikey']
    },
    'list_records': {
        'urn_nid': 'listRecordswsdl',
        'method': 'listRecords',
        'attributes': ['apiuser', 'apikey', 'zonename']
    },
    'get_zone': {
        'urn_nid': 'getZonewsdl',
        'method': 'getZone',
        'attributes': ['apiuser', 'apikey', 'zonename']
    },
    'get_record': {
        'urn_nid': 'getRecordwsdl',
        'method': 'getRecord',
        'attributes': ['apiuser', 'apikey', 'zonename', 'recordid']
    },
    'create_zone': {
        'urn_nid': 'createZonewsdl',
        'method': 'createZone',
        'attributes': ['apiuser', 'apikey', 'zonename', 'ns', 'mbox',
                       'refresh', 'retry', 'expire', 'minimum', 'ttl',
                       'xfer', 'update_acl']
    },
    'create_record': {
        'urn_nid': 'createRecordwsdl',
        'method': 'createRecord',
        'attributes': ['apiuser', 'apikey', 'zonename', 'name', 'type',
                       'data', 'aux', 'ttl', 'ddns_enabled']
    },
    'update_zone': {
        'urn_nid': 'updateZonewsdl',
        'method': 'updateZone',
        'attributes': ['apiuser', 'apikey', 'zonename', 'ns', 'mbox',
                       'refresh', 'retry', 'expire', 'minimum', 'ttl',
                       'xfer', 'update_acl']
    },
    'update_record': {
        'urn_nid': 'updateRecordwsdl',
        'method': 'updateRecord',
        'attributes': ['apiuser', 'apikey', 'zonename', 'id', 'name',
                       'aux', 'data', 'ttl', 'ddns_enabled']
    },
    'delete_zone': {
        'urn_nid': 'deleteZonewsdl',
        'method': 'deleteZone',
        'attributes': ['apiuser', 'apikey', 'zonename']
    },
    'delete_record': {
        'urn_nid': 'deleteRecordwsdl',
        'method': 'deleteRecord',
        'attributes': ['apiuser', 'apikey', 'zonename', 'id']
    }
}


class DurableDNSException(Exception):
    """
    Error originating from the DurableDNS API.
    """

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return "%s %s" % (self.code, self.message)

    def __repr__(self):
        return "DurableDNSException %s %s" % (self.code, self.message)


class DurableResponse(XmlResponse):

    # NOTE(review): mutable class-level defaults shared by all instances;
    # __init__ rebinds them per-instance, so this is latent rather than an
    # active bug — still worth cleaning up upstream.
    errors = []
    objects = []

    def __init__(self, response, connection):
        super(DurableResponse, self).__init__(response=response,
                                              connection=connection)

        # Parse eagerly; a parsed API fault raises immediately.
        self.objects, self.errors = self.parse_body_and_error()
        if self.errors:
            raise self._make_excp(self.errors[0])

    def parse_body_and_error(self):
        """
        Used to parse body from httplib.HttpResponse object.
        """
        objects = []
        errors = []
        error_dict = {}
        extra = {}
        zone_dict = {}
        record_dict = {}
        xml_obj = self.parse_body()

        # pylint: disable=no-member
        # Unwrap SOAP Envelope -> Body -> <method>Response element.
        envelop_body = xml_obj.getchildren()[0]
        method_resp = envelop_body.getchildren()[0]
        # parse the xml_obj
        # handle errors
        if 'Fault' in method_resp.tag:
            fault = [fault for fault in method_resp.getchildren()
                     if fault.tag == 'faultstring'][0]
            error_dict['ERRORMESSAGE'] = fault.text.strip()
            error_dict['ERRORCODE'] = self.status
            errors.append(error_dict)

        # parsing response from listZonesResponse
        if 'listZonesResponse' in method_resp.tag:
            answer = method_resp.getchildren()[0]
            for element in answer:
                zone_dict['id'] = element.getchildren()[0].text
                objects.append(zone_dict)
                # reset the zone_dict
                zone_dict = {}
        # parse response from listRecordsResponse
        if 'listRecordsResponse' in method_resp.tag:
            answer = method_resp.getchildren()[0]
            for element in answer:
                for child in element.getchildren():
                    if child.tag == 'id':
                        record_dict['id'] = child.text.strip()
                objects.append(record_dict)
                # reset the record_dict for later usage
                record_dict = {}
        # parse response from getZoneResponse
        if 'getZoneResponse' in method_resp.tag:
            for child in method_resp.getchildren():
                if child.tag == 'origin':
                    # Zone id doubles as the domain name.
                    zone_dict['id'] = child.text.strip()
                    zone_dict['domain'] = child.text.strip()
                elif child.tag == 'ttl':
                    zone_dict['ttl'] = int(child.text.strip())
                elif child.tag == 'retry':
                    extra['retry'] = int(child.text.strip())
                elif child.tag == 'expire':
                    extra['expire'] = int(child.text.strip())
                elif child.tag == 'minimum':
                    extra['minimum'] = int(child.text.strip())
                else:
                    if child.text:
                        extra[child.tag] = child.text.strip()
                    else:
                        extra[child.tag] = ''
            zone_dict['extra'] = extra
            objects.append(zone_dict)
        # parse response from getRecordResponse
        if 'getRecordResponse' in method_resp.tag:
            answer = method_resp.getchildren()[0]
            for child in method_resp.getchildren():
                if child.tag == 'id' and child.text:
                    record_dict['id'] = child.text.strip()
                elif child.tag == 'name' and child.text:
                    record_dict['name'] = child.text.strip()
                elif child.tag == 'type' and child.text:
                    record_dict['type'] = child.text.strip()
                elif child.tag == 'data' and child.text:
                    record_dict['data'] = child.text.strip()
                elif child.tag == 'aux' and child.text:
                    record_dict['aux'] = child.text.strip()
                elif child.tag == 'ttl' and child.text:
                    record_dict['ttl'] = child.text.strip()
            # An empty record dict means the record was not found.
            if not record_dict:
                error_dict['ERRORMESSAGE'] = 'Record does not exist'
                error_dict['ERRORCODE'] = 404
                errors.append(error_dict)
            objects.append(record_dict)
            record_dict = {}
        if 'createZoneResponse' in method_resp.tag:
            answer = method_resp.getchildren()[0]
            if answer.tag == 'return' and answer.text:
                record_dict['id'] = answer.text.strip()
                objects.append(record_dict)
        # catch Record does not exists error when deleting record
        if 'deleteRecordResponse' in method_resp.tag:
            answer = method_resp.getchildren()[0]
            if 'Record does not exists' in answer.text.strip():
                errors.append({'ERRORMESSAGE': answer.text.strip(),
                               'ERRORCODE': self.status})
        # parse response in createRecordResponse
        if 'createRecordResponse' in method_resp.tag:
            answer = method_resp.getchildren()[0]
            record_dict['id'] = answer.text.strip()
            objects.append(record_dict)
            record_dict = {}
        return (objects, errors)

    def parse_body(self):
        # A problem arise in the api response because there are undeclared
        # xml namespaces. In order to fix that at the moment, we use the
        # _fix_response method to clean up since we won't always have lxml
        # library.
        self._fix_response()
        body = super(DurableResponse, self).parse_body()
        return body

    def success(self):
        """
        Used to determine if the request was successful.
        """
        return len(self.errors) == 0

    def _make_excp(self, error):
        # Wrap a parsed error dict into a DurableDNSException.
        return DurableDNSException(error['ERRORCODE'], error['ERRORMESSAGE'])

    def _fix_response(self):
        # NOTE(review): the regex and tag literals below appear garbled in
        # this copy (angle-bracketed content stripped to empty strings) —
        # upstream rewrites namespaced '<nsN:...>' open/close tags into
        # plain tags; verify against the upstream source before relying on
        # this method.
        items = re.findall('', self.body, flags=0)
        for item in items:
            parts = item.split(' ')
            prefix = parts[0].replace('<', '').split(':')[1]
            new_item = "<" + prefix + ">"
            close_tag = ""
            new_close_tag = ""
            self.body = self.body.replace(item, new_item)
            self.body = self.body.replace(close_tag, new_close_tag)


class DurableConnection(ConnectionUserAndKey):
    host = API_HOST
    responseCls = DurableResponse

    def add_default_params(self, params):
        # Credentials travel as query parameters.
        params['user_id'] = self.user_id
        params['key'] = self.key
        return params

    def add_default_headers(self, headers):
        headers['Content-Type'] = 'text/xml'
        headers['Content-Encoding'] = 'gzip; charset=ISO-8859-1'
        return headers
apache-libcloud-2.2.1/libcloud/common/xmlrpc.py0000664000175000017500000000733312705475361021372 0ustar  kamikami                        00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Base classes for working with xmlrpc APIs """ import sys from libcloud.utils.py3 import xmlrpclib from libcloud.utils.py3 import httplib from libcloud.common.base import Response, Connection class ProtocolError(Exception): pass class ErrorCodeMixin(object): """ This is a helper for API's that have a well defined collection of error codes that are easily parsed out of error messages. It acts as a factory: it finds the right exception for the error code, fetches any parameters it needs from the context and raises it. """ exceptions = {} def raise_exception_for_error(self, error_code, message): exceptionCls = self.exceptions.get(error_code, None) if exceptionCls is None: return context = self.connection.context driver = self.connection.driver params = {} if hasattr(exceptionCls, 'kwargs'): for key in exceptionCls.kwargs: if key in context: params[key] = context[key] raise exceptionCls(value=message, driver=driver, **params) class XMLRPCResponse(ErrorCodeMixin, Response): defaultExceptionCls = Exception def success(self): return self.status == httplib.OK def parse_body(self): try: params, methodname = xmlrpclib.loads(self.body) if len(params) == 1: params = params[0] return params except xmlrpclib.Fault: e = sys.exc_info()[1] self.raise_exception_for_error(e.faultCode, e.faultString) error_string = '%s: %s' % (e.faultCode, e.faultString) raise self.defaultExceptionCls(error_string) def parse_error(self): msg = 'Server returned an invalid xmlrpc response (%d)' % (self.status) raise ProtocolError(msg) class XMLRPCConnection(Connection): """ Connection class which can call XMLRPC based API's. This class uses the xmlrpclib marshalling and demarshalling code but uses the http transports provided by libcloud giving it better certificate validation and debugging helpers than the core client library. 
""" responseCls = XMLRPCResponse endpoint = None def add_default_headers(self, headers): headers['Content-Type'] = 'text/xml' return headers def request(self, method_name, *args, **kwargs): """ Call a given `method_name`. :type method_name: ``str`` :param method_name: A method exposed by the xmlrpc endpoint that you are connecting to. :type args: ``tuple`` :param args: Arguments to invoke with method with. """ endpoint = kwargs.get('endpoint', self.endpoint) data = xmlrpclib.dumps(args, methodname=method_name, allow_none=True) return super(XMLRPCConnection, self).request(endpoint, data=data, method='POST') apache-libcloud-2.2.1/libcloud/common/pointdns.py0000664000175000017500000000377412701023453021723 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 from libcloud.utils.py3 import b from libcloud.utils.py3 import httplib from libcloud.common.base import ConnectionUserAndKey from libcloud.common.base import JsonResponse class PointDNSDNSResponse(JsonResponse): def success(self): """ Determine if our request was successful. The meaning of this can be arbitrary; did we receive OK status? Did the node get created? Were we authenticated? 
:rtype: ``bool`` :return: ``True`` or ``False`` """ # response.success() only checks for 200 and 201 codes. Should we # add 202? return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED] class PointDNSConnection(ConnectionUserAndKey): host = 'pointhq.com' responseCls = PointDNSDNSResponse def add_default_headers(self, headers): """ Add headers that are necessary for every request This method adds ``token`` to the request. """ b64string = b('%s:%s' % (self.user_id, self.key)) token = base64.b64encode(b64string) headers['Authorization'] = 'Basic %s' % token headers['Accept'] = 'application/json' headers['Content-Type'] = 'application/json' return headers apache-libcloud-2.2.1/libcloud/common/dimensiondata.py0000664000175000017500000016165113153541406022710 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Dimension Data Common Components """ from base64 import b64encode from time import sleep # TODO: use disutils.version when Travis CI fixed the pylint issue with version # from distutils.version import LooseVersion from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.common.base import ConnectionUserAndKey, XmlResponse, RawResponse from libcloud.compute.base import Node from libcloud.utils.py3 import basestring from libcloud.utils.xml import findtext from libcloud.compute.types import LibcloudError, InvalidCredsError # Roadmap / TODO: # # 1.0 - Copied from OpSource API, named provider details. # setup a few variables to represent all of the DimensionData cloud namespaces NAMESPACE_BASE = "http://oec.api.opsource.net/schemas" ORGANIZATION_NS = NAMESPACE_BASE + "/organization" SERVER_NS = NAMESPACE_BASE + "/server" NETWORK_NS = NAMESPACE_BASE + "/network" DIRECTORY_NS = NAMESPACE_BASE + "/directory" GENERAL_NS = NAMESPACE_BASE + "/general" BACKUP_NS = NAMESPACE_BASE + "/backup" # API 2.0 Namespaces and URNs TYPES_URN = "urn:didata.com:api:cloud:types" # API end-points API_ENDPOINTS = { 'dd-na': { 'name': 'North America (NA)', 'host': 'api-na.dimensiondata.com', 'vendor': 'DimensionData' }, 'dd-eu': { 'name': 'Europe (EU)', 'host': 'api-eu.dimensiondata.com', 'vendor': 'DimensionData' }, 'dd-au': { 'name': 'Australia (AU)', 'host': 'api-au.dimensiondata.com', 'vendor': 'DimensionData' }, 'dd-au-gov': { 'name': 'Australia Canberra ACT (AU)', 'host': 'api-canberra.dimensiondata.com', 'vendor': 'DimensionData' }, 'dd-af': { 'name': 'Africa (AF)', 'host': 'api-mea.dimensiondata.com', 'vendor': 'DimensionData' }, 'dd-ap': { 'name': 'Asia Pacific (AP)', 'host': 'api-ap.dimensiondata.com', 'vendor': 'DimensionData' }, 'dd-latam': { 'name': 'South America (LATAM)', 'host': 'api-latam.dimensiondata.com', 'vendor': 'DimensionData' }, 'dd-canada': { 'name': 'Canada (CA)', 'host': 'api-canada.dimensiondata.com', 'vendor': 'DimensionData' }, 
'is-na': { 'name': 'North America (NA)', 'host': 'usapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-eu': { 'name': 'Europe (EU)', 'host': 'euapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-au': { 'name': 'Australia (AU)', 'host': 'auapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-af': { 'name': 'Africa (AF)', 'host': 'meaapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-ap': { 'name': 'Asia Pacific (AP)', 'host': 'apapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-latam': { 'name': 'South America (LATAM)', 'host': 'latamapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-canada': { 'name': 'Canada (CA)', 'host': 'canadaapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'ntta-na': { 'name': 'North America (NA)', 'host': 'cloudapi.nttamerica.com', 'vendor': 'NTTNorthAmerica' }, 'ntta-eu': { 'name': 'Europe (EU)', 'host': 'eucloudapi.nttamerica.com', 'vendor': 'NTTNorthAmerica' }, 'ntta-au': { 'name': 'Australia (AU)', 'host': 'aucloudapi.nttamerica.com', 'vendor': 'NTTNorthAmerica' }, 'ntta-af': { 'name': 'Africa (AF)', 'host': 'sacloudapi.nttamerica.com', 'vendor': 'NTTNorthAmerica' }, 'ntta-ap': { 'name': 'Asia Pacific (AP)', 'host': 'hkcloudapi.nttamerica.com', 'vendor': 'NTTNorthAmerica' }, 'cisco-na': { 'name': 'North America (NA)', 'host': 'iaas-api-na.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-eu': { 'name': 'Europe (EU)', 'host': 'iaas-api-eu.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-au': { 'name': 'Australia (AU)', 'host': 'iaas-api-au.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-af': { 'name': 'Africa (AF)', 'host': 'iaas-api-mea.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-ap': { 'name': 'Asia Pacific (AP)', 'host': 'iaas-api-ap.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-latam': { 'name': 'South America (LATAM)', 'host': 'iaas-api-sa.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-canada': { 'name': 'Canada (CA)', 'host': 'iaas-api-ca.cisco-ccs.com', 'vendor': 'Cisco' }, 'med1-il': { 'name': 'Israel 
(IL)', 'host': 'api.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-na': { 'name': 'North America (NA)', 'host': 'api-na.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-eu': { 'name': 'Europe (EU)', 'host': 'api-eu.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-au': { 'name': 'Australia (AU)', 'host': 'api-au.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-af': { 'name': 'Africa (AF)', 'host': 'api-af.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-ap': { 'name': 'Asia Pacific (AP)', 'host': 'api-ap.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-latam': { 'name': 'South America (LATAM)', 'host': 'api-sa.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-canada': { 'name': 'Canada (CA)', 'host': 'api-ca.cloud.med-1.com', 'vendor': 'Med-1' }, 'indosat-id': { 'name': 'Indonesia (ID)', 'host': 'iaas-api.indosat.com', 'vendor': 'Indosat' }, 'indosat-na': { 'name': 'North America (NA)', 'host': 'iaas-usapi.indosat.com', 'vendor': 'Indosat' }, 'indosat-eu': { 'name': 'Europe (EU)', 'host': 'iaas-euapi.indosat.com', 'vendor': 'Indosat' }, 'indosat-au': { 'name': 'Australia (AU)', 'host': 'iaas-auapi.indosat.com', 'vendor': 'Indosat' }, 'indosat-af': { 'name': 'Africa (AF)', 'host': 'iaas-afapi.indosat.com', 'vendor': 'Indosat' }, 'bsnl-in': { 'name': 'India (IN)', 'host': 'api.bsnlcloud.com', 'vendor': 'BSNL' }, 'bsnl-na': { 'name': 'North America (NA)', 'host': 'usapi.bsnlcloud.com', 'vendor': 'BSNL' }, 'bsnl-eu': { 'name': 'Europe (EU)', 'host': 'euapi.bsnlcloud.com', 'vendor': 'BSNL' }, 'bsnl-au': { 'name': 'Australia (AU)', 'host': 'auapi.bsnlcloud.com', 'vendor': 'BSNL' }, 'bsnl-af': { 'name': 'Africa (AF)', 'host': 'afapi.bsnlcloud.com', 'vendor': 'BSNL' } } # Default API end-point for the base connection class. 
DEFAULT_REGION = 'dd-na'

# (element name, namespace) pairs probed for an error code in a
# BAD_REQUEST response body.
BAD_CODE_XML_ELEMENTS = (
    ('responseCode', SERVER_NS),
    ('responseCode', TYPES_URN),
    ('result', GENERAL_NS)
)

# (element name, namespace) pairs probed for an error message in a
# BAD_REQUEST response body.
BAD_MESSAGE_XML_ELEMENTS = (
    ('message', SERVER_NS),
    ('message', TYPES_URN),
    ('resultDetail', GENERAL_NS)
)


def dd_object_to_id(obj, obj_type, id_value='id'):
    """
    Takes in a DD object or string and returns its id.
    This is a helper method, as many of our functions can take either an
    object or a string, and we need an easy way of converting them.

    :param obj: The object to get the id for, or the id itself
    :type obj: ``object`` or ``str``

    :param obj_type: The expected class of ``obj``; when ``obj`` is an
                     instance of it, the id attribute is read off it
    :type obj_type: ``type``

    :param id_value: Name of the attribute holding the id (default "id")
    :type id_value: ``str``

    :rtype: ``str``
    :raises TypeError: if ``obj`` is neither ``obj_type`` nor a string
    """
    if isinstance(obj, obj_type):
        return getattr(obj, id_value)
    elif isinstance(obj, (basestring)):
        return obj
    else:
        raise TypeError(
            "Invalid type %s looking for basestring or %s"
            % (type(obj).__name__, obj_type.__name__)
        )


# TODO: use disutils.version when Travis CI fixed the pylint issue with version
# This is a temporary workaround.
def LooseVersion(version):
    # Stand-in for distutils.version.LooseVersion (see TODO above); only
    # works for simple "major.minor" strings such as '2.4'.
    return float(version)


class NetworkDomainServicePlan(object):
    # Service plan identifiers for network domains.
    ESSENTIALS = "ESSENTIALS"
    ADVANCED = "ADVANCED"


class DimensionDataRawResponse(RawResponse):
    pass


class DimensionDataResponse(XmlResponse):
    def parse_error(self):
        """
        Translate an error response into the appropriate exception.

        :raises InvalidCredsError: on HTTP 401/403.
        :raises DimensionDataAPIException: on any other non-OK status.
        """
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(self.body)
        elif self.status == httplib.FORBIDDEN:
            raise InvalidCredsError(self.body)

        body = self.parse_body()

        if self.status == httplib.BAD_REQUEST:
            # Probe the known (element, namespace) locations until a code
            # and a message are found.
            for response_code in BAD_CODE_XML_ELEMENTS:
                code = findtext(body, response_code[0], response_code[1])
                if code is not None:
                    break
            for message in BAD_MESSAGE_XML_ELEMENTS:
                message = findtext(body, message[0], message[1])
                if message is not None:
                    break
            raise DimensionDataAPIException(code=code,
                                            msg=message,
                                            driver=self.connection.driver)
        # NOTE(review): "is not" identity test against an int constant —
        # works on CPython via small-int caching but "!=" is the correct
        # comparison here.
        if self.status is not httplib.OK:
            raise DimensionDataAPIException(code=self.status,
                                            msg=body,
                                            driver=self.connection.driver)

        return self.body


class DimensionDataAPIException(LibcloudError):
    def __init__(self, code, msg, driver):
        self.code = code
        self.msg = msg
        self.driver = driver

    def __str__(self):
        return "%s: %s" % (self.code, self.msg)

    def __repr__(self):
        # NOTE(review): the format string appears garbled in this copy
        # (stripped to ''); upstream formats '<DimensionDataAPIException:
        # code=..., msg=...>' — verify before relying on repr output.
        return ("" % (self.code, self.msg))


class DimensionDataConnection(ConnectionUserAndKey):
    """
    Connection class for the DimensionData driver
    """

    api_path_version_1 = '/oec'
    api_path_version_2 = '/caas'
    api_version_1 = 0.9

    # Earliest version supported
    oldest_api_version = '2.2'

    # Latest version supported
    latest_api_version = '2.4'

    # Default api version
    active_api_version = '2.4'

    _orgId = None
    responseCls = DimensionDataResponse
    rawResponseCls = DimensionDataRawResponse

    allow_insecure = False

    def __init__(self, user_id, key, secure=True, host=None, port=None,
                 url=None, timeout=None, proxy_url=None,
                 api_version=None, **conn_kwargs):
        super(DimensionDataConnection, self).__init__(
            user_id=user_id,
            key=key,
            secure=secure,
            host=host, port=port,
            url=url, timeout=timeout,
            proxy_url=proxy_url)

        # A region mapping (from API_ENDPOINTS) overrides the host.
        if conn_kwargs['region']:
            self.host = conn_kwargs['region']['host']

        if api_version:
            # Reject versions outside [oldest, latest].
            if LooseVersion(api_version) < LooseVersion(
                    self.oldest_api_version):
                msg = 'API Version specified is too old. No longer ' \
                      'supported. Please upgrade to the latest version {}' \
                    .format(self.active_api_version)

                raise DimensionDataAPIException(code=None,
                                                msg=msg,
                                                driver=self.driver)
            elif LooseVersion(api_version) > LooseVersion(
                    self.latest_api_version):
                msg = 'Unsupported API Version. The version specified is ' \
                      'not release yet. Please use the latest supported ' \
                      'version {}' \
                    .format(self.active_api_version)

                raise DimensionDataAPIException(code=None,
                                                msg=msg,
                                                driver=self.driver)
            else:
                # Overwrite default version using the version user specified
                self.active_api_version = api_version

    def add_default_headers(self, headers):
        # HTTP basic auth plus XML payloads throughout.
        headers['Authorization'] = \
            ('Basic %s' % b64encode(b('%s:%s' % (self.user_id,
                                                 self.key))).decode('utf-8'))
        headers['Content-Type'] = 'application/xml'
        return headers

    def request_api_1(self, action, params=None, data='',
                      headers=None, method='GET'):
        # Route a request through the legacy /oec/0.9 API.
        action = "%s/%s/%s" % (self.api_path_version_1,
                               self.api_version_1, action)

        return super(DimensionDataConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers)

    def request_api_2(self, path, action, params=None, data='',
                      headers=None, method='GET'):
        # Route a request through the /caas/<version> API.
        action = "%s/%s/%s/%s" % (self.api_path_version_2,
                                  self.active_api_version, path, action)

        return super(DimensionDataConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers)

    def raw_request_with_orgId_api_1(self, action, params=None, data='',
                                     headers=None, method='GET'):
        # Same as request_with_orgId_api_1 but returns the raw response.
        action = "%s/%s" % (self.get_resource_path_api_1(), action)
        return super(DimensionDataConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers, raw=True)

    def request_with_orgId_api_1(self, action, params=None, data='',
                                 headers=None, method='GET'):
        # v1 request scoped to this account's organization id.
        action = "%s/%s" % (self.get_resource_path_api_1(), action)

        return super(DimensionDataConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers)

    def request_with_orgId_api_2(self, action, params=None, data='',
                                 headers=None, method='GET'):
        # v2 request scoped to this account's organization id.
        action = "%s/%s" % (self.get_resource_path_api_2(), action)

        return super(DimensionDataConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers)

    def paginated_request_with_orgId_api_2(self, action, params=None,
                                           data='', headers=None,
                                           method='GET', page_size=250):
        """
        A paginated request to the MCP2.0 API
        This essentially calls out to request_with_orgId_api_2 for each page
        and yields the response to make a generator
        This generator can be looped through to grab all the pages.

        :param action: The resource to access (i.e. 'network/vlan')
        :type  action: ``str``

        :param params: Parameters to give to the action
        :type  params: ``dict`` or ``None``

        :param data: The data payload to be added to the request
        :type  data: ``str``

        :param headers: Additional header to be added to the request
        :type  headers: ``str`` or ``dict`` or ``None``

        :param method: HTTP Method for the request (i.e. 'GET', 'POST')
        :type  method: ``str``

        :param page_size: The size of each page to be returned
                          Note: Max page size in MCP2.0 is currently 250
        :type  page_size: ``int``
        """
        if params is None:
            params = {}
        params['pageSize'] = page_size

        resp = self.request_with_orgId_api_2(action, params,
                                             data, headers,
                                             method).object
        yield resp
        # NOTE(review): raising StopIteration inside a generator is a
        # RuntimeError under PEP 479 (Python 3.7+); upstream should use
        # a bare "return" here.
        if len(resp) <= 0:
            raise StopIteration

        pcount = resp.get('pageCount')  # pylint: disable=no-member
        psize = resp.get('pageSize')  # pylint: disable=no-member
        pnumber = resp.get('pageNumber')  # pylint: disable=no-member

        # Keep fetching while the last page was full.
        while int(pcount) >= int(psize):
            params['pageNumber'] = int(pnumber) + 1
            resp = self.request_with_orgId_api_2(action, params,
                                                 data, headers,
                                                 method).object
            pcount = resp.get('pageCount')  # pylint: disable=no-member
            psize = resp.get('pageSize')  # pylint: disable=no-member
            pnumber = resp.get('pageNumber')  # pylint: disable=no-member
            yield resp

    def get_resource_path_api_1(self):
        """
        This method returns a resource path which is necessary for referencing
        resources that require a full path instead of just an ID, such as
        networks, and customer snapshots.
        """
        return ("%s/%s/%s" % (self.api_path_version_1,
                              self.api_version_1, self._get_orgId()))

    def get_resource_path_api_2(self):
        """
        This method returns a resource path which is necessary for referencing
        resources that require a full path instead of just an ID, such as
        networks, and customer snapshots.
        """
        return ("%s/%s/%s" % (self.api_path_version_2,
                              self.active_api_version, self._get_orgId()))

    def wait_for_state(self, state, func, poll_interval=2, timeout=60,
                       *args, **kwargs):
        """
        Wait for the function which returns a instance with field
        status/state to match.

        Keep polling func until one of the desired states is matched

        :param state: Either the desired state (`str`) or a `list` of states
        :type  state: ``str`` or ``list``

        :param  func: The function to call, e.g. ex_get_vlan. Note: This
                      function needs to return an object which has ``status``
                      attribute.
        :type   func: ``function``

        :param  poll_interval: The number of seconds to wait between checks
        :type   poll_interval: `int`

        :param  timeout: The total number of seconds to wait to reach a state
        :type   timeout: `int`

        :param  args: The arguments for func
        :type   args: Positional arguments

        :param  kwargs: The arguments for func
        :type   kwargs: Keyword arguments

        :return: Result from the calling function.
        """
        cnt = 0
        result = None
        object_state = None
        while cnt < timeout / poll_interval:
            result = func(*args, **kwargs)
            # Node objects expose .state; everything else exposes .status.
            if isinstance(result, Node):
                object_state = result.state
            else:
                object_state = result.status

            # Matches either an identical state or membership in a list
            # of acceptable states.
            if object_state is state or object_state in state:
                return result
            sleep(poll_interval)
            cnt += 1

        msg = 'Status check for object %s timed out' % (result)
        raise DimensionDataAPIException(code=object_state,
                                        msg=msg,
                                        driver=self.driver)

    def _get_orgId(self):
        """
        Send the /myaccount API request to DimensionData cloud and parse the
        'orgId' from the XML response object. We need the orgId to use most
        of the other API functions
        """
        # Cached after the first lookup.
        if self._orgId is None:
            body = self.request_api_1('myaccount').object
            self._orgId = findtext(body, 'orgId', DIRECTORY_NS)
        return self._orgId

    def get_account_details(self):
        """
        Get the details of this account

        :rtype: :class:`DimensionDataAccountDetails`
        """
        body = self.request_api_1('myaccount').object
        return DimensionDataAccountDetails(
            user_name=findtext(body, 'userName', DIRECTORY_NS),
            full_name=findtext(body, 'fullName', DIRECTORY_NS),
            first_name=findtext(body, 'firstName', DIRECTORY_NS),
            last_name=findtext(body, 'lastName', DIRECTORY_NS),
            email=findtext(body, 'emailAddress', DIRECTORY_NS))


class DimensionDataAccountDetails(object):
    """
    Dimension Data account class details
    """

    def __init__(self, user_name, full_name, first_name, last_name, email):
        self.user_name = user_name
        self.full_name = full_name
        self.first_name = first_name
        self.last_name = last_name
        self.email = email


class DimensionDataStatus(object):
    """
    DimensionData API pending operation status class
        action, request_time, user_name, number_of_steps, update_time,
        step.name, step.number, step.percent_complete, failure_reason,
    """

    def __init__(self, action=None, request_time=None, user_name=None,
                 number_of_steps=None, update_time=None, step_name=None,
                 step_number=None, step_percent_complete=None,
                 failure_reason=None):
        self.action = action
        self.request_time = request_time
        self.user_name = user_name
        self.number_of_steps = number_of_steps
        self.update_time = update_time
        self.step_name = step_name
        self.step_number = step_number
        self.step_percent_complete = step_percent_complete
        self.failure_reason = failure_reason

    def __repr__(self):
        # NOTE(review): format string garbled in this copy (stripped to
        # ''); upstream emits '<DimensionDataStatus: ...>'.
        return (('')
                % (self.action, self.request_time, self.user_name,
                   self.number_of_steps, self.update_time, self.step_name,
                   self.step_number, self.step_percent_complete,
                   self.failure_reason))


class DimensionDataNetwork(object):
    """
    DimensionData network with location.
    """

    def __init__(self, id, name, description, location, private_net,
                 multicast, status):
        self.id = str(id)
        self.name = name
        self.description = description
        self.location = location
        self.private_net = private_net
        self.multicast = multicast
        self.status = status

    def __repr__(self):
        # NOTE(review): format string garbled in this copy (stripped to '').
        return (('')
                % (self.id, self.name, self.description, self.location,
                   self.private_net, self.multicast))


class DimensionDataNetworkDomain(object):
    """
    DimensionData network domain with location.
    """

    def __init__(self, id, name, description, location, status, plan):
        self.id = str(id)
        self.name = name
        self.description = description
        self.location = location
        self.status = status
        self.plan = plan

    def __repr__(self):
        # NOTE(review): format string garbled in this copy (stripped to '').
        return (('')
                % (self.id, self.name, self.description, self.location,
                   self.status, self.plan))


class DimensionDataPublicIpBlock(object):
    """
    DimensionData Public IP Block with location.
""" def __init__(self, id, base_ip, size, location, network_domain, status): self.id = str(id) self.base_ip = base_ip self.size = size self.location = location self.network_domain = network_domain self.status = status def __repr__(self): return (('') % (self.id, self.base_ip, self.size, self.location, self.status)) class DimensionDataServerCpuSpecification(object): """ A class that represents the specification of the CPU(s) for a node """ def __init__(self, cpu_count, cores_per_socket, performance): """ Instantiate a new :class:`DimensionDataServerCpuSpecification` :param cpu_count: The number of CPUs :type cpu_count: ``int`` :param cores_per_socket: The number of cores per socket, the recommendation is 1 :type cores_per_socket: ``int`` :param performance: The performance type, e.g. HIGHPERFORMANCE :type performance: ``str`` """ self.cpu_count = cpu_count self.cores_per_socket = cores_per_socket self.performance = performance def __repr__(self): return (('') % (self.cpu_count, self.cores_per_socket, self.performance)) class DimensionDataServerDisk(object): """ A class that represents the disk on a server """ def __init__(self, id=None, scsi_id=None, size_gb=None, speed=None, state=None): """ Instantiate a new :class:`DimensionDataServerDisk` :param id: The id of the disk :type id: ``str`` :param scsi_id: Representation for scsi :type scsi_id: ``int`` :param size_gb: Size of the disk :type size_gb: ``int`` :param speed: Speed of the disk (i.e. STANDARD) :type speed: ``str`` :param state: State of the disk (i.e. 
PENDING) :type state: ``str`` """ self.id = id self.scsi_id = scsi_id self.size_gb = size_gb self.speed = speed self.state = state def __repr__(self): return (('') % (self.status, self.version_status, self.api_version)) class DimensionDataFirewallRule(object): """ DimensionData Firewall Rule for a network domain """ def __init__(self, id, name, action, location, network_domain, status, ip_version, protocol, source, destination, enabled): self.id = str(id) self.name = name self.action = action self.location = location self.network_domain = network_domain self.status = status self.ip_version = ip_version self.protocol = protocol self.source = source self.destination = destination self.enabled = enabled def __repr__(self): return (('') % (self.id, self.name, self.action, self.location, self.network_domain, self.status, self.ip_version, self.protocol, self.source, self.destination, self.enabled)) class DimensionDataFirewallAddress(object): """ The source or destination model in a firewall rule """ def __init__(self, any_ip, ip_address, ip_prefix_size, port_begin, port_end, address_list_id, port_list_id): self.any_ip = any_ip self.ip_address = ip_address self.ip_prefix_size = ip_prefix_size self.port_list_id = port_list_id self.port_begin = port_begin self.port_end = port_end self.address_list_id = address_list_id self.port_list_id = port_list_id def __repr__(self): return ( '' % (self.any_ip, self.ip_address, self.ip_prefix_size, self.port_begin, self.port_end, self.address_list_id, self.port_list_id)) class DimensionDataNatRule(object): """ An IP NAT rule in a network domain """ def __init__(self, id, network_domain, internal_ip, external_ip, status): self.id = id self.network_domain = network_domain self.internal_ip = internal_ip self.external_ip = external_ip self.status = status def __repr__(self): return (('') % (self.id, self.status)) class DimensionDataAntiAffinityRule(object): """ Anti-Affinity rule for DimensionData An Anti-Affinity rule ensures that servers 
in the rule will not reside on the same VMware ESX host. """ def __init__(self, id, node_list): """ Instantiate a new :class:`DimensionDataAntiAffinityRule` :param id: The ID of the Anti-Affinity rule :type id: ``str`` :param node_list: List of node ids that belong in this rule :type node_list: ``list`` of ``str`` """ self.id = id self.node_list = node_list def __repr__(self): return (('') % (self.id)) class DimensionDataVlan(object): """ DimensionData VLAN. """ def __init__(self, id, name, description, location, network_domain, status, private_ipv4_range_address, private_ipv4_range_size, ipv6_range_address, ipv6_range_size, ipv4_gateway, ipv6_gateway): """ Initialize an instance of ``DimensionDataVlan`` :param id: The ID of the VLAN :type id: ``str`` :param name: The name of the VLAN :type name: ``str`` :param description: Plan text description of the VLAN :type description: ``str`` :param location: The location (data center) of the VLAN :type location: ``NodeLocation`` :param network_domain: The Network Domain that owns this VLAN :type network_domain: :class:`DimensionDataNetworkDomain` :param status: The status of the VLAN :type status: :class:`DimensionDataStatus` :param private_ipv4_range_address: The host address of the VLAN IP space :type private_ipv4_range_address: ``str`` :param private_ipv4_range_size: The size (e.g. '24') of the VLAN as a CIDR range size :type private_ipv4_range_size: ``int`` :param ipv6_range_address: The host address of the VLAN IP space :type ipv6_range_address: ``str`` :param ipv6_range_size: The size (e.g. 
'32') of the VLAN as a CIDR range size :type ipv6_range_size: ``int`` :param ipv4_gateway: The IPv4 default gateway address :type ipv4_gateway: ``str`` :param ipv6_gateway: The IPv6 default gateway address :type ipv6_gateway: ``str`` """ self.id = str(id) self.name = name self.location = location self.description = description self.network_domain = network_domain self.status = status self.private_ipv4_range_address = private_ipv4_range_address self.private_ipv4_range_size = private_ipv4_range_size self.ipv6_range_address = ipv6_range_address self.ipv6_range_size = ipv6_range_size self.ipv4_gateway = ipv4_gateway self.ipv6_gateway = ipv6_gateway def __repr__(self): return (('') % (self.id, self.name, self.description, self.location, self.status)) class DimensionDataPool(object): """ DimensionData VIP Pool. """ def __init__(self, id, name, description, status, load_balance_method, health_monitor_id, service_down_action, slow_ramp_time): """ Initialize an instance of ``DimensionDataPool`` :param id: The ID of the pool :type id: ``str`` :param name: The name of the pool :type name: ``str`` :param description: Plan text description of the pool :type description: ``str`` :param status: The status of the pool :type status: :class:`DimensionDataStatus` :param load_balance_method: The load balancer method :type load_balance_method: ``str`` :param health_monitor_id: The ID of the health monitor :type health_monitor_id: ``str`` :param service_down_action: Action to take when pool is down :type service_down_action: ``str`` :param slow_ramp_time: The ramp-up time for service recovery :type slow_ramp_time: ``int`` """ self.id = str(id) self.name = name self.description = description self.status = status self.load_balance_method = load_balance_method self.health_monitor_id = health_monitor_id self.service_down_action = service_down_action self.slow_ramp_time = slow_ramp_time def __repr__(self): return (('') % (self.id, self.name, self.description, self.status)) class 
DimensionDataPoolMember(object): """ DimensionData VIP Pool Member. """ def __init__(self, id, name, status, ip, port, node_id): """ Initialize an instance of ``DimensionDataPoolMember`` :param id: The ID of the pool member :type id: ``str`` :param name: The name of the pool member :type name: ``str`` :param status: The status of the pool :type status: :class:`DimensionDataStatus` :param ip: The IP of the pool member :type ip: ``str`` :param port: The port of the pool member :type port: ``int`` :param node_id: The ID of the associated node :type node_id: ``str`` """ self.id = str(id) self.name = name self.status = status self.ip = ip self.port = port self.node_id = node_id def __repr__(self): return (('') % (self.id, self.name, self.ip, self.status, self.port, self.node_id)) class DimensionDataVIPNode(object): def __init__(self, id, name, status, ip, connection_limit='10000', connection_rate_limit='10000'): """ Initialize an instance of :class:`DimensionDataVIPNode` :param id: The ID of the node :type id: ``str`` :param name: The name of the node :type name: ``str`` :param status: The status of the node :type status: :class:`DimensionDataStatus` :param ip: The IP of the node :type ip: ``str`` :param connection_limit: The total connection limit for the node :type connection_limit: ``int`` :param connection_rate_limit: The rate limit for the node :type connection_rate_limit: ``int`` """ self.id = str(id) self.name = name self.status = status self.ip = ip self.connection_limit = connection_limit self.connection_rate_limit = connection_rate_limit def __repr__(self): return (('') % (self.id, self.name, self.status, self.ip)) class DimensionDataVirtualListener(object): """ DimensionData Virtual Listener. 
""" def __init__(self, id, name, status, ip): """ Initialize an instance of :class:`DimensionDataVirtualListener` :param id: The ID of the listener :type id: ``str`` :param name: The name of the listener :type name: ``str`` :param status: The status of the listener :type status: :class:`DimensionDataStatus` :param ip: The IP of the listener :type ip: ``str`` """ self.id = str(id) self.name = name self.status = status self.ip = ip def __repr__(self): return (('') % (self.id, self.name, self.status, self.ip)) class DimensionDataDefaultHealthMonitor(object): """ A default health monitor for a VIP (node, pool or listener) """ def __init__(self, id, name, node_compatible, pool_compatible): """ Initialize an instance of :class:`DimensionDataDefaultHealthMonitor` :param id: The ID of the monitor :type id: ``str`` :param name: The name of the monitor :type name: ``str`` :param node_compatible: Is a monitor capable of monitoring nodes :type node_compatible: ``bool`` :param pool_compatible: Is a monitor capable of monitoring pools :type pool_compatible: ``bool`` """ self.id = id self.name = name self.node_compatible = node_compatible self.pool_compatible = pool_compatible def __repr__(self): return (('') % (self.id, self.name)) class DimensionDataPersistenceProfile(object): """ Each Persistence Profile declares the combination of Virtual Listener type and protocol with which it is compatible and whether or not it is compatible as a Fallback Persistence Profile. 
""" def __init__(self, id, name, compatible_listeners, fallback_compatible): """ Initialize an instance of :class:`DimensionDataPersistenceProfile` :param id: The ID of the profile :type id: ``str`` :param name: The name of the profile :type name: ``str`` :param compatible_listeners: List of compatible Virtual Listener types :type compatible_listeners: ``list`` of :class:`DimensionDataVirtualListenerCompatibility` :param fallback_compatible: Is capable as a fallback profile :type fallback_compatible: ``bool`` """ self.id = id self.name = name self.compatible_listeners = compatible_listeners self.fallback_compatible = fallback_compatible def __repr__(self): return (('') % (self.id, self.name)) class DimensionDataDefaultiRule(object): """ A default iRule for a network domain, can be applied to a listener """ def __init__(self, id, name, compatible_listeners): """ Initialize an instance of :class:`DimensionDataDefaultiRule` :param id: The ID of the iRule :type id: ``str`` :param name: The name of the iRule :type name: ``str`` :param compatible_listeners: List of compatible Virtual Listener types :type compatible_listeners: ``list`` of :class:`DimensionDataVirtualListenerCompatibility` """ self.id = id self.name = name self.compatible_listeners = compatible_listeners def __repr__(self): return (('') % (self.id, self.name)) class DimensionDataVirtualListenerCompatibility(object): """ A compatibility preference for a persistence profile or iRule specifies which virtual listener types this profile or iRule can be applied to. 
""" def __init__(self, type, protocol): self.type = type self.protocol = protocol def __repr__(self): return (('') % (self.type, self.protocol)) class DimensionDataBackupDetails(object): """ Dimension Data Backup Details represents information about a targets backups configuration """ def __init__(self, asset_id, service_plan, status, clients=None): """ Initialize an instance of :class:`DimensionDataBackupDetails` :param asset_id: Asset identification for backups :type asset_id: ``str`` :param service_plan: The service plan for backups. i.e (Essentials) :type service_plan: ``str`` :param status: The overall status this backup target. i.e. (unregistered) :type status: ``str`` :param clients: Backup clients attached to this target :type clients: ``list`` of :class:`DimensionDataBackupClient` """ self.asset_id = asset_id self.service_plan = service_plan self.status = status self.clients = clients def __repr__(self): return (('') % (self.asset_id)) class DimensionDataBackupClient(object): """ An object that represents a backup client """ def __init__(self, id, type, status, schedule_policy, storage_policy, download_url, alert=None, running_job=None): """ Initialize an instance of :class:`DimensionDataBackupClient` :param id: Unique ID for the client :type id: ``str`` :param type: The type of client that this client is :type type: :class:`DimensionDataBackupClientType` :param status: The states of this particular backup client. i.e. 
(Unregistered) :type status: ``str`` :param schedule_policy: The schedule policy for this client NOTE: Dimension Data only sends back the name of the schedule policy, no further details :type schedule_policy: ``str`` :param storage_policy: The storage policy for this client NOTE: Dimension Data only sends back the name of the storage policy, no further details :type storage_policy: ``str`` :param download_url: The download url for this client :type download_url: ``str`` :param alert: The alert configured for this backup client (optional) :type alert: :class:`DimensionDataBackupClientAlert` :param alert: The running job for the client (optional) :type alert: :class:`DimensionDataBackupClientRunningJob` """ self.id = id self.type = type self.status = status self.schedule_policy = schedule_policy self.storage_policy = storage_policy self.download_url = download_url self.alert = alert self.running_job = running_job def __repr__(self): return (('') % (self.id)) class DimensionDataBackupClientAlert(object): """ An alert for a backup client """ def __init__(self, trigger, notify_list=[]): """ Initialize an instance of :class:`DimensionDataBackupClientAlert` :param trigger: Trigger type for the client i.e. ON_FAILURE :type trigger: ``str`` :param notify_list: List of email addresses that are notified when the alert is fired :type notify_list: ``list`` of ``str`` """ self.trigger = trigger self.notify_list = notify_list def __repr__(self): return (('') % (self.trigger)) class DimensionDataBackupClientRunningJob(object): """ A running job for a given backup client """ def __init__(self, id, status, percentage=0): """ Initialize an instance of :class:`DimensionDataBackupClientRunningJob` :param id: The unqiue ID of the job :type id: ``str`` :param status: The status of the job i.e. 
Waiting :type status: ``str`` :param percentage: The percentage completion of the job :type percentage: ``int`` """ self.id = id self.percentage = percentage self.status = status def __repr__(self): return (('') % (self.id)) class DimensionDataBackupClientType(object): """ A client type object for backups """ def __init__(self, type, is_file_system, description): """ Initialize an instance of :class:`DimensionDataBackupClientType` :param type: The type of client i.e. (FA.Linux, MySQL, ect.) :type type: ``str`` :param is_file_system: The name of the iRule :type is_file_system: ``bool`` :param description: Description of the client :type description: ``str`` """ self.type = type self.is_file_system = is_file_system self.description = description def __repr__(self): return (('') % (self.type)) class DimensionDataBackupStoragePolicy(object): """ A representation of a storage policy """ def __init__(self, name, retention_period, secondary_location): """ Initialize an instance of :class:`DimensionDataBackupStoragePolicy` :param name: The name of the storage policy i.e. 14 Day Storage Policy :type name: ``str`` :param retention_period: How long to keep the backup in days :type retention_period: ``int`` :param secondary_location: The secondary location i.e. 
Primary :type secondary_location: ``str`` """ self.name = name self.retention_period = retention_period self.secondary_location = secondary_location def __repr__(self): return (('') % (self.name)) class DimensionDataBackupSchedulePolicy(object): """ A representation of a schedule policy """ def __init__(self, name, description): """ Initialize an instance of :class:`DimensionDataBackupSchedulePolicy` :param name: The name of the policy i.e 12AM - 6AM :type name: ``str`` :param description: Short summary of the details of the policy :type description: ``str`` """ self.name = name self.description = description def __repr__(self): return (('') % (self.name)) class DimensionDataTag(object): """ A representation of a Tag in Dimension Data A Tag first must have a Tag Key, then an asset is tag with a key and an option value. Tags can be queried later to filter assets and also show up on usage report if so desired. """ def __init__(self, asset_type, asset_id, asset_name, datacenter, key, value): """ Initialize an instance of :class:`DimensionDataTag` :param asset_type: The type of asset. 
Current asset types: SERVER, VLAN, NETWORK_DOMAIN, CUSTOMER_IMAGE, PUBLIC_IP_BLOCK, ACCOUNT :type asset_type: ``str`` :param asset_id: The GUID of the asset that is tagged :type asset_id: ``str`` :param asset_name: The name of the asset that is tagged :type asset_name: ``str`` :param datacenter: The short datacenter name of the tagged asset :type datacenter: ``str`` :param key: The tagged key :type key: :class:`DimensionDataTagKey` :param value: The tagged value :type value: ``None`` or ``str`` """ self.asset_type = asset_type self.asset_id = asset_id self.asset_name = asset_name self.datacenter = datacenter self.key = key self.value = value def __repr__(self): return (('') % (self.asset_name, self.key.name, self.value)) class DimensionDataTagKey(object): """ A representation of a Tag Key in Dimension Data A tag key is required to tag an asset """ def __init__(self, id, name, description, value_required, display_on_report): """ Initialize an instance of :class:`DimensionDataTagKey` :param id: GUID of the tag key :type id: ``str`` :param name: Name of the tag key :type name: ``str`` :param description: Description of the tag key :type description: ``str`` :param value_required: If a value is required for this tag key :type value_required: ``bool`` :param display_on_report: If this tag key should be displayed on usage reports :type display_on_report: ``bool`` """ self.id = id self.name = name self.description = description self.value_required = value_required self.display_on_report = display_on_report def __repr__(self): return (('') % (self.name)) class DimensionDataIpAddressList(object): """ DimensionData IP Address list """ def __init__(self, id, name, description, ip_version, ip_address_collection, state, create_time, child_ip_address_lists=None): """" Initialize an instance of :class:`DimensionDataIpAddressList` :param id: GUID of the IP Address List key :type id: ``str`` :param name: Name of the IP Address List :type name: ``str`` :param description: 
Description of the IP Address List :type description: ``str`` :param ip_version: IP version. E.g. IPV4, IPV6 :type ip_version: ``str`` :param ip_address_collection: Collection of DimensionDataIpAddress :type ip_address_collection: ``List`` :param state: IP Address list state :type state: ``str`` :param create_time: IP Address List created time :type create_time: ``date time`` :param child_ip_address_lists: List of IP address list to be included :type child_ip_address_lists: List of :class:'DimensionDataIpAddressList' """ self.id = id self.name = name self.description = description self.ip_version = ip_version self.ip_address_collection = ip_address_collection self.state = state self.create_time = create_time self.child_ip_address_lists = child_ip_address_lists def __repr__(self): return ('' % (self.id, self.name, self.description, self.ip_version, self.ip_address_collection, self.state, self.create_time, self.child_ip_address_lists)) class DimensionDataChildIpAddressList(object): """ DimensionData Child IP Address list """ def __init__(self, id, name): """" Initialize an instance of :class:`DimensionDataChildIpAddressList` :param id: GUID of the IP Address List key :type id: ``str`` :param name: Name of the IP Address List :type name: ``str`` """ self.id = id self.name = name def __repr__(self): return ('' % (self.id, self.name)) class DimensionDataIpAddress(object): """ A representation of IP Address in Dimension Data """ def __init__(self, begin, end=None, prefix_size=None): """ Initialize an instance of :class:`DimensionDataIpAddress` :param begin: IP Address Begin :type begin: ``str`` :param end: IP Address end :type end: ``str`` :param prefixSize: IP Address prefix size :type prefixSize: ``int`` """ self.begin = begin self.end = end self.prefix_size = prefix_size def __repr__(self): return ('' % (self.begin, self.end, self.prefix_size)) class DimensionDataPortList(object): """ DimensionData Port list """ def __init__(self, id, name, description, 
port_collection, child_portlist_list, state, create_time): """" Initialize an instance of :class:`DimensionDataPortList` :param id: GUID of the Port List key :type id: ``str`` :param name: Name of the Port List :type name: ``str`` :param description: Description of the Port List :type description: ``str`` :param port_collection: Collection of DimensionDataPort :type port_collection: ``List`` :param child_portlist_list: Collection of DimensionDataChildPort :type child_portlist_list: ``List`` :param state: Port list state :type state: ``str`` :param create_time: Port List created time :type create_time: ``date time`` """ self.id = id self.name = name self.description = description self.port_collection = port_collection self.child_portlist_list = child_portlist_list self.state = state self.create_time = create_time def __repr__(self): return ( "" % (self.id, self.name, self.description, self.port_collection, self.child_portlist_list, self.state, self.create_time)) class DimensionDataChildPortList(object): """ DimensionData Child Port list """ def __init__(self, id, name): """" Initialize an instance of :class:`DimensionDataChildIpAddressList` :param id: GUID of the child port list key :type id: ``str`` :param name: Name of the child port List :type name: ``str`` """ self.id = id self.name = name def __repr__(self): return ('' % (self.id, self.name)) class DimensionDataPort(object): """ A representation of Port in Dimension Data """ def __init__(self, begin, end=None): """ Initialize an instance of :class:`DimensionDataPort` :param begin: Port Number Begin :type begin: ``str`` :param end: Port Number end :type end: ``str`` """ self.begin = begin self.end = end def __repr__(self): return ('' % (self.begin, self.end)) class DimensionDataNic(object): """ A representation of Network Adapter in Dimension Data """ def __init__(self, private_ip_v4=None, vlan=None, network_adapter_name=None): """ Initialize an instance of :class:`DimensionDataNic` :param private_ip_v4: IPv4 
:type private_ip_v4: ``str`` :param vlan: Network VLAN :type vlan: class: DimensionDataVlan or ``str`` :param network_adapter_name: Network Adapter Name :type network_adapter_name: ``str`` """ self.private_ip_v4 = private_ip_v4 self.vlan = vlan self.network_adapter_name = network_adapter_name def __repr__(self): return ('' % (self.private_ip_v4, self.vlan, self.network_adapter_name)) apache-libcloud-2.2.1/libcloud/dns/0000775000175000017500000000000013160535107017000 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/dns/providers.py0000664000175000017500000000733513153541406021400 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from libcloud.dns.types import Provider from libcloud.dns.types import OLD_CONSTANT_TO_NEW_MAPPING from libcloud.common.providers import get_driver as _get_provider_driver from libcloud.common.providers import set_driver as _set_provider_driver __all__ = [ 'DRIVERS', 'get_driver', 'set_driver' ] DRIVERS = { Provider.DUMMY: ('libcloud.dns.drivers.dummy', 'DummyDNSDriver'), Provider.LINODE: ('libcloud.dns.drivers.linode', 'LinodeDNSDriver'), Provider.ZERIGO: ('libcloud.dns.drivers.zerigo', 'ZerigoDNSDriver'), Provider.RACKSPACE: ('libcloud.dns.drivers.rackspace', 'RackspaceDNSDriver'), Provider.HOSTVIRTUAL: ('libcloud.dns.drivers.hostvirtual', 'HostVirtualDNSDriver'), Provider.ROUTE53: ('libcloud.dns.drivers.route53', 'Route53DNSDriver'), Provider.GANDI: ('libcloud.dns.drivers.gandi', 'GandiDNSDriver'), Provider.GOOGLE: ('libcloud.dns.drivers.google', 'GoogleDNSDriver'), Provider.SOFTLAYER: ('libcloud.dns.drivers.softlayer', 'SoftLayerDNSDriver'), Provider.DIGITAL_OCEAN: ('libcloud.dns.drivers.digitalocean', 'DigitalOceanDNSDriver'), Provider.WORLDWIDEDNS: ('libcloud.dns.drivers.worldwidedns', 'WorldWideDNSDriver'), Provider.DNSIMPLE: ('libcloud.dns.drivers.dnsimple', 'DNSimpleDNSDriver'), Provider.POINTDNS: ('libcloud.dns.drivers.pointdns', 'PointDNSDriver'), Provider.VULTR: ('libcloud.dns.drivers.vultr', 'VultrDNSDriver'), Provider.LIQUIDWEB: ('libcloud.dns.drivers.liquidweb', 'LiquidWebDNSDriver'), Provider.ZONOMI: ('libcloud.dns.drivers.zonomi', 'ZonomiDNSDriver'), Provider.DURABLEDNS: ('libcloud.dns.drivers.durabledns', 'DurableDNSDriver'), Provider.AURORADNS: ('libcloud.dns.drivers.auroradns', 'AuroraDNSDriver'), Provider.GODADDY: ('libcloud.dns.drivers.godaddy', 'GoDaddyDNSDriver'), Provider.CLOUDFLARE: ('libcloud.dns.drivers.cloudflare', 'CloudFlareDNSDriver'), Provider.NFSN: ('libcloud.dns.drivers.nfsn', 'NFSNDNSDriver'), Provider.NSONE: ('libcloud.dns.drivers.nsone', 'NsOneDNSDriver'), Provider.LUADNS: ('libcloud.dns.drivers.luadns', 'LuadnsDNSDriver'), 
Provider.BUDDYNS: ('libcloud.dns.drivers.buddyns', 'BuddyNSDNSDriver'), Provider.POWERDNS: ('libcloud.dns.drivers.powerdns', 'PowerDNSDriver'), Provider.ONAPP: ('libcloud.dns.drivers.onapp', 'OnAppDNSDriver'), # Deprecated Provider.RACKSPACE_US: ('libcloud.dns.drivers.rackspace', 'RackspaceUSDNSDriver'), Provider.RACKSPACE_UK: ('libcloud.dns.drivers.rackspace', 'RackspaceUKDNSDriver') } def get_driver(provider): deprecated_constants = OLD_CONSTANT_TO_NEW_MAPPING return _get_provider_driver(drivers=DRIVERS, provider=provider, deprecated_constants=deprecated_constants) def set_driver(provider, module, klass): return _set_provider_driver(drivers=DRIVERS, provider=provider, module=module, klass=klass) apache-libcloud-2.2.1/libcloud/dns/__init__.py0000664000175000017500000000000012701023453021073 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/dns/types.py0000664000175000017500000000770613153541406020531 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from libcloud.common.types import LibcloudError __all__ = [ 'Provider', 'RecordType', 'ZoneError', 'ZoneDoesNotExistError', 'ZoneAlreadyExistsError', 'RecordError', 'RecordDoesNotExistError', 'RecordAlreadyExistsError', 'OLD_CONSTANT_TO_NEW_MAPPING' ] class Provider(object): """ Defines for each of the supported providers Non-Dummy drivers are sorted in alphabetical order. Please preserve this ordering when adding new drivers. """ DUMMY = 'dummy' AURORADNS = 'auroradns' BUDDYNS = 'buddyns' CLOUDFLARE = 'cloudflare' DIGITAL_OCEAN = 'digitalocean' DNSIMPLE = 'dnsimple' DURABLEDNS = 'durabledns' GANDI = 'gandi' GODADDY = 'godaddy' GOOGLE = 'google' HOSTVIRTUAL = 'hostvirtual' LINODE = 'linode' LIQUIDWEB = 'liquidweb' LUADNS = 'luadns' NFSN = 'nfsn' NSONE = 'nsone' ONAPP = 'onapp' POINTDNS = 'pointdns' POWERDNS = 'powerdns' RACKSPACE = 'rackspace' ROUTE53 = 'route53' SOFTLAYER = 'softlayer' VULTR = 'vultr' WORLDWIDEDNS = 'worldwidedns' ZERIGO = 'zerigo' ZONOMI = 'zonomi' DNSPOD = 'dnspod' # Deprecated RACKSPACE_US = 'rackspace_us' RACKSPACE_UK = 'rackspace_uk' OLD_CONSTANT_TO_NEW_MAPPING = { Provider.RACKSPACE_US: Provider.RACKSPACE, Provider.RACKSPACE_UK: Provider.RACKSPACE, } class RecordType(object): """ DNS record type. 
""" A = 'A' AAAA = 'AAAA' AFSDB = 'A' ALIAS = 'ALIAS' CERT = 'CERT' CNAME = 'CNAME' DNAME = 'DNAME' DNSKEY = 'DNSKEY' DS = 'DS' GEO = 'GEO' HINFO = 'HINFO' KEY = 'KEY' LOC = 'LOC' MX = 'MX' NAPTR = 'NAPTR' NS = 'NS' NSEC = 'NSEC' OPENPGPKEY = 'OPENPGPKEY' PTR = 'PTR' REDIRECT = 'REDIRECT' RP = 'RP' RRSIG = 'RRSIG' SOA = 'SOA' SPF = 'SPF' SRV = 'SRV' SSHFP = 'SSHFP' TLSA = 'TLSA' TXT = 'TXT' URL = 'URL' WKS = 'WKS' class ZoneError(LibcloudError): error_type = 'ZoneError' kwargs = ('zone_id', ) def __init__(self, value, driver, zone_id): self.zone_id = zone_id super(ZoneError, self).__init__(value=value, driver=driver) def __str__(self): return self.__repr__() def __repr__(self): return ('<%s in %s, zone_id=%s, value=%s>' % (self.error_type, repr(self.driver), self.zone_id, self.value)) class ZoneDoesNotExistError(ZoneError): error_type = 'ZoneDoesNotExistError' class ZoneAlreadyExistsError(ZoneError): error_type = 'ZoneAlreadyExistsError' class RecordError(LibcloudError): error_type = 'RecordError' def __init__(self, value, driver, record_id): self.record_id = record_id super(RecordError, self).__init__(value=value, driver=driver) def __str__(self): return self.__repr__() def __repr__(self): return ('<%s in %s, record_id=%s, value=%s>' % (self.error_type, repr(self.driver), self.record_id, self.value)) class RecordDoesNotExistError(RecordError): error_type = 'RecordDoesNotExistError' class RecordAlreadyExistsError(RecordError): error_type = 'RecordAlreadyExistsError' apache-libcloud-2.2.1/libcloud/dns/drivers/0000775000175000017500000000000013160535107020456 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/dns/drivers/liquidweb.py0000664000175000017500000003150212705475361023027 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Liquid Web DNS Driver
"""

import sys

try:
    import simplejson as json
except ImportError:
    import json

from libcloud.common.liquidweb import LiquidWebResponse, LiquidWebConnection
from libcloud.common.liquidweb import APIException
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.types import RecordAlreadyExistsError


__all__ = [
    'LiquidWebDNSDriver'
]


class LiquidWebDNSResponse(LiquidWebResponse):
    """
    Response class for the Liquid Web DNS API (no DNS-specific parsing
    needed beyond the common Liquid Web handling).
    """
    pass


class LiquidWebDNSConnection(LiquidWebConnection):
    # Connection that produces LiquidWebDNSResponse objects.
    responseCls = LiquidWebDNSResponse


class LiquidWebDNSDriver(DNSDriver):
    """
    Driver for the Liquid Web (Storm) DNS API.

    All calls POST (or PUT) a JSON body of the form {'params': {...}} and
    read the payload back from ``response.objects[0]``.
    """
    type = Provider.LIQUIDWEB
    name = 'Liquidweb DNS'
    website = 'https://www.liquidweb.com'
    connectionCls = LiquidWebDNSConnection

    # Record types the Liquid Web API accepts; the API uses the same
    # strings as the libcloud RecordType constants.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.PTR: 'PTR',
        RecordType.SOA: 'SOA',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT'
    }

    def list_zones(self):
        """
        Return a list of zones.

        :return: ``list`` of :class:`Zone`
        """
        action = '/v1/Network/DNS/Zone/list'
        response = self.connection.request(action=action,
                                           method='POST')

        zones = self._to_zones(response.objects[0])

        return zones

    def list_records(self, zone):
        """
        Return a list of records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :return: ``list`` of :class:`Record`
        """
        action = '/v1/Network/DNS/Record/list'
        data = json.dumps({'params': {'zone_id': zone.id}})
        response = self.connection.request(action=action,
                                           method='POST',
                                           data=data)

        records = self._to_records(response.objects[0], zone=zone)

        return records

    def get_zone(self, zone_id):
        """
        Return a Zone instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :rtype: :class:`Zone`
        """
        action = '/v1/Network/DNS/Zone/details'
        data = json.dumps({'params': {'id': zone_id}})
        try:
            response = self.connection.request(action=action,
                                               method='POST',
                                               data=data)
        except APIException:
            # sys.exc_info() is used instead of "except ... as e" for
            # Python 2.5 compatibility (historical libcloud style).
            e = sys.exc_info()[1]
            # The API signals a missing zone with this error class.
            if e.error_class == 'LW::Exception::RecordNotFound':
                raise ZoneDoesNotExistError(zone_id=zone_id,
                                            value=e.value, driver=self)
            else:
                raise e

        zone = self._to_zone(response.objects[0])

        return zone

    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :param record_id: ID of the required record
        :type record_id: ``str``

        :rtype: :class:`Record`
        """
        # Fetch the zone first so the returned Record can reference it
        # (also raises ZoneDoesNotExistError for a bad zone_id).
        zone = self.get_zone(zone_id=zone_id)
        action = '/v1/Network/DNS/Record/details'
        data = json.dumps({'params': {'id': record_id}})
        try:
            response = self.connection.request(action=action,
                                               method='POST',
                                               data=data)
        except APIException:
            e = sys.exc_info()[1]
            if e.error_class == 'LW::Exception::RecordNotFound':
                raise RecordDoesNotExistError(record_id=record_id,
                                              driver=self, value=e.value)
            else:
                raise e

        record = self._to_record(response.objects[0], zone=zone)

        return record

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        :param domain: Zone domain name (e.g. example.com)
        :type domain: ``str``

        :param type: Zone type (This is not really used. See API docs for
                     extra parameters).
        :type type: ``str``

        :param ttl: TTL for new records. (This is not really used)
        :type ttl: ``int``

        :param extra: Extra attributes (driver specific).
                      ('region_support', 'zone_data')
        :type extra: ``dict``

        :rtype: :class:`Zone`

        For more info, please see:
        https://www.liquidweb.com/storm/api/docs/v1/Network/DNS/Zone.html
        """
        action = '/v1/Network/DNS/Zone/create'
        data = {'params': {'name': domain}}
        if extra is not None:
            data['params'].update(extra)
        try:
            data = json.dumps(data)
            response = self.connection.request(action=action,
                                               method='POST',
                                               data=data)
        except APIException:
            e = sys.exc_info()[1]
            if e.error_class == 'LW::Exception::DuplicateRecord':
                raise ZoneAlreadyExistsError(zone_id=domain,
                                             value=e.value, driver=self)
            else:
                raise e

        zone = self._to_zone(response.objects[0])

        return zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a record.

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param zone: Zone which the records will be created for.
        :type zone: :class:`Zone`

        :param type: DNS record type ( 'A', 'AAAA', 'CNAME', 'MX', 'NS',
                     'PTR', 'SOA', 'SRV', 'TXT').
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: (optional) Extra attributes ('prio', 'ttl').
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        action = '/v1/Network/DNS/Record/create'
        # NOTE(review): the raw ``type`` value is posted as-is (not mapped
        # through RECORD_TYPE_MAP); the constants happen to match the API
        # strings for all supported types.
        to_post = {'params': {'name': name,
                              'rdata': data,
                              'type': type,
                              'zone': zone.domain,
                              'zone_id': zone.id
                              }
                   }

        if extra is not None:
            to_post['params'].update(extra)

        data = json.dumps(to_post)
        try:
            response = self.connection.request(action=action,
                                               method='POST',
                                               data=data)
        except APIException:
            e = sys.exc_info()[1]
            if e.error_class == 'LW::Exception::DuplicateRecord':
                raise RecordAlreadyExistsError(record_id=name,
                                               value=e.value, driver=self)
            else:
                raise e

        record = self._to_record(response.objects[0], zone=zone)

        return record

    def update_record(self, record, name, type, data, extra=None):
        """
        Update an existing record.

        :param record: Record to update.
        :type record: :class:`Record`

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param type: DNS record type ( 'A', 'AAAA', 'CNAME', 'MX', 'NS',
                     'PTR', 'SOA', 'SRV', 'TXT').
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: (optional) Extra attributes ('name', 'rdata', 'prio',
                      'ttl').
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        zone = record.zone
        action = '/v1/Network/DNS/Record/update'
        # NOTE(review): ``type`` is accepted for interface compatibility
        # but not posted; the API apparently identifies the record by id.
        to_post = {'params': {'id': int(record.id),
                              'name': name,
                              'rdata': data}}
        if extra is not None:
            to_post['params'].update(extra)

        j_data = json.dumps(to_post)
        try:
            response = self.connection.request(action=action,
                                               method='PUT',
                                               data=j_data)
        except APIException:
            e = sys.exc_info()[1]
            if e.error_class == 'LW::Exception::RecordNotFound':
                raise RecordDoesNotExistError(record_id=record.id,
                                              driver=self, value=e.value)
            else:
                raise e

        record = self._to_record(response.objects[0], zone=zone)

        return record

    def delete_zone(self, zone):
        """
        Delete a zone.

        Note: This will delete all the records belonging to this zone.

        :param zone: Zone to delete.
        :type zone: :class:`Zone`

        :rtype: ``bool``
        """
        action = '/v1/Network/DNS/Zone/delete'
        data = json.dumps({'params': {'id': zone.id}})
        try:
            response = self.connection.request(action=action,
                                               method='POST',
                                               data=data)
        except APIException:
            e = sys.exc_info()[1]
            if e.error_class == 'LW::Exception::RecordNotFound':
                raise ZoneDoesNotExistError(zone_id=zone.id,
                                            value=e.value, driver=self)
            else:
                raise e

        # On success the API echoes the deleted zone's domain back.
        return zone.domain in response.objects

    def delete_record(self, record):
        """
        Delete a record.

        :param record: Record to delete.
        :type record: :class:`Record`

        :rtype: ``bool``
        """
        action = '/v1/Network/DNS/Record/delete'
        data = json.dumps({'params': {'id': record.id}})
        try:
            response = self.connection.request(action=action,
                                               method='POST',
                                               data=data)
        except APIException:
            e = sys.exc_info()[1]
            if e.error_class == 'LW::Exception::RecordNotFound':
                raise RecordDoesNotExistError(record_id=record.id,
                                              driver=self, value=e.value)
            else:
                raise e

        # On success the API echoes the deleted record's id back.
        return record.id in response.objects

    def _to_zone(self, item):
        # Convert one API zone dict into a Zone; everything the Zone
        # constructor doesn't take explicitly goes into ``extra``.
        common_attr = ['id', 'name', 'type']
        extra = {}
        for key in item:
            if key not in common_attr:
                extra[key] = item.get(key)
        zone = Zone(domain=item['name'], id=item['id'], type=item['type'],
                    ttl=None, driver=self, extra=extra)

        return zone

    def _to_zones(self, items):
        zones = []
        for item in items:
            zones.append(self._to_zone(item))

        return zones

    def _to_record(self, item, zone):
        # Convert one API record dict ('rdata' holds the record data) into
        # a Record; unrecognised keys are preserved in ``extra``.
        common_attr = ['id', 'rdata', 'name', 'type']
        extra = {}
        for key in item:
            if key not in common_attr:
                extra[key] = item.get(key)
        record = Record(id=item['id'], name=item['name'], type=item['type'],
                        data=item['rdata'], zone=zone, driver=self,
                        extra=extra)

        return record

    def _to_records(self, items, zone):
        records = []
        for item in items:
            records.append(self._to_record(item, zone))

        return records
apache-libcloud-2.2.1/libcloud/dns/drivers/gandi.py0000664000175000017500000002174012701023453022112 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement __all__ = [ 'GandiDNSDriver' ] from libcloud.common.gandi import BaseGandiDriver, GandiConnection from libcloud.common.gandi import GandiResponse from libcloud.dns.types import Provider, RecordType from libcloud.dns.types import RecordError from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError from libcloud.dns.base import DNSDriver, Zone, Record TTL_MIN = 30 TTL_MAX = 2592000 # 30 days class NewZoneVersion(object): """ Changes to a zone in the Gandi DNS service need to be wrapped in a new version object. The changes are made to the new version, then that version is made active. In effect, this is a transaction. Any calls made inside this context manager will be applied to a new version id. If your changes are successful (and only if they are successful) they are activated. 
""" def __init__(self, driver, zone): self.driver = driver self.connection = driver.connection self.zone = zone def __enter__(self): zid = int(self.zone.id) self.connection.set_context({'zone_id': self.zone.id}) vid = self.connection.request('domain.zone.version.new', zid).object self.vid = vid return vid def __exit__(self, type, value, traceback): if not traceback: zid = int(self.zone.id) con = self.connection con.set_context({'zone_id': self.zone.id}) con.request('domain.zone.version.set', zid, self.vid).object class GandiDNSResponse(GandiResponse): exceptions = { 581042: ZoneDoesNotExistError, } class GandiDNSConnection(GandiConnection): responseCls = GandiDNSResponse class GandiDNSDriver(BaseGandiDriver, DNSDriver): """ API reference can be found at: http://doc.rpc.gandi.net/domain/reference.html """ type = Provider.GANDI name = 'Gandi DNS' website = 'http://www.gandi.net/domain' connectionCls = GandiDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.LOC: 'LOC', RecordType.MX: 'MX', RecordType.NS: 'NS', RecordType.SPF: 'SPF', RecordType.SRV: 'SRV', RecordType.TXT: 'TXT', RecordType.WKS: 'WKS', } def _to_zone(self, zone): return Zone( id=str(zone['id']), domain=zone['name'], type='master', ttl=0, driver=self, extra={} ) def _to_zones(self, zones): ret = [] for z in zones: ret.append(self._to_zone(z)) return ret def list_zones(self): zones = self.connection.request('domain.zone.list') return self._to_zones(zones.object) def get_zone(self, zone_id): zid = int(zone_id) self.connection.set_context({'zone_id': zone_id}) zone = self.connection.request('domain.zone.info', zid) return self._to_zone(zone.object) def create_zone(self, domain, type='master', ttl=None, extra=None): params = { 'name': domain, } info = self.connection.request('domain.zone.create', params) return self._to_zone(info.object) def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): zid = int(zone.id) params = 
{'name': domain} self.connection.set_context({'zone_id': zone.id}) zone = self.connection.request('domain.zone.update', zid, params) return self._to_zone(zone.object) def delete_zone(self, zone): zid = int(zone.id) self.connection.set_context({'zone_id': zone.id}) res = self.connection.request('domain.zone.delete', zid) return res.object def _to_record(self, record, zone): extra = {'ttl': int(record['ttl'])} value = record['value'] if record['type'] == 'MX': # Record is in the following form: # # e.g. 15 aspmx.l.google.com split = record['value'].split(' ') extra['priority'] = int(split[0]) value = split[1] return Record( id='%s:%s' % (record['type'], record['name']), name=record['name'], type=self._string_to_record_type(record['type']), data=value, zone=zone, driver=self, ttl=record['ttl'], extra=extra) def _to_records(self, records, zone): retval = [] for r in records: retval.append(self._to_record(r, zone)) return retval def list_records(self, zone): zid = int(zone.id) self.connection.set_context({'zone_id': zone.id}) records = self.connection.request('domain.zone.record.list', zid, 0) return self._to_records(records.object, zone) def get_record(self, zone_id, record_id): zid = int(zone_id) record_type, name = record_id.split(':', 1) filter_opts = { 'name': name, 'type': record_type } self.connection.set_context({'zone_id': zone_id}) records = self.connection.request('domain.zone.record.list', zid, 0, filter_opts).object if len(records) == 0: raise RecordDoesNotExistError(value='', driver=self, record_id=record_id) return self._to_record(records[0], self.get_zone(zone_id)) def _validate_record(self, record_id, name, record_type, data, extra): if len(data) > 1024: raise RecordError('Record data must be <= 1024 characters', driver=self, record_id=record_id) if extra and 'ttl' in extra: if extra['ttl'] < TTL_MIN: raise RecordError('TTL must be at least 30 seconds', driver=self, record_id=record_id) if extra['ttl'] > TTL_MAX: raise RecordError('TTL must not excdeed 
30 days', driver=self, record_id=record_id) def create_record(self, name, zone, type, data, extra=None): self._validate_record(None, name, type, data, extra) zid = int(zone.id) create = { 'name': name, 'type': self.RECORD_TYPE_MAP[type], 'value': data } if 'ttl' in extra: create['ttl'] = extra['ttl'] with NewZoneVersion(self, zone) as vid: con = self.connection con.set_context({'zone_id': zone.id}) rec = con.request('domain.zone.record.add', zid, vid, create).object return self._to_record(rec, zone) def update_record(self, record, name, type, data, extra): self._validate_record(record.id, name, type, data, extra) filter_opts = { 'name': record.name, 'type': self.RECORD_TYPE_MAP[record.type] } update = { 'name': name, 'type': self.RECORD_TYPE_MAP[type], 'value': data } if 'ttl' in extra: update['ttl'] = extra['ttl'] zid = int(record.zone.id) with NewZoneVersion(self, record.zone) as vid: con = self.connection con.set_context({'zone_id': record.zone.id}) con.request('domain.zone.record.delete', zid, vid, filter_opts) res = con.request('domain.zone.record.add', zid, vid, update).object return self._to_record(res, record.zone) def delete_record(self, record): zid = int(record.zone.id) filter_opts = { 'name': record.name, 'type': self.RECORD_TYPE_MAP[record.type] } with NewZoneVersion(self, record.zone) as vid: con = self.connection con.set_context({'zone_id': record.zone.id}) count = con.request('domain.zone.record.delete', zid, vid, filter_opts).object if count == 1: return True raise RecordDoesNotExistError(value='No such record', driver=self, record_id=record.id) apache-libcloud-2.2.1/libcloud/dns/drivers/__init__.py0000664000175000017500000000000012701023453022551 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/dns/drivers/hostvirtual.py0000664000175000017500000002227212701023453023415 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License.You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = [ 'HostVirtualDNSDriver' ] import sys try: import simplejson as json except: import json from libcloud.utils.py3 import httplib from libcloud.utils.misc import merge_valid_keys, get_new_obj from libcloud.common.hostvirtual import HostVirtualResponse from libcloud.common.hostvirtual import HostVirtualConnection from libcloud.compute.drivers.hostvirtual import API_ROOT from libcloud.dns.types import Provider, RecordType from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError from libcloud.dns.base import DNSDriver, Zone, Record VALID_RECORD_EXTRA_PARAMS = ['prio', 'ttl'] class HostVirtualDNSResponse(HostVirtualResponse): def parse_error(self): context = self.connection.context status = int(self.status) if status == httplib.NOT_FOUND: if context['resource'] == 'zone': raise ZoneDoesNotExistError( value=self.parse_body()['error']['message'], driver=self, zone_id=context['id']) elif context['resource'] == 'record': raise RecordDoesNotExistError( value=self.parse_body()['error']['message'], driver=self, record_id=context['id']) super(HostVirtualDNSResponse, self).parse_error() return self.body class HostVirtualDNSConnection(HostVirtualConnection): responseCls = HostVirtualDNSResponse class HostVirtualDNSDriver(DNSDriver): type = Provider.HOSTVIRTUAL name = 'Host Virtual DNS' website 
= 'https://www.hostvirtual.com/' connectionCls = HostVirtualDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.NS: 'SPF', RecordType.SRV: 'SRV', RecordType.TXT: 'TXT', } def __init__(self, key, secure=True, host=None, port=None): super(HostVirtualDNSDriver, self).__init__(key=key, secure=secure, host=host, port=port) def _to_zones(self, items): zones = [] for item in items: zones.append(self._to_zone(item)) return zones def _to_zone(self, item): extra = {} if 'records' in item: extra['records'] = item['records'] if item['type'] == 'NATIVE': item['type'] = 'master' zone = Zone(id=item['id'], domain=item['name'], type=item['type'], ttl=item['ttl'], driver=self, extra=extra) return zone def _to_records(self, items, zone=None): records = [] for item in items: records.append(self._to_record(item=item, zone=zone)) return records def _to_record(self, item, zone=None): extra = {'ttl': item['ttl']} type = self._string_to_record_type(item['type']) name = item['name'][:-len(zone.domain) - 1] record = Record(id=item['id'], name=name, type=type, data=item['content'], zone=zone, driver=self, ttl=item['ttl'], extra=extra) return record def list_zones(self): result = self.connection.request( API_ROOT + '/dns/zones/').object zones = self._to_zones(result) return zones def list_records(self, zone): params = {'id': zone.id} self.connection.set_context({'resource': 'zone', 'id': zone.id}) try: result = self.connection.request( API_ROOT + '/dns/records/', params=params).object except ZoneDoesNotExistError: e = sys.exc_info()[1] if 'Not Found: No Records Found' in e.value: return [] raise e records = self._to_records(items=result, zone=zone) return records def get_zone(self, zone_id): params = {'id': zone_id} self.connection.set_context({'resource': 'zone', 'id': zone_id}) result = self.connection.request( API_ROOT + '/dns/zone/', params=params).object if 'id' not in result: raise 
ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id) zone = self._to_zone(result) return zone def get_record(self, zone_id, record_id): zone = self.get_zone(zone_id=zone_id) params = {'id': record_id} self.connection.set_context({'resource': 'record', 'id': record_id}) result = self.connection.request( API_ROOT + '/dns/record/', params=params).object if 'id' not in result: raise RecordDoesNotExistError(value='', driver=self, record_id=record_id) record = self._to_record(item=result, zone=zone) return record def delete_zone(self, zone): params = {'id': zone.id} self.connection.set_context({'resource': 'zone', 'id': zone.id}) result = self.connection.request( API_ROOT + '/dns/zone/', params=params, method='DELETE').object return bool(result) def delete_record(self, record): params = {'id': record.id} self.connection.set_context({'resource': 'record', 'id': record.id}) result = self.connection.request( API_ROOT + '/dns/record/', params=params, method='DELETE').object return bool(result) def create_zone(self, domain, type='NATIVE', ttl=None, extra=None): if type == 'master': type = 'NATIVE' elif type == 'slave': type = 'SLAVE' params = {'name': domain, 'type': type, 'ttl': ttl} result = self.connection.request( API_ROOT + '/dns/zone/', data=json.dumps(params), method='POST').object extra = { 'soa': result['soa'], 'ns': result['ns'] } zone = Zone(id=result['id'], domain=domain, type=type, ttl=ttl, extra=extra, driver=self) return zone def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): params = {'id': zone.id} if domain: params['name'] = domain if type: params['type'] = type self.connection.set_context({'resource': 'zone', 'id': zone.id}) self.connection.request(API_ROOT + '/dns/zone/', data=json.dumps(params), method='PUT').object updated_zone = get_new_obj( obj=zone, klass=Zone, attributes={ 'domain': domain, 'type': type, 'ttl': ttl, 'extra': extra }) return updated_zone def create_record(self, name, zone, type, data, extra=None): 
params = { 'name': name, 'type': self.RECORD_TYPE_MAP[type], 'domain_id': zone.id, 'content': data } merged = merge_valid_keys( params=params, valid_keys=VALID_RECORD_EXTRA_PARAMS, extra=extra ) self.connection.set_context({'resource': 'zone', 'id': zone.id}) result = self.connection.request( API_ROOT + '/dns/record/', data=json.dumps(params), method='POST').object record = Record(id=result['id'], name=name, type=type, data=data, extra=merged, zone=zone, ttl=merged.get('ttl', None), driver=self) return record def update_record(self, record, name=None, type=None, data=None, extra=None): params = { 'domain_id': record.zone.id, 'record_id': record.id } if name: params['name'] = name if data: params['content'] = data if type is not None: params['type'] = self.RECORD_TYPE_MAP[type] merged = merge_valid_keys( params=params, valid_keys=VALID_RECORD_EXTRA_PARAMS, extra=extra ) self.connection.set_context({'resource': 'record', 'id': record.id}) self.connection.request(API_ROOT + '/dns/record/', data=json.dumps(params), method='PUT').object updated_record = get_new_obj( obj=record, klass=Record, attributes={ 'name': name, 'data': data, 'type': type, 'extra': merged }) return updated_record apache-libcloud-2.2.1/libcloud/dns/drivers/rackspace.py0000664000175000017500000006003612701223644022771 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from libcloud.utils.py3 import httplib from libcloud.common.openstack import OpenStackDriverMixin from libcloud.common.base import PollingConnection from libcloud.common.exceptions import BaseHTTPError from libcloud.common.types import LibcloudError from libcloud.utils.misc import merge_valid_keys, get_new_obj from libcloud.common.rackspace import AUTH_URL from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection from libcloud.compute.drivers.openstack import OpenStack_1_1_Response from libcloud.dns.types import Provider, RecordType from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError from libcloud.dns.base import DNSDriver, Zone, Record __all__ = [ 'RackspaceDNSResponse', 'RackspaceDNSConnection' ] VALID_ZONE_EXTRA_PARAMS = ['email', 'comment', 'ns1'] VALID_RECORD_EXTRA_PARAMS = ['ttl', 'comment', 'priority', 'created', 'updated'] class RackspaceDNSResponse(OpenStack_1_1_Response): """ Rackspace DNS Response class. 
""" def parse_error(self): status = int(self.status) context = self.connection.context body = self.parse_body() if status == httplib.NOT_FOUND: if context['resource'] == 'zone': raise ZoneDoesNotExistError(value='', driver=self, zone_id=context['id']) elif context['resource'] == 'record': raise RecordDoesNotExistError(value='', driver=self, record_id=context['id']) if body: if 'code' and 'message' in body: err = '%s - %s (%s)' % (body['code'], body['message'], body['details']) return err elif 'validationErrors' in body: errors = [m for m in body['validationErrors']['messages']] err = 'Validation errors: %s' % ', '.join(errors) return err raise LibcloudError('Unexpected status code: %s' % (status)) class RackspaceDNSConnection(OpenStack_1_1_Connection, PollingConnection): """ Rackspace DNS Connection class. """ responseCls = RackspaceDNSResponse XML_NAMESPACE = None poll_interval = 2.5 timeout = 30 auth_url = AUTH_URL _auth_version = '2.0' def __init__(self, *args, **kwargs): self.region = kwargs.pop('region', None) super(RackspaceDNSConnection, self).__init__(*args, **kwargs) def get_poll_request_kwargs(self, response, context, request_kwargs): job_id = response.object['jobId'] kwargs = {'action': '/status/%s' % (job_id), 'params': {'showDetails': True}} return kwargs def has_completed(self, response): status = response.object['status'] if status == 'ERROR': data = response.object['error'] if 'code' and 'message' in data: message = '%s - %s (%s)' % (data['code'], data['message'], data['details']) else: message = data['message'] raise LibcloudError(message, driver=self.driver) return status == 'COMPLETED' def get_endpoint(self): if '2.0' in self._auth_version: ep = self.service_catalog.get_endpoint(name='cloudDNS', service_type='rax:dns', region=None) else: raise LibcloudError("Auth version %s not supported" % (self._auth_version)) public_url = ep.url # This is a nasty hack, but because of how global auth and old accounts # work, there is no way around it. 
if self.region == 'us': # Old UK account, which only has us endpoint in the catalog public_url = public_url.replace('https://lon.dns.api', 'https://dns.api') if self.region == 'uk': # Old US account, which only has uk endpoint in the catalog public_url = public_url.replace('https://dns.api', 'https://lon.dns.api') return public_url class RackspacePTRRecord(object): def __init__(self, id, ip, domain, driver, extra=None): self.id = str(id) if id else None self.ip = ip self.type = RecordType.PTR self.domain = domain self.driver = driver self.extra = extra or {} def update(self, domain, extra=None): return self.driver.ex_update_ptr_record(record=self, domain=domain, extra=extra) def delete(self): return self.driver.ex_delete_ptr_record(record=self) def __repr__(self): return ('<%s: ip=%s, domain=%s, provider=%s ...>' % (self.__class__.__name__, self.ip, self.domain, self.driver.name)) class RackspaceDNSDriver(DNSDriver, OpenStackDriverMixin): name = 'Rackspace DNS' website = 'http://www.rackspace.com/' type = Provider.RACKSPACE connectionCls = RackspaceDNSConnection def __init__(self, key, secret=None, secure=True, host=None, port=None, region='us', **kwargs): valid_regions = self.list_regions() if region not in valid_regions: raise ValueError('Invalid region: %s' % (region)) OpenStackDriverMixin.__init__(self, **kwargs) super(RackspaceDNSDriver, self).__init__(key=key, secret=secret, host=host, port=port, region=region) RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.NS: 'NS', RecordType.PTR: 'PTR', RecordType.SRV: 'SRV', RecordType.TXT: 'TXT', } @classmethod def list_regions(cls): return ['us', 'uk'] def iterate_zones(self): offset = 0 limit = 100 while True: params = { 'limit': limit, 'offset': offset, } response = self.connection.request( action='/domains', params=params).object zones_list = response['domains'] for item in zones_list: yield self._to_zone(item) if 
_rackspace_result_has_more(response, len(zones_list), limit): offset += limit else: break def iterate_records(self, zone): self.connection.set_context({'resource': 'zone', 'id': zone.id}) offset = 0 limit = 100 while True: params = { 'showRecord': True, 'limit': limit, 'offset': offset, } response = self.connection.request( action='/domains/%s' % (zone.id), params=params).object records_list = response['recordsList'] records = records_list['records'] for item in records: record = self._to_record(data=item, zone=zone) yield record if _rackspace_result_has_more(records_list, len(records), limit): offset += limit else: break def get_zone(self, zone_id): self.connection.set_context({'resource': 'zone', 'id': zone_id}) response = self.connection.request(action='/domains/%s' % (zone_id)) zone = self._to_zone(data=response.object) return zone def get_record(self, zone_id, record_id): zone = self.get_zone(zone_id=zone_id) self.connection.set_context({'resource': 'record', 'id': record_id}) response = self.connection.request(action='/domains/%s/records/%s' % (zone_id, record_id)).object record = self._to_record(data=response, zone=zone) return record def create_zone(self, domain, type='master', ttl=None, extra=None): extra = extra if extra else {} # Email address is required if 'email' not in extra: raise ValueError('"email" key must be present in extra dictionary') payload = {'name': domain, 'emailAddress': extra['email'], 'recordsList': {'records': []}} if ttl: payload['ttl'] = ttl if 'comment' in extra: payload['comment'] = extra['comment'] data = {'domains': [payload]} response = self.connection.async_request(action='/domains', method='POST', data=data) zone = self._to_zone(data=response.object['response']['domains'][0]) return zone def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): # Only ttl, comment and email address can be changed extra = extra if extra else {} if domain: raise LibcloudError('Domain cannot be changed', driver=self) data = {} 
if ttl: data['ttl'] = int(ttl) if 'email' in extra: data['emailAddress'] = extra['email'] if 'comment' in extra: data['comment'] = extra['comment'] type = type if type else zone.type ttl = ttl if ttl else zone.ttl self.connection.set_context({'resource': 'zone', 'id': zone.id}) self.connection.async_request(action='/domains/%s' % (zone.id), method='PUT', data=data) merged = merge_valid_keys(params=copy.deepcopy(zone.extra), valid_keys=VALID_ZONE_EXTRA_PARAMS, extra=extra) updated_zone = get_new_obj(obj=zone, klass=Zone, attributes={'type': type, 'ttl': ttl, 'extra': merged}) return updated_zone def create_record(self, name, zone, type, data, extra=None): # Name must be a FQDN - e.g. if domain is "foo.com" then a record # name is "bar.foo.com" extra = extra if extra else {} name = self._to_full_record_name(domain=zone.domain, name=name) data = {'name': name, 'type': self.RECORD_TYPE_MAP[type], 'data': data} if 'ttl' in extra: data['ttl'] = int(extra['ttl']) if 'priority' in extra: data['priority'] = int(extra['priority']) payload = {'records': [data]} self.connection.set_context({'resource': 'zone', 'id': zone.id}) response = self.connection.async_request(action='/domains/%s/records' % (zone.id), data=payload, method='POST').object record = self._to_record(data=response['response']['records'][0], zone=zone) return record def update_record(self, record, name=None, type=None, data=None, extra=None): # Only data, ttl, and comment attributes can be modified, but name # attribute must always be present. 
extra = extra if extra else {} name = self._to_full_record_name(domain=record.zone.domain, name=record.name) payload = {'name': name} if data: payload['data'] = data if 'ttl' in extra: payload['ttl'] = extra['ttl'] if 'comment' in extra: payload['comment'] = extra['comment'] type = type if type is not None else record.type data = data if data else record.data self.connection.set_context({'resource': 'record', 'id': record.id}) self.connection.async_request(action='/domains/%s/records/%s' % (record.zone.id, record.id), method='PUT', data=payload) merged = merge_valid_keys(params=copy.deepcopy(record.extra), valid_keys=VALID_RECORD_EXTRA_PARAMS, extra=extra) updated_record = get_new_obj(obj=record, klass=Record, attributes={'type': type, 'data': data, 'driver': self, 'extra': merged}) return updated_record def delete_zone(self, zone): self.connection.set_context({'resource': 'zone', 'id': zone.id}) self.connection.async_request(action='/domains/%s' % (zone.id), method='DELETE') return True def delete_record(self, record): self.connection.set_context({'resource': 'record', 'id': record.id}) self.connection.async_request(action='/domains/%s/records/%s' % (record.zone.id, record.id), method='DELETE') return True def ex_iterate_ptr_records(self, device): """ Return a generator to iterate over existing PTR Records. The ``device`` should be an instance of one of these: :class:`libcloud.compute.base.Node` :class:`libcloud.loadbalancer.base.LoadBalancer` And it needs to have the following ``extra`` fields set: service_name - the service catalog name for the device uri - the URI pointing to the GET endpoint for the device Those are automatically set for you if you got the device from the Rackspace driver for that service. 
For example: server = rs_compute.ex_get_node_details(id) ptr_iter = rs_dns.ex_list_ptr_records(server) loadbalancer = rs_lbs.get_balancer(id) ptr_iter = rs_dns.ex_list_ptr_records(loadbalancer) Note: the Rackspace DNS API docs indicate that the device 'href' is optional, but testing does not bear this out. It throws a 400 Bad Request error if you do not pass in the 'href' from the server or loadbalancer. So ``device`` is required. :param device: the device that owns the IP :rtype: ``generator`` of :class:`RackspacePTRRecord` """ _check_ptr_extra_fields(device) params = {'href': device.extra['uri']} service_name = device.extra['service_name'] # without a valid context, the 404 on empty list will blow up # in the error-handling code self.connection.set_context({'resource': 'ptr_records'}) try: response = self.connection.request( action='/rdns/%s' % (service_name), params=params).object records = response['records'] link = dict(rel=service_name, **params) for item in records: record = self._to_ptr_record(data=item, link=link) yield record except BaseHTTPError as exc: # 404 just means empty list if exc.code == 404: return raise def ex_get_ptr_record(self, service_name, record_id): """ Get a specific PTR record by id. :param service_name: the service catalog name of the linked device(s) i.e. cloudLoadBalancers or cloudServersOpenStack :param record_id: the id (i.e. PTR-12345) of the PTR record :rtype: instance of :class:`RackspacePTRRecord` """ self.connection.set_context({'resource': 'record', 'id': record_id}) response = self.connection.request( action='/rdns/%s/%s' % (service_name, record_id)).object item = next(iter(response['recordsList']['records'])) return self._to_ptr_record(data=item, link=response['link']) def ex_create_ptr_record(self, device, ip, domain, extra=None): """ Create a PTR record for a specific IP on a specific device. 
The ``device`` should be an instance of one of these: :class:`libcloud.compute.base.Node` :class:`libcloud.loadbalancer.base.LoadBalancer` And it needs to have the following ``extra`` fields set: service_name - the service catalog name for the device uri - the URI pointing to the GET endpoint for the device Those are automatically set for you if you got the device from the Rackspace driver for that service. For example: server = rs_compute.ex_get_node_details(id) rs_dns.create_ptr_record(server, ip, domain) loadbalancer = rs_lbs.get_balancer(id) rs_dns.create_ptr_record(loadbalancer, ip, domain) :param device: the device that owns the IP :param ip: the IP for which you want to set reverse DNS :param domain: the fqdn you want that IP to represent :param extra: a ``dict`` with optional extra values: ttl - the time-to-live of the PTR record :rtype: instance of :class:`RackspacePTRRecord` """ _check_ptr_extra_fields(device) if extra is None: extra = {} # the RDNS API reverse the name and data fields for PTRs # the record name *should* be the ip and the data the fqdn data = { "name": domain, "type": RecordType.PTR, "data": ip } if 'ttl' in extra: data['ttl'] = extra['ttl'] payload = { "recordsList": { "records": [data] }, "link": { "content": "", "href": device.extra['uri'], "rel": device.extra['service_name'], } } response = self.connection.async_request( action='/rdns', method='POST', data=payload).object item = next(iter(response['response']['records'])) return self._to_ptr_record(data=item, link=payload['link']) def ex_update_ptr_record(self, record, domain=None, extra=None): """ Update a PTR record for a specific IP on a specific device. If you need to change the domain or ttl, use this API to update the record by deleting the old one and creating a new one. 
:param record: the original :class:`RackspacePTRRecord` :param domain: the fqdn you want that IP to represent :param extra: a ``dict`` with optional extra values: ttl - the time-to-live of the PTR record :rtype: instance of :class:`RackspacePTRRecord` """ if domain is not None and domain == record.domain: domain = None if extra is not None: extra = dict(extra) for key in extra: if key in record.extra and record.extra[key] == extra[key]: del extra[key] if domain is None and not extra: # nothing to do, it already matches return record _check_ptr_extra_fields(record) ip = record.ip self.ex_delete_ptr_record(record) # records have the same metadata in 'extra' as the original device # so you can pass the original record object in instead return self.ex_create_ptr_record(record, ip, domain, extra=extra) def ex_delete_ptr_record(self, record): """ Delete an existing PTR Record :param record: the original :class:`RackspacePTRRecord` :rtype: ``bool`` """ _check_ptr_extra_fields(record) self.connection.set_context({'resource': 'record', 'id': record.id}) self.connection.async_request( action='/rdns/%s' % (record.extra['service_name']), method='DELETE', params={'href': record.extra['uri'], 'ip': record.ip}, ) return True def _to_zone(self, data): id = data['id'] domain = data['name'] type = 'master' ttl = data.get('ttl', 0) extra = {} if 'emailAddress' in data: extra['email'] = data['emailAddress'] if 'comment' in data: extra['comment'] = data['comment'] zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl), driver=self, extra=extra) return zone def _to_record(self, data, zone): id = data['id'] fqdn = data['name'] name = self._to_partial_record_name(domain=zone.domain, name=fqdn) type = self._string_to_record_type(data['type']) record_data = data['data'] extra = {'fqdn': fqdn} for key in VALID_RECORD_EXTRA_PARAMS: if key in data: extra[key] = data[key] record = Record(id=str(id), name=name, type=type, data=record_data, zone=zone, driver=self, ttl=extra.get('ttl', 
None), extra=extra) return record def _to_ptr_record(self, data, link): id = data['id'] ip = data['data'] domain = data['name'] extra = {'uri': link['href'], 'service_name': link['rel']} for key in VALID_RECORD_EXTRA_PARAMS: if key in data: extra[key] = data[key] record = RackspacePTRRecord(id=str(id), ip=ip, domain=domain, driver=self, extra=extra) return record def _to_full_record_name(self, domain, name): """ Build a FQDN from a domain and record name. :param domain: Domain name. :type domain: ``str`` :param name: Record name. :type name: ``str`` """ if name: name = '%s.%s' % (name, domain) else: name = domain return name def _to_partial_record_name(self, domain, name): """ Remove domain portion from the record name. :param domain: Domain name. :type domain: ``str`` :param name: Full record name (fqdn). :type name: ``str`` """ if name == domain: # Map "root" record names to None to be consistent with other # drivers return None # Strip domain portion name = name.replace('.%s' % (domain), '') return name def _ex_connection_class_kwargs(self): kwargs = self.openstack_connection_kwargs() kwargs['region'] = self.region return kwargs def _rackspace_result_has_more(response, result_length, limit): # If rackspace returns less than the limit, then we've reached the end of # the result set. if result_length < limit: return False # Paginated results return links to the previous and next sets of data, but # 'next' only exists when there is more to get. 
for item in response.get('links', ()): if item['rel'] == 'next': return True return False def _check_ptr_extra_fields(device_or_record): if not (hasattr(device_or_record, 'extra') and isinstance(device_or_record.extra, dict) and device_or_record.extra.get('uri') is not None and device_or_record.extra.get('service_name') is not None): raise LibcloudError("Can't create PTR Record for %s because it " "doesn't have a 'uri' and 'service_name' in " "'extra'" % device_or_record) apache-libcloud-2.2.1/libcloud/dns/drivers/linode.py0000664000175000017500000002415713153541406022314 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
__all__ = [
    'LinodeDNSDriver'
]

from libcloud.utils.misc import merge_valid_keys, get_new_obj
from libcloud.common.linode import (API_ROOT, LinodeException,
                                    LinodeConnection, LinodeResponse)
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record

# Extra zone attributes accepted by the Linode API; anything else passed in
# ``extra`` is silently dropped by merge_valid_keys().
VALID_ZONE_EXTRA_PARAMS = ['SOA_Email', 'Refresh_sec', 'Retry_sec',
                           'Expire_sec', 'status', 'master_ips']

# Extra record attributes accepted by the Linode API.
VALID_RECORD_EXTRA_PARAMS = ['Priority', 'Weight', 'Port', 'Protocol',
                             'TTL_sec']


class LinodeDNSResponse(LinodeResponse):
    def _make_excp(self, error):
        """
        Map Linode error code 5 ("object not found") to the libcloud
        Zone/Record "does not exist" exceptions, using the request context
        set via connection.set_context() to decide which one applies.
        """
        result = super(LinodeDNSResponse, self)._make_excp(error)
        if isinstance(result, LinodeException) and result.code == 5:
            context = self.connection.context

            if context['resource'] == 'zone':
                result = ZoneDoesNotExistError(value='',
                                               driver=self.connection.driver,
                                               zone_id=context['id'])

            elif context['resource'] == 'record':
                result = RecordDoesNotExistError(
                    value='',
                    driver=self.connection.driver,
                    record_id=context['id'])
        return result


class LinodeDNSConnection(LinodeConnection):
    # Same transport as the compute driver; only the response class differs.
    responseCls = LinodeDNSResponse


class LinodeDNSDriver(DNSDriver):
    type = Provider.LINODE
    name = 'Linode DNS'
    website = 'http://www.linode.com/'
    connectionCls = LinodeDNSConnection

    RECORD_TYPE_MAP = {
        RecordType.NS: 'NS',
        RecordType.MX: 'MX',
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.TXT: 'TXT',
        RecordType.SRV: 'SRV',
    }

    def list_zones(self):
        """
        Return all domains (zones) on the account.
        """
        params = {'api_action': 'domain.list'}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        zones = self._to_zones(data)
        return zones

    def list_records(self, zone):
        """
        Return all resource records belonging to ``zone``.
        """
        params = {'api_action': 'domain.resource.list', 'DOMAINID': zone.id}

        # Context lets the response class raise ZoneDoesNotExistError on a
        # "not found" error code.
        self.connection.set_context(context={'resource': 'zone',
                                             'id': zone.id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        records = self._to_records(items=data, zone=zone)
        return records

    def get_zone(self, zone_id):
        """
        Fetch a single zone by id; raises ZoneDoesNotExistError when the
        filtered domain.list result does not contain exactly one zone.
        """
        params = {'api_action': 'domain.list', 'DomainID': zone_id}
        self.connection.set_context(context={'resource': 'zone',
                                             'id': zone_id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        zones = self._to_zones(data)

        if len(zones) != 1:
            raise ZoneDoesNotExistError(value='', driver=self,
                                        zone_id=zone_id)

        return zones[0]

    def get_record(self, zone_id, record_id):
        """
        Fetch a single record by zone and record id; raises
        RecordDoesNotExistError when not exactly one record matches.
        """
        zone = self.get_zone(zone_id=zone_id)
        params = {'api_action': 'domain.resource.list', 'DomainID': zone_id,
                  'ResourceID': record_id}
        self.connection.set_context(context={'resource': 'record',
                                             'id': record_id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        records = self._to_records(items=data, zone=zone)

        if len(records) != 1:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record_id)

        return records[0]

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        API docs: http://www.linode.com/api/dns/domain.create
        """
        params = {'api_action': 'domain.create', 'Type': type,
                  'Domain': domain}

        if ttl:
            params['TTL_sec'] = ttl

        # merge_valid_keys() copies the whitelisted keys from ``extra`` into
        # ``params`` (mutating it) and returns just the merged keys.
        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_ZONE_EXTRA_PARAMS,
                                  extra=extra)
        data = self.connection.request(API_ROOT, params=params).objects[0]
        zone = Zone(id=data['DomainID'], domain=domain, type=type, ttl=ttl,
                    extra=merged, driver=self)
        return zone

    def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
        """
        Update an existing zone.

        API docs: http://www.linode.com/api/dns/domain.update
        """
        params = {'api_action': 'domain.update', 'DomainID': zone.id}

        if type:
            params['Type'] = type

        if domain:
            params['Domain'] = domain

        if ttl:
            params['TTL_sec'] = ttl

        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_ZONE_EXTRA_PARAMS,
                                  extra=extra)
        self.connection.request(API_ROOT, params=params).objects[0]
        # Return a copy of the zone with the updated attributes; the Linode
        # API response itself carries no zone payload worth parsing.
        updated_zone = get_new_obj(obj=zone, klass=Zone,
                                   attributes={'domain': domain,
                                               'type': type, 'ttl': ttl,
                                               'extra': merged})
        return updated_zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        API docs: http://www.linode.com/api/dns/domain.resource.create
        """
        params = {'api_action': 'domain.resource.create', 'DomainID': zone.id,
                  'Name': name, 'Target': data,
                  'Type': self.RECORD_TYPE_MAP[type]}
        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_RECORD_EXTRA_PARAMS,
                                  extra=extra)

        result = self.connection.request(API_ROOT, params=params).objects[0]
        record = Record(id=result['ResourceID'], name=name, type=type,
                        data=data, extra=merged, zone=zone, driver=self,
                        ttl=merged.get('TTL_sec', None))
        return record

    def update_record(self, record, name=None, type=None, data=None,
                      extra=None):
        """
        Update an existing record.

        API docs: http://www.linode.com/api/dns/domain.resource.update
        """
        params = {'api_action': 'domain.resource.update',
                  'ResourceID': record.id, 'DomainID': record.zone.id}

        if name:
            params['Name'] = name

        if data:
            params['Target'] = data

        if type is not None:
            params['Type'] = self.RECORD_TYPE_MAP[type]

        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_RECORD_EXTRA_PARAMS,
                                  extra=extra)

        self.connection.request(API_ROOT, params=params).objects[0]
        updated_record = get_new_obj(obj=record, klass=Record,
                                     attributes={'name': name, 'data': data,
                                                 'type': type,
                                                 'extra': merged})
        return updated_record

    def delete_zone(self, zone):
        params = {'api_action': 'domain.delete', 'DomainID': zone.id}

        self.connection.set_context(context={'resource': 'zone',
                                             'id': zone.id})
        data = self.connection.request(API_ROOT, params=params).objects[0]

        # Success is signalled by the API echoing the deleted id back.
        return 'DomainID' in data

    def delete_record(self, record):
        params = {'api_action': 'domain.resource.delete',
                  'DomainID': record.zone.id, 'ResourceID': record.id}

        self.connection.set_context(context={'resource': 'record',
                                             'id': record.id})
        data = self.connection.request(API_ROOT, params=params).objects[0]

        return 'ResourceID' in data

    def _to_zones(self, items):
        """
        Convert a list of items to the Zone objects.
        """
        zones = []

        for item in items:
            zones.append(self._to_zone(item))

        return zones

    def _to_zone(self, item):
        """
        Build an Zone object from the item dictionary.
        """
        extra = {'SOA_Email': item['SOA_EMAIL'], 'status': item['STATUS'],
                 'description': item['DESCRIPTION']}
        zone = Zone(id=item['DOMAINID'], domain=item['DOMAIN'],
                    type=item['TYPE'], ttl=item['TTL_SEC'], driver=self,
                    extra=extra)
        return zone

    def _to_records(self, items, zone=None):
        """
        Convert a list of items to the Record objects.
        """
        records = []

        for item in items:
            records.append(self._to_record(item=item, zone=zone))

        return records

    def _to_record(self, item, zone=None):
        """
        Build a Record object from the item dictionary.
        """
        extra = {'protocol': item['PROTOCOL'], 'ttl_sec': item['TTL_SEC'],
                 'port': item['PORT'], 'weight': item['WEIGHT'],
                 'priority': item['PRIORITY']}
        type = self._string_to_record_type(item['TYPE'])
        record = Record(id=item['RESOURCEID'], name=item['NAME'],
                        type=type, data=item['TARGET'], zone=zone,
                        driver=self, ttl=item['TTL_SEC'], extra=extra)
        return record
apache-libcloud-2.2.1/libcloud/dns/drivers/zerigo.py0000664000175000017500000004351613153541406022331 0ustar  kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [ 'ZerigoDNSDriver' ] import copy import base64 from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.utils.py3 import ET from libcloud.utils.misc import merge_valid_keys, get_new_obj from libcloud.utils.xml import findtext, findall from libcloud.common.base import XmlResponse, ConnectionUserAndKey from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.common.types import MalformedResponseError from libcloud.dns.types import Provider, RecordType from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError from libcloud.dns.base import DNSDriver, Zone, Record API_HOST = 'ns.zerigo.com' API_VERSION = '1.1' API_ROOT = '/api/%s/' % (API_VERSION) VALID_ZONE_EXTRA_PARAMS = ['notes', 'tag-list', 'ns1', 'slave-nameservers'] VALID_RECORD_EXTRA_PARAMS = ['notes', 'ttl', 'priority'] # Number of items per page (maximum limit is 1000) ITEMS_PER_PAGE = 100 class ZerigoError(LibcloudError): def __init__(self, code, errors): self.code = code self.errors = errors or [] def __str__(self): return 'Errors: %s' % (', '.join(self.errors)) def __repr__(self): return ('' % ( self.code, len(self.errors))) class ZerigoDNSResponse(XmlResponse): def success(self): return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED] def parse_error(self): status = int(self.status) if status == 401: if not self.body: raise InvalidCredsError(str(self.status) + ': ' + self.error) else: raise InvalidCredsError(self.body) elif status == 404: context = self.connection.context if context['resource'] == 'zone': raise ZoneDoesNotExistError(value='', driver=self, zone_id=context['id']) elif context['resource'] == 'record': raise RecordDoesNotExistError(value='', driver=self, record_id=context['id']) elif status != 503: try: body = ET.XML(self.body) except: raise MalformedResponseError('Failed to parse XML', body=self.body) errors = [] for error in findall(element=body, xpath='error'): errors.append(error.text) raise 
ZerigoError(code=status, errors=errors) return self.body class ZerigoDNSConnection(ConnectionUserAndKey): host = API_HOST secure = True responseCls = ZerigoDNSResponse def add_default_headers(self, headers): auth_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) headers['Authorization'] = 'Basic %s' % (auth_b64.decode('utf-8')) return headers def request(self, action, params=None, data='', headers=None, method='GET'): if not headers: headers = {} if not params: params = {} if method in ("POST", "PUT"): headers = {'Content-Type': 'application/xml; charset=UTF-8'} return super(ZerigoDNSConnection, self).request(action=action, params=params, data=data, method=method, headers=headers) class ZerigoDNSDriver(DNSDriver): type = Provider.ZERIGO name = 'Zerigo DNS' website = 'http://www.zerigo.com/' connectionCls = ZerigoDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.GEO: 'GEO', RecordType.MX: 'MX', RecordType.NAPTR: 'NAPTR', RecordType.NS: 'NS', RecordType.PTR: 'PTR', RecordType.REDIRECT: 'REDIRECT', RecordType.SPF: 'SPF', RecordType.SRV: 'SRV', RecordType.TXT: 'TXT', RecordType.URL: 'URL', } def iterate_zones(self): return self._get_more('zones') def iterate_records(self, zone): return self._get_more('records', zone=zone) def get_zone(self, zone_id): path = API_ROOT + 'zones/%s.xml' % (zone_id) self.connection.set_context({'resource': 'zone', 'id': zone_id}) data = self.connection.request(path).object zone = self._to_zone(elem=data) return zone def get_record(self, zone_id, record_id): zone = self.get_zone(zone_id=zone_id) self.connection.set_context({'resource': 'record', 'id': record_id}) path = API_ROOT + 'hosts/%s.xml' % (record_id) data = self.connection.request(path).object record = self._to_record(elem=data, zone=zone) return record def create_zone(self, domain, type='master', ttl=None, extra=None): """ Create a new zone. 
Provider API docs: https://www.zerigo.com/docs/apis/dns/1.1/zones/create @inherits: :class:`DNSDriver.create_zone` """ path = API_ROOT + 'zones.xml' zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl, extra=extra) data = self.connection.request(action=path, data=ET.tostring(zone_elem), method='POST').object zone = self._to_zone(elem=data) return zone def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): """ Update an existing zone. Provider API docs: https://www.zerigo.com/docs/apis/dns/1.1/zones/update @inherits: :class:`DNSDriver.update_zone` """ if domain: raise LibcloudError('Domain cannot be changed', driver=self) path = API_ROOT + 'zones/%s.xml' % (zone.id) zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl, extra=extra) response = self.connection.request(action=path, data=ET.tostring(zone_elem), method='PUT') assert response.status == httplib.OK merged = merge_valid_keys(params=copy.deepcopy(zone.extra), valid_keys=VALID_ZONE_EXTRA_PARAMS, extra=extra) updated_zone = get_new_obj(obj=zone, klass=Zone, attributes={'type': type, 'ttl': ttl, 'extra': merged}) return updated_zone def create_record(self, name, zone, type, data, extra=None): """ Create a new record. 
Provider API docs: https://www.zerigo.com/docs/apis/dns/1.1/hosts/create @inherits: :class:`DNSDriver.create_record` """ path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id) record_elem = self._to_record_elem(name=name, type=type, data=data, extra=extra) response = self.connection.request(action=path, data=ET.tostring(record_elem), method='POST') assert response.status == httplib.CREATED record = self._to_record(elem=response.object, zone=zone) return record def update_record(self, record, name=None, type=None, data=None, extra=None): path = API_ROOT + 'hosts/%s.xml' % (record.id) record_elem = self._to_record_elem(name=name, type=type, data=data, extra=extra) response = self.connection.request(action=path, data=ET.tostring(record_elem), method='PUT') assert response.status == httplib.OK merged = merge_valid_keys(params=copy.deepcopy(record.extra), valid_keys=VALID_RECORD_EXTRA_PARAMS, extra=extra) updated_record = get_new_obj(obj=record, klass=Record, attributes={'type': type, 'data': data, 'extra': merged}) return updated_record def delete_zone(self, zone): path = API_ROOT + 'zones/%s.xml' % (zone.id) self.connection.set_context({'resource': 'zone', 'id': zone.id}) response = self.connection.request(action=path, method='DELETE') return response.status == httplib.OK def delete_record(self, record): path = API_ROOT + 'hosts/%s.xml' % (record.id) self.connection.set_context({'resource': 'record', 'id': record.id}) response = self.connection.request(action=path, method='DELETE') return response.status == httplib.OK def ex_get_zone_by_domain(self, domain): """ Retrieve a zone object by the domain name. :param domain: The domain which should be used :type domain: ``str`` :rtype: :class:`Zone` """ path = API_ROOT + 'zones/%s.xml' % (domain) self.connection.set_context({'resource': 'zone', 'id': domain}) data = self.connection.request(path).object zone = self._to_zone(elem=data) return zone def ex_force_slave_axfr(self, zone): """ Force a zone transfer. 
:param zone: Zone which should be used. :type zone: :class:`Zone` :rtype: :class:`Zone` """ path = API_ROOT + 'zones/%s/force_slave_axfr.xml' % (zone.id) self.connection.set_context({'resource': 'zone', 'id': zone.id}) response = self.connection.request(path, method='POST') assert response.status == httplib.ACCEPTED return zone def _to_zone_elem(self, domain=None, type=None, ttl=None, extra=None): zone_elem = ET.Element('zone', {}) if domain: domain_elem = ET.SubElement(zone_elem, 'domain') domain_elem.text = domain if type: ns_type_elem = ET.SubElement(zone_elem, 'ns-type') if type == 'master': ns_type_elem.text = 'pri_sec' elif type == 'slave': if not extra or 'ns1' not in extra: raise LibcloudError('ns1 extra attribute is required ' + 'when zone type is slave', driver=self) ns_type_elem.text = 'sec' ns1_elem = ET.SubElement(zone_elem, 'ns1') ns1_elem.text = extra['ns1'] elif type == 'std_master': # TODO: Each driver should provide supported zone types # Slave name servers are elsewhere if not extra or 'slave-nameservers' not in extra: raise LibcloudError('slave-nameservers extra ' + 'attribute is required whenzone ' + 'type is std_master', driver=self) ns_type_elem.text = 'pri' slave_nameservers_elem = ET.SubElement(zone_elem, 'slave-nameservers') slave_nameservers_elem.text = extra['slave-nameservers'] if ttl: default_ttl_elem = ET.SubElement(zone_elem, 'default-ttl') default_ttl_elem.text = str(ttl) if extra and 'tag-list' in extra: tags = extra['tag-list'] tags_elem = ET.SubElement(zone_elem, 'tag-list') tags_elem.text = ' '.join(tags) return zone_elem def _to_record_elem(self, name=None, type=None, data=None, extra=None): record_elem = ET.Element('host', {}) if name: name_elem = ET.SubElement(record_elem, 'hostname') name_elem.text = name if type is not None: type_elem = ET.SubElement(record_elem, 'host-type') type_elem.text = self.RECORD_TYPE_MAP[type] if data: data_elem = ET.SubElement(record_elem, 'data') data_elem.text = data if extra: if 'ttl' in extra: 
ttl_elem = ET.SubElement(record_elem, 'ttl', {'type': 'integer'}) ttl_elem.text = str(extra['ttl']) if 'priority' in extra: # Only MX and SRV records support priority priority_elem = ET.SubElement(record_elem, 'priority', {'type': 'integer'}) priority_elem.text = str(extra['priority']) if 'notes' in extra: notes_elem = ET.SubElement(record_elem, 'notes') notes_elem.text = extra['notes'] return record_elem def _to_zones(self, elem): zones = [] for item in findall(element=elem, xpath='zone'): zone = self._to_zone(elem=item) zones.append(zone) return zones def _to_zone(self, elem): id = findtext(element=elem, xpath='id') domain = findtext(element=elem, xpath='domain') type = findtext(element=elem, xpath='ns-type') type = 'master' if type.find('pri') == 0 else 'slave' ttl = findtext(element=elem, xpath='default-ttl') hostmaster = findtext(element=elem, xpath='hostmaster') custom_ns = findtext(element=elem, xpath='custom-ns') custom_nameservers = findtext(element=elem, xpath='custom-nameservers') notes = findtext(element=elem, xpath='notes') nx_ttl = findtext(element=elem, xpath='nx-ttl') slave_nameservers = findtext(element=elem, xpath='slave-nameservers') tags = findtext(element=elem, xpath='tag-list') tags = tags.split(' ') if tags else [] extra = {'hostmaster': hostmaster, 'custom-ns': custom_ns, 'custom-nameservers': custom_nameservers, 'notes': notes, 'nx-ttl': nx_ttl, 'slave-nameservers': slave_nameservers, 'tags': tags} zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl), driver=self, extra=extra) return zone def _to_records(self, elem, zone): records = [] for item in findall(element=elem, xpath='host'): record = self._to_record(elem=item, zone=zone) records.append(record) return records def _to_record(self, elem, zone): id = findtext(element=elem, xpath='id') name = findtext(element=elem, xpath='hostname') type = findtext(element=elem, xpath='host-type') type = self._string_to_record_type(type) data = findtext(element=elem, xpath='data') notes = 
findtext(element=elem, xpath='notes', no_text_value=None) state = findtext(element=elem, xpath='state', no_text_value=None) fqdn = findtext(element=elem, xpath='fqdn', no_text_value=None) priority = findtext(element=elem, xpath='priority', no_text_value=None) ttl = findtext(element=elem, xpath='ttl', no_text_value=None) if not name: name = None if ttl: ttl = int(ttl) extra = {'notes': notes, 'state': state, 'fqdn': fqdn, 'priority': priority, 'ttl': ttl} record = Record(id=id, name=name, type=type, data=data, zone=zone, driver=self, ttl=ttl, extra=extra) return record def _get_more(self, rtype, **kwargs): exhausted = False last_key = None while not exhausted: items, last_key, exhausted = self._get_data(rtype, last_key, **kwargs) for item in items: yield item def _get_data(self, rtype, last_key, **kwargs): # Note: last_key in this case really is a "last_page". # TODO: Update base driver and change last_key to something more # generic - e.g. marker params = {} params['per_page'] = ITEMS_PER_PAGE params['page'] = last_key + 1 if last_key else 1 if rtype == 'zones': path = API_ROOT + 'zones.xml' response = self.connection.request(path) transform_func = self._to_zones elif rtype == 'records': zone = kwargs['zone'] path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id) self.connection.set_context({'resource': 'zone', 'id': zone.id}) response = self.connection.request(path, params=params) transform_func = self._to_records exhausted = False result_count = int(response.headers.get('x-query-count', 0)) if (params['page'] * ITEMS_PER_PAGE) >= result_count: exhausted = True if response.status == httplib.OK: items = transform_func(elem=response.object, **kwargs) return items, params['page'], exhausted else: return [], None, True apache-libcloud-2.2.1/libcloud/dns/drivers/auroradns.py0000664000175000017500000005324513153541406023040 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ AuroraDNS DNS Driver """ import base64 import json import hmac import datetime from hashlib import sha256 from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.common.base import ConnectionUserAndKey, JsonResponse from libcloud.common.types import InvalidCredsError, ProviderError from libcloud.common.types import LibcloudError from libcloud.dns.base import DNSDriver, Zone, Record from libcloud.dns.types import RecordType, ZoneDoesNotExistError from libcloud.dns.types import ZoneAlreadyExistsError, RecordDoesNotExistError API_HOST = 'api.auroradns.eu' # Default TTL required by libcloud, but doesn't do anything in AuroraDNS DEFAULT_ZONE_TTL = 3600 DEFAULT_ZONE_TYPE = 'master' VALID_RECORD_PARAMS_EXTRA = ['ttl', 'prio', 'health_check_id', 'disabled'] class AuroraDNSHealthCheckType(object): """ Healthcheck type. 
""" HTTP = 'HTTP' HTTPS = 'HTTPS' TCP = 'TCP' class HealthCheckError(LibcloudError): error_type = 'HealthCheckError' def __init__(self, value, driver, health_check_id): self.health_check_id = health_check_id super(HealthCheckError, self).__init__(value=value, driver=driver) def __str__(self): return self.__repr__() def __repr__(self): return ('<%s in %s, health_check_id=%s, value=%s>' % (self.error_type, repr(self.driver), self.health_check_id, self.value)) class HealthCheckDoesNotExistError(HealthCheckError): error_type = 'HealthCheckDoesNotExistError' class AuroraDNSHealthCheck(object): """ AuroraDNS Healthcheck resource. """ def __init__(self, id, type, hostname, ipaddress, port, interval, path, threshold, health, enabled, zone, driver, extra=None): """ :param id: Healthcheck id :type id: ``str`` :param hostname: Hostname or FQDN of the target :type hostname: ``str`` :param ipaddress: IPv4 or IPv6 address of the target :type ipaddress: ``str`` :param port: The port on the target to monitor :type port: ``int`` :param interval: The interval of the health check :type interval: ``int`` :param path: The path to monitor on the target :type path: ``str`` :param threshold: The threshold of before marking a check as failed :type threshold: ``int`` :param health: The current health of the health check :type health: ``bool`` :param enabled: If the health check is currently enabled :type enabled: ``bool`` :param zone: Zone instance. :type zone: :class:`Zone` :param driver: DNSDriver instance. :type driver: :class:`DNSDriver` :param extra: (optional) Extra attributes (driver specific). 
:type extra: ``dict`` """ self.id = str(id) if id else None self.type = type self.hostname = hostname self.ipaddress = ipaddress self.port = int(port) if port else None self.interval = int(interval) self.path = path self.threshold = int(threshold) self.health = bool(health) self.enabled = bool(enabled) self.zone = zone self.driver = driver self.extra = extra or {} def update(self, type=None, hostname=None, ipaddress=None, port=None, interval=None, path=None, threshold=None, enabled=None, extra=None): return self.driver.ex_update_healthcheck(healthcheck=self, type=type, hostname=hostname, ipaddress=ipaddress, port=port, path=path, interval=interval, threshold=threshold, enabled=enabled, extra=extra) def delete(self): return self.driver.ex_delete_healthcheck(healthcheck=self) def __repr__(self): return ('' % (self.zone.id, self.id, self.type, self.hostname, self.ipaddress, self.port, self.interval, self.health, self.driver.name)) class AuroraDNSResponse(JsonResponse): def success(self): return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED] def parse_error(self): status = int(self.status) error = {'driver': self, 'value': ''} if status == httplib.UNAUTHORIZED: error['value'] = 'Authentication failed' raise InvalidCredsError(**error) elif status == httplib.FORBIDDEN: error['value'] = 'Authorization failed' error['http_status'] = status raise ProviderError(**error) elif status == httplib.NOT_FOUND: context = self.connection.context if context['resource'] == 'zone': error['zone_id'] = context['id'] raise ZoneDoesNotExistError(**error) elif context['resource'] == 'record': error['record_id'] = context['id'] raise RecordDoesNotExistError(**error) elif context['resource'] == 'healthcheck': error['health_check_id'] = context['id'] raise HealthCheckDoesNotExistError(**error) elif status == httplib.CONFLICT: context = self.connection.context if context['resource'] == 'zone': error['zone_id'] = context['id'] raise ZoneAlreadyExistsError(**error) elif status == 
httplib.BAD_REQUEST:
            context = self.connection.context
            body = self.parse_body()
            # Surface the API's own error message to the caller.
            raise ProviderError(value=body['errormsg'],
                                http_code=status, driver=self)


class AuroraDNSConnection(ConnectionUserAndKey):
    host = API_HOST
    responseCls = AuroraDNSResponse

    def calculate_auth_signature(self, secret_key, method, url, timestamp):
        # HMAC-SHA256 over METHOD + URL + TIMESTAMP, base64 encoded, as
        # required by the AuroraDNS signing scheme.
        b64_hmac = base64.b64encode(
            hmac.new(b(secret_key),
                     b(method) + b(url) + b(timestamp),
                     digestmod=sha256).digest()
        )

        return b64_hmac.decode('utf-8')

    def gen_auth_header(self, api_key, secret_key, method, url, timestamp):
        # Authorization: AuroraDNSv1 base64(api_key:signature)
        signature = self.calculate_auth_signature(secret_key, method, url,
                                                  timestamp)

        auth_b64 = base64.b64encode(b('%s:%s' % (api_key, signature)))
        return 'AuroraDNSv1 %s' % (auth_b64.decode('utf-8'))

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        if not headers:
            headers = {}

        if not params:
            params = {}

        # NOTE(review): this replaces (not updates) any caller-supplied
        # headers on POST/PUT - looks unintentional, confirm before
        # relying on custom headers with those methods.
        if method in ("POST", "PUT"):
            headers = {'Content-Type': 'application/json; charset=UTF-8'}

        # Requests are signed with the current UTC time; the server
        # validates the X-AuroraDNS-Date header against the signature.
        t = datetime.datetime.utcnow()
        timestamp = t.strftime('%Y%m%dT%H%M%SZ')

        headers['X-AuroraDNS-Date'] = timestamp
        headers['Authorization'] = self.gen_auth_header(self.user_id,
                                                        self.key, method,
                                                        action, timestamp)

        return super(AuroraDNSConnection, self).request(action=action,
                                                        params=params,
                                                        data=data,
                                                        method=method,
                                                        headers=headers)


class AuroraDNSDriver(DNSDriver):
    name = 'AuroraDNS'
    website = 'https://www.pcextreme.nl/en/aurora/dns'
    connectionCls = AuroraDNSConnection

    # Mapping of libcloud RecordType values to AuroraDNS type strings.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.SOA: 'SOA',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
        RecordType.DS: 'DS',
        RecordType.PTR: 'PTR',
        RecordType.SSHFP: 'SSHFP',
        RecordType.TLSA: 'TLSA'
    }

    HEALTHCHECK_TYPE_MAP = {
        AuroraDNSHealthCheckType.HTTP: 'HTTP',
        AuroraDNSHealthCheckType.HTTPS: 'HTTPS',
        AuroraDNSHealthCheckType.TCP: 'TCP'
    }

    def iterate_zones(self):
        # Lazily yield Zone objects for every zone in the account.
        res = self.connection.request('/zones')
        for zone in res.parse_body():
            yield self.__res_to_zone(zone)

    def iterate_records(self, zone):
        # Lazily yield Record objects for the given zone.
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        res = self.connection.request('/zones/%s/records' % zone.id)

        for record in res.parse_body():
            yield self.__res_to_record(zone, record)

    def get_zone(self, zone_id):
        self.connection.set_context({'resource': 'zone', 'id': zone_id})
        res = self.connection.request('/zones/%s' % zone_id)
        zone = res.parse_body()
        return self.__res_to_zone(zone)

    def get_record(self, zone_id, record_id):
        self.connection.set_context({'resource': 'record', 'id': record_id})
        res = self.connection.request('/zones/%s/records/%s' % (zone_id,
                                                                record_id))
        record = res.parse_body()

        zone = self.get_zone(zone_id)

        return self.__res_to_record(zone, record)

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        # type/ttl/extra are accepted for API compatibility but AuroraDNS
        # only needs the zone name.
        self.connection.set_context({'resource': 'zone', 'id': domain})
        res = self.connection.request('/zones', method='POST',
                                      data=json.dumps({'name': domain}))
        zone = res.parse_body()
        return self.__res_to_zone(zone)

    def create_record(self, name, zone, type, data, extra=None):
        # The zone apex is addressed with an empty record name.
        if name is None:
            name = ""

        rdata = {
            'name': name,
            'type': self.RECORD_TYPE_MAP[type],
            'content': data
        }

        rdata = self.__merge_extra_data(rdata, extra)

        # libcloud requires a TTL; fall back to the driver default.
        if 'ttl' not in rdata:
            rdata['ttl'] = DEFAULT_ZONE_TTL

        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        res = self.connection.request('/zones/%s/records' % zone.id,
                                      method='POST',
                                      data=json.dumps(rdata))

        record = res.parse_body()
        return self.__res_to_record(zone, record)

    def delete_zone(self, zone):
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        self.connection.request('/zones/%s' % zone.id, method='DELETE')
        return True

    def delete_record(self, record):
        self.connection.set_context({'resource': 'record', 'id': record.id})
        self.connection.request('/zones/%s/records/%s' % (record.zone.id,
                                                          record.id),
                                method='DELETE')
        return True

    def list_record_types(self):
        types = []
        for record_type in self.RECORD_TYPE_MAP.keys():
types.append(record_type) return types def update_record(self, record, name, type, data, extra=None): rdata = {} if name is not None: rdata['name'] = name if type is not None: rdata['type'] = self.RECORD_TYPE_MAP[type] if data is not None: rdata['content'] = data rdata = self.__merge_extra_data(rdata, extra) self.connection.set_context({'resource': 'record', 'id': record.id}) self.connection.request('/zones/%s/records/%s' % (record.zone.id, record.id), method='PUT', data=json.dumps(rdata)) return self.get_record(record.zone.id, record.id) def ex_list_healthchecks(self, zone): """ List all Health Checks in a zone. :param zone: Zone to list health checks for. :type zone: :class:`Zone` :return: ``list`` of :class:`AuroraDNSHealthCheck` """ healthchecks = [] self.connection.set_context({'resource': 'zone', 'id': zone.id}) res = self.connection.request('/zones/%s/health_checks' % zone.id) for healthcheck in res.parse_body(): healthchecks.append(self.__res_to_healthcheck(zone, healthcheck)) return healthchecks def ex_get_healthcheck(self, zone, health_check_id): """ Get a single Health Check from a zone :param zone: Zone in which the health check is :type zone: :class:`Zone` :param health_check_id: ID of the required health check :type health_check_id: ``str`` :return: :class:`AuroraDNSHealthCheck` """ self.connection.set_context({'resource': 'healthcheck', 'id': health_check_id}) res = self.connection.request('/zones/%s/health_checks/%s' % (zone.id, health_check_id)) check = res.parse_body() return self.__res_to_healthcheck(zone, check) def ex_create_healthcheck(self, zone, type, hostname, port, path, interval, threshold, ipaddress=None, enabled=True, extra=None): """ Create a new Health Check in a zone :param zone: Zone in which the health check should be created :type zone: :class:`Zone` :param type: The type of health check to be created :type type: :class:`AuroraDNSHealthCheckType` :param hostname: The hostname of the target to monitor :type hostname: ``str`` :param 
port: The port of the target to monitor. E.g. 80 for HTTP :type port: ``int`` :param path: The path of the target to monitor. Only used by HTTP at this moment. Usually this is simple /. :type path: ``str`` :param interval: The interval of checks. 10, 30 or 60 seconds. :type interval: ``int`` :param threshold: The threshold of failures before the healthcheck is marked as failed. :type threshold: ``int`` :param ipaddress: (optional) The IP Address of the target to monitor. You can pass a empty string if this is not required. :type ipaddress: ``str`` :param enabled: (optional) If this healthcheck is enabled to run :type enabled: ``bool`` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :return: :class:`AuroraDNSHealthCheck` """ cdata = { 'type': self.HEALTHCHECK_TYPE_MAP[type], 'hostname': hostname, 'ipaddress': ipaddress, 'port': int(port), 'interval': int(interval), 'path': path, 'threshold': int(threshold), 'enabled': enabled } self.connection.set_context({'resource': 'zone', 'id': zone.id}) res = self.connection.request('/zones/%s/health_checks' % zone.id, method='POST', data=json.dumps(cdata)) healthcheck = res.parse_body() return self.__res_to_healthcheck(zone, healthcheck) def ex_update_healthcheck(self, healthcheck, type=None, hostname=None, ipaddress=None, port=None, path=None, interval=None, threshold=None, enabled=None, extra=None): """ Update an existing Health Check :param zone: The healthcheck which has to be updated :type zone: :class:`AuroraDNSHealthCheck` :param type: (optional) The type of health check to be created :type type: :class:`AuroraDNSHealthCheckType` :param hostname: (optional) The hostname of the target to monitor :type hostname: ``str`` :param ipaddress: (optional) The IP Address of the target to monitor. You can pass a empty string if this is not required. :type ipaddress: ``str`` :param port: (optional) The port of the target to monitor. E.g. 
80 for HTTP :type port: ``int`` :param path: (optional) The path of the target to monitor. Only used by HTTP at this moment. Usually just '/'. :type path: ``str`` :param interval: (optional) The interval of checks. 10, 30 or 60 seconds. :type interval: ``int`` :param threshold: (optional) The threshold of failures before the healthcheck is marked as failed. :type threshold: ``int`` :param enabled: (optional) If this healthcheck is enabled to run :type enabled: ``bool`` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :return: :class:`AuroraDNSHealthCheck` """ cdata = {} if type is not None: cdata['type'] = self.HEALTHCHECK_TYPE_MAP[type] if hostname is not None: cdata['hostname'] = hostname if ipaddress is not None: if len(ipaddress) == 0: cdata['ipaddress'] = None else: cdata['ipaddress'] = ipaddress if port is not None: cdata['port'] = int(port) if path is not None: cdata['path'] = path if interval is not None: cdata['interval'] = int(interval) if threshold is not None: cdata['threshold'] = threshold if enabled is not None: cdata['enabled'] = bool(enabled) self.connection.set_context({'resource': 'healthcheck', 'id': healthcheck.id}) self.connection.request('/zones/%s/health_checks/%s' % (healthcheck.zone.id, healthcheck.id), method='PUT', data=json.dumps(cdata)) return self.ex_get_healthcheck(healthcheck.zone, healthcheck.id) def ex_delete_healthcheck(self, healthcheck): """ Remove an existing Health Check :param zone: The healthcheck which has to be removed :type zone: :class:`AuroraDNSHealthCheck` """ self.connection.set_context({'resource': 'healthcheck', 'id': healthcheck.id}) self.connection.request('/zones/%s/health_checks/%s' % (healthcheck.zone.id, healthcheck.id), method='DELETE') return True def __res_to_record(self, zone, record): if len(record['name']) == 0: name = None else: name = record['name'] extra = {} extra['created'] = record['created'] extra['modified'] = record['modified'] extra['disabled'] = 
record['disabled']
        extra['ttl'] = record['ttl']
        extra['priority'] = record['prio']

        return Record(id=record['id'], name=name, type=record['type'],
                      data=record['content'], zone=zone,
                      driver=self.connection.driver, ttl=record['ttl'],
                      extra=extra)

    def __res_to_zone(self, zone):
        # AuroraDNS has no zone-level TTL or type; libcloud requires both,
        # so the module-level defaults are substituted.
        return Zone(id=zone['id'], domain=zone['name'],
                    type=DEFAULT_ZONE_TYPE,
                    ttl=DEFAULT_ZONE_TTL,
                    driver=self.connection.driver,
                    extra={'created': zone['created'],
                           'servers': zone['servers'],
                           'account_id': zone['account_id'],
                           'cluster_id': zone['cluster_id']})

    def __res_to_healthcheck(self, zone, healthcheck):
        # Convert an API healthcheck dict into the driver's resource class.
        return AuroraDNSHealthCheck(id=healthcheck['id'],
                                    type=healthcheck['type'],
                                    hostname=healthcheck['hostname'],
                                    ipaddress=healthcheck['ipaddress'],
                                    health=healthcheck['health'],
                                    threshold=healthcheck['threshold'],
                                    path=healthcheck['path'],
                                    interval=healthcheck['interval'],
                                    port=healthcheck['port'],
                                    enabled=healthcheck['enabled'],
                                    zone=zone,
                                    driver=self.connection.driver)

    def __merge_extra_data(self, rdata, extra):
        # Copy only whitelisted keys from 'extra' into the request body.
        if extra is not None:
            for param in VALID_RECORD_PARAMS_EXTRA:
                if param in extra:
                    rdata[param] = extra[param]

        return rdata
apache-libcloud-2.2.1/libcloud/dns/drivers/zonomi.py0000664000175000017500000002617212701023453022347 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Zonomi DNS Driver
"""
import sys

from libcloud.common.zonomi import ZonomiConnection, ZonomiResponse
from libcloud.common.zonomi import ZonomiException
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordAlreadyExistsError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.types import Provider, RecordType


__all__ = [
    'ZonomiDNSDriver',
]


class ZonomiDNSResponse(ZonomiResponse):
    pass


class ZonomiDNSConnection(ZonomiConnection):
    responseCls = ZonomiDNSResponse


class ZonomiDNSDriver(DNSDriver):
    type = Provider.ZONOMI
    name = 'Zonomi DNS'
    website = 'https://zonomi.com'
    connectionCls = ZonomiDNSConnection

    # Zonomi only supports A, MX and TXT records.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.MX: 'MX',
        RecordType.TXT: 'TXT'
    }

    def list_zones(self):
        """
        Return a list of zones.

        :return: ``list`` of :class:`Zone`
        """
        action = '/app/dns/dyndns.jsp?'
        params = {'action': 'QUERYZONES', 'api_key': self.key}

        response = self.connection.request(action=action, params=params)
        zones = self._to_zones(response.objects)

        return zones

    def list_records(self, zone):
        """
        Return a list of records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :return: ``list`` of :class:`Record`
        """
        action = '/app/dns/dyndns.jsp?'
        # '**.' wildcard asks the API for every record under the zone.
        params = {'action': 'QUERY', 'name': '**.' + zone.id}
        try:
            response = self.connection.request(action=action, params=params)
        except ZonomiException:
            # sys.exc_info() kept for py2/py3 compatibility of this file.
            e = sys.exc_info()[1]
            if e.code == '404':
                raise ZoneDoesNotExistError(zone_id=zone.id,
                                            driver=self, value=e.message)
            raise e

        records = self._to_records(response.objects, zone)

        return records

    def get_zone(self, zone_id):
        """
        Return a Zone instance.
:param zone_id: ID of the required zone :type zone_id: ``str`` :rtype: :class:`Zone` """ zone = None zones = self.list_zones() for z in zones: if z.id == zone_id: zone = z if zone is None: raise ZoneDoesNotExistError(zone_id=zone_id, driver=self, value='') return zone def get_record(self, zone_id, record_id): """ Return a Record instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :param record_id: ID of the required record :type record_id: ``str`` :rtype: :class:`Record` """ record = None zone = self.get_zone(zone_id=zone_id) records = self.list_records(zone=zone) for r in records: if r.id == record_id: record = r if record is None: raise RecordDoesNotExistError(record_id=record_id, driver=self, value='') return record def create_zone(self, domain, type='master', ttl=None, extra=None): """ Create a new zone. :param zone_id: Zone domain name (e.g. example.com) :type zone_id: ``str`` :rtype: :class:`Zone` """ action = '/app/dns/addzone.jsp?' params = {'name': domain} try: self.connection.request(action=action, params=params) except ZonomiException: e = sys.exc_info()[1] if e.message == 'ERROR: This zone is already in your zone list.': raise ZoneAlreadyExistsError(zone_id=domain, driver=self, value=e.message) raise e zone = Zone(id=domain, domain=domain, type='master', ttl=ttl, driver=self, extra=extra) return zone def create_record(self, name, zone, type, data, extra=None): """ Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone: Zone where the requested record is created. :type zone: :class:`Zone` :param type: DNS record type (A, MX, TXT). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: Extra attributes (driver specific, e.g. 'prio' or 'ttl'). 
(optional) :type extra: ``dict`` :rtype: :class:`Record` """ action = '/app/dns/dyndns.jsp?' if name: record_name = name + '.' + zone.domain else: record_name = zone.domain params = {'action': 'SET', 'name': record_name, 'value': data, 'type': type} if type == 'MX' and extra is not None: params['prio'] = extra.get('prio') try: response = self.connection.request(action=action, params=params) except ZonomiException: e = sys.exc_info()[1] if ('ERROR: No zone found for %s' % record_name) in e.message: raise ZoneDoesNotExistError(zone_id=zone.id, driver=self, value=e.message) raise e # we determine if an A or MX record already exists # by looking at the response.If the key 'skipped' is present in the # response, it means record already exists. If this is True, # then raise RecordAlreadyExistsError if len(response.objects) != 0 and \ response.objects[0].get('skipped') == 'unchanged': raise RecordAlreadyExistsError(record_id=name, driver=self, value='') if 'DELETED' in response.objects: for el in response.objects[:2]: if el.get('content') == data: response.objects = [el] records = self._to_records(response.objects, zone=zone) return records[0] def delete_zone(self, zone): """ Delete a zone. Note: This will delete all the records belonging to this zone. :param zone: Zone to delete. :type zone: :class:`Zone` :rtype: ``bool`` """ action = '/app/dns/dyndns.jsp?' params = {'action': 'DELETEZONE', 'name': zone.id} try: response = self.connection.request(action=action, params=params) except ZonomiException: e = sys.exc_info()[1] if e.code == '404': raise ZoneDoesNotExistError(zone_id=zone.id, driver=self, value=e.message) raise e return 'DELETED' in response.objects def delete_record(self, record): """ Use this method to delete a record. :param record: record to delete :type record: `Record` :rtype: Bool """ action = '/app/dns/dyndns.jsp?' 
params = {'action': 'DELETE', 'name': record.name, 'type': record.type} try: response = self.connection.request(action=action, params=params) except ZonomiException: e = sys.exc_info()[1] if e.message == 'Record not deleted.': raise RecordDoesNotExistError(record_id=record.id, driver=self, value=e.message) raise e return 'DELETED' in response.objects def ex_convert_to_secondary(self, zone, master): """ Convert existent zone to slave. :param zone: Zone to convert. :type zone: :class:`Zone` :param master: the specified master name server IP address. :type master: ``str`` :rtype: Bool """ action = '/app/dns/converttosecondary.jsp?' params = {'name': zone.domain, 'master': master} try: self.connection.request(action=action, params=params) except ZonomiException: e = sys.exc_info()[1] if 'ERROR: Could not find' in e.message: raise ZoneDoesNotExistError(zone_id=zone.id, driver=self, value=e.message) return True def ex_convert_to_master(self, zone): """ Convert existent zone to master. :param zone: Zone to convert. :type zone: :class:`Zone` :rtype: Bool """ action = '/app/dns/converttomaster.jsp?' params = {'name': zone.domain} try: self.connection.request(action=action, params=params) except ZonomiException: e = sys.exc_info()[1] if 'ERROR: Could not find' in e.message: raise ZoneDoesNotExistError(zone_id=zone.id, driver=self, value=e.message) return True def _to_zone(self, item): if item['type'] == 'NATIVE': type = 'master' elif item['type'] == 'SLAVE': type = 'slave' zone = Zone(id=item['name'], domain=item['name'], type=type, driver=self, extra={}, ttl=None) return zone def _to_zones(self, items): zones = [] for item in items: zones.append(self._to_zone(item)) return zones def _to_record(self, item, zone): if len(item.get('ttl')) > 0: ttl = item.get('ttl').split(' ')[0] else: ttl = None extra = {'ttl': ttl, 'prio': item.get('prio')} if len(item['name']) > len(zone.domain): full_domain = item['name'] index = full_domain.index('.' 
+ zone.domain) record_name = full_domain[:index] else: record_name = zone.domain record = Record(id=record_name, name=record_name, data=item['content'], type=item['type'], zone=zone, driver=self, ttl=ttl, extra=extra) return record def _to_records(self, items, zone): records = [] for item in items: records.append(self._to_record(item, zone)) return records apache-libcloud-2.2.1/libcloud/dns/drivers/nsone.py0000664000175000017500000003002412705475361022162 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys try: import simplejson as json except ImportError: import json from libcloud.dns.types import Provider, ZoneDoesNotExistError, \ ZoneAlreadyExistsError, RecordDoesNotExistError, RecordAlreadyExistsError from libcloud.utils.py3 import httplib from libcloud.dns.base import DNSDriver, Zone, Record, RecordType from libcloud.common.nsone import NsOneConnection, NsOneResponse, \ NsOneException __all__ = [ 'NsOneDNSDriver' ] class NsOneDNSResponse(NsOneResponse): pass class NsOneDNSConnection(NsOneConnection): responseCls = NsOneDNSResponse class NsOneDNSDriver(DNSDriver): name = 'NS1 DNS' website = 'https://ns1.com' type = Provider.NSONE connectionCls = NsOneDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.NS: 'NS', RecordType.PTR: 'PTR', RecordType.SOA: 'SOA', RecordType.SRV: 'SRV', RecordType.TXT: 'TXT' } def list_zones(self): action = '/v1/zones' response = self.connection.request(action=action, method='GET') zones = self._to_zones(items=response.parse_body()) return zones def get_zone(self, zone_id): """ :param zone_id: Zone domain name (e.g. example.com) :return: :class:`Zone` """ action = '/v1/zones/%s' % zone_id try: response = self.connection.request(action=action, method='GET') except NsOneException: e = sys.exc_info()[1] if e.message == 'zone not found': raise ZoneDoesNotExistError(value=e.message, driver=self, zone_id=zone_id) else: raise e zone = self._to_zone(response.objects[0]) return zone def create_zone(self, domain, type='master', ttl=None, extra=None): """ :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param type: Zone type (This is not really used. See API docs for extra parameters) :type type: ``str`` :param ttl: TTL for new records (This is used through the extra param) :type ttl: ``int`` :param extra: Extra attributes that are specific to the driver such as ttl. 
:type extra: ``dict`` :rtype: :class:`Zone` """ action = '/v1/zones/%s' % domain raw_data = {'zone': domain} if extra is not None: raw_data.update(extra) post_data = json.dumps(raw_data) try: response = self.connection.request(action=action, method='PUT', data=post_data) except NsOneException: e = sys.exc_info()[1] if e.message == 'zone already exists': raise ZoneAlreadyExistsError(value=e.message, driver=self, zone_id=domain) else: raise e zone = self._to_zone(response.objects[0]) return zone def delete_zone(self, zone): """ :param zone: Zone to be deleted. :type zone: :class:`Zone` :return: Boolean """ action = '/v1/zones/%s' % zone.domain """zones_list = self.list_zones() if not self.ex_zone_exists(zone_id=zone.id, zones_list=zones_list): raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone.id) """ try: response = self.connection.request(action=action, method='DELETE') except NsOneException: e = sys.exc_info()[1] if e.message == 'zone not found': raise ZoneDoesNotExistError(value=e.message, driver=self, zone_id=zone.id) else: raise e return response.status == httplib.OK def list_records(self, zone): """ :param zone: Zone to list records for. :type zone: :class:`Zone` :return: ``list`` of :class:`Record` """ action = '/v1/zones/%s' % zone.domain try: response = self.connection.request(action=action, method='GET') except NsOneException: e = sys.exc_info()[1] if e.message == 'zone not found': raise ZoneDoesNotExistError(value=e.message, driver=self, zone_id=zone.id) else: raise e records = self._to_records(items=response.parse_body()['records'], zone=zone) return records def get_record(self, zone_id, record_id): """ :param zone_id: The id of the zone where to search for the record (e.g. example.com) :type zone_id: ``str`` :param record_id: The type of record to search for (e.g. 
A, AAA, MX etc) :return: :class:`Record` """ action = '/v1/zones/%s/%s/%s' % (zone_id, zone_id, record_id) try: response = self.connection.request(action=action, method='GET') except NsOneException: e = sys.exc_info()[1] if e.message == 'record not found': raise RecordDoesNotExistError(value=e.message, driver=self, record_id=record_id) else: raise e zone = self.get_zone(zone_id=zone_id) record = self._to_record(item=response.parse_body(), zone=zone) return record def delete_record(self, record): """ :param record: Record to delete. :type record: :class:`Record` :return: Boolean """ action = '/v1/zones/%s/%s/%s' % (record.zone.domain, record.name, record.type) try: response = self.connection.request(action=action, method='DELETE') except NsOneException: e = sys.exc_info()[1] if e.message == 'record not found': raise RecordDoesNotExistError(value=e.message, driver=self, record_id=record.id) else: raise e return response.status == httplib.OK def create_record(self, name, zone, type, data, extra=None): """ :param name: Name of the record to create (e.g. foo). :type name: ``str`` :param zone: Zone where the record should be created. :type zone: :class:`Zone` :param type: Type of record (e.g. A, MX etc) :type type: ``str`` :param data: Data of the record (e.g. 
127.0.0.1 for the A record) :type data: ``str`` :param extra: Extra data needed to create different types of records :type extra: ``dict`` :return: :class:`Record` """ action = '/v1/zones/%s/%s/%s' % (zone.domain, '%s.%s' % (name, zone.domain), type) raw_data = { "answers": [ { "answer": [ data ], } ], "type": type, "domain": '%s.%s' % (name, zone.domain), "zone": zone.domain } if extra is not None and extra.get('answers'): raw_data['answers'] = extra.get('answers') post_data = json.dumps(raw_data) try: response = self.connection.request(action=action, method='PUT', data=post_data) except NsOneException: e = sys.exc_info()[1] if e.message == 'record already exists': raise RecordAlreadyExistsError(value=e.message, driver=self, record_id='') else: raise e record = self._to_record(item=response.parse_body(), zone=zone) return record def update_record(self, record, name, type, data, extra=None): """ :param record: Record to update :type record: :class:`Record` :param name: Name of the record to update (e.g. foo). :type name: ``str`` :param type: Type of record (e.g. A, MX etc) :type type: ``str`` :param data: Data of the record (e.g. 
127.0.0.1 for the A record) :type data: ``str`` :param extra: Extra data needed to create different types of records :type extra: ``dict`` :return: :class:`Record` """ zone = record.zone action = '/v1/zones/%s/%s/%s' % (zone.domain, '%s.%s' % (name, zone.domain), type) raw_data = { "answers": [ { "answer": [ data ], } ] } if extra is not None and extra.get('answers'): raw_data['answers'] = extra.get('answers') post_data = json.dumps(raw_data) try: response = self.connection.request(action=action, data=post_data, method='POST') except NsOneException: e = sys.exc_info()[1] if e.message == 'record does not exist': raise RecordDoesNotExistError(value=e.message, driver=self, record_id=record.id) else: raise e record = self._to_record(item=response.parse_body(), zone=zone) return record def ex_zone_exists(self, zone_id, zones_list): """ Function to check if a `Zone` object exists. :param zone_id: ID of the `Zone` object. :type zone_id: ``str`` :param zones_list: A list containing `Zone` objects. :type zones_list: ``list``. :rtype: Returns `True` or `False`. 
""" zone_ids = [] for zone in zones_list: zone_ids.append(zone.id) return zone_id in zone_ids def _to_zone(self, item): common_attr = ['zone', 'id', 'type'] extra = {} for key in item.keys(): if key not in common_attr: extra[key] = item.get(key) zone = Zone(domain=item['zone'], id=item['id'], type=item.get('type'), extra=extra, ttl=extra.get('ttl'), driver=self) return zone def _to_zones(self, items): zones = [] for item in items: zones.append(self._to_zone(item)) return zones def _to_record(self, item, zone): common_attr = ['id', 'short_answers', 'answers', 'domain', 'type'] extra = {} for key in item.keys(): if key not in common_attr: extra[key] = item.get(key) if item.get('answers') is not None: data = item.get('answers')[0]['answer'] else: data = item.get('short_answers') record = Record(id=item['id'], name=item['domain'], type=item['type'], data=data, zone=zone, driver=self, extra=extra) return record def _to_records(self, items, zone): records = [] for item in items: records.append(self._to_record(item, zone)) return records apache-libcloud-2.2.1/libcloud/dns/drivers/worldwidedns.py0000664000175000017500000004477612704474244023566 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" World Wide DNS Driver """ __all__ = [ 'WorldWideDNSDriver' ] import re from libcloud.common.types import LibcloudError from libcloud.common.worldwidedns import WorldWideDNSConnection from libcloud.dns.types import Provider, RecordType from libcloud.dns.types import ZoneDoesNotExistError from libcloud.dns.types import RecordError from libcloud.dns.types import RecordDoesNotExistError from libcloud.dns.base import DNSDriver, Zone, Record MAX_RECORD_ENTRIES = 40 # Maximum record entries for zone class WorldWideDNSError(LibcloudError): def __repr__(self): return ("") class WorldWideDNSDriver(DNSDriver): type = Provider.WORLDWIDEDNS name = 'World Wide DNS' website = 'https://www.worldwidedns.net/' connectionCls = WorldWideDNSConnection RECORD_TYPE_MAP = { RecordType.MX: 'MX', RecordType.CNAME: 'CNAME', RecordType.A: 'A', RecordType.NS: 'NS', RecordType.SRV: 'SRV', RecordType.TXT: 'TXT', } def __init__(self, key, secret=None, reseller_id=None, secure=True, host=None, port=None, **kwargs): """ :param key: API key or username to used (required) :type key: ``str`` :param secret: Secret password to be used (required) :type secret: ``str`` :param reseller_id: Reseller ID for reseller accounts :type reseller_id: ``str`` :param secure: Whether to use HTTPS or HTTP. Note: Some providers only support HTTPS, and it is on by default. :type secure: ``bool`` :param host: Override hostname used for connections. :type host: ``str`` :param port: Override port used for connections. :type port: ``int`` :return: ``None`` """ super(WorldWideDNSDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, **kwargs) self.reseller_id = reseller_id def list_zones(self): """ Return a list of zones. 
:return: ``list`` of :class:`Zone` For more info, please see: https://www.worldwidedns.net/dns_api_protocol_list.asp or https://www.worldwidedns.net/dns_api_protocol_list_reseller.asp """ action = '/api_dns_list.asp' if self.reseller_id is not None: action = '/api_dns_list_reseller.asp' zones = self.connection.request(action) if len(zones.body) == 0: return [] else: return self._to_zones(zones.body) def iterate_records(self, zone): """ Return a generator to iterate over records for the provided zone. :param zone: Zone to list records for. :type zone: :class:`Zone` :rtype: ``generator`` of :class:`Record` """ records = self._to_records(zone) for record in records: yield record def get_zone(self, zone_id): """ Return a Zone instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :rtype: :class:`Zone` """ zones = self.list_zones() zone = [zone for zone in zones if zone.id == zone_id] if len(zone) == 0: raise ZoneDoesNotExistError(driver=self, value="The zone doesn't exists", zone_id=zone_id) return zone[0] def get_record(self, zone_id, record_id): """ Return a Record instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :param record_id: ID number of the required record. :type record_id: ``str`` :rtype: :class:`Record` """ zone = self.get_zone(zone_id) try: if int(record_id) not in range(1, MAX_RECORD_ENTRIES + 1): raise RecordDoesNotExistError(value="Record doesn't exists", driver=zone.driver, record_id=record_id) except ValueError: raise WorldWideDNSError( value="Record id should be a string number", driver=self) subdomain = zone.extra.get('S%s' % record_id) type = zone.extra.get('T%s' % record_id) data = zone.extra.get('D%s' % record_id) record = self._to_record(record_id, subdomain, type, data, zone) return record def update_zone(self, zone, domain, type='master', ttl=None, extra=None, ex_raw=False): """ Update an existing zone. :param zone: Zone to update. :type zone: :class:`Zone` :param domain: Zone domain name (e.g. 
example.com) :type domain: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` :param extra: Extra attributes (driver specific) (optional). Values not specified such as *SECURE*, *IP*, *FOLDER*, *HOSTMASTER*, *REFRESH*, *RETRY* and *EXPIRE* will be kept as already is. The same will be for *S(1 to 40)*, *T(1 to 40)* and *D(1 to 40)* if not in raw mode and for *ZONENS* and *ZONEDATA* if it is. :type extra: ``dict`` :param ex_raw: Mode we use to do the update using zone file or not. :type ex_raw: ``bool`` :rtype: :class:`Zone` For more info, please see https://www.worldwidedns.net/dns_api_protocol_list_domain.asp or https://www.worldwidedns.net/dns_api_protocol_list_domain_raw.asp or https://www.worldwidedns.net/dns_api_protocol_list_domain_reseller.asp or https://www.worldwidedns.net/dns_api_protocol_list_domain_raw_reseller.asp """ if extra is not None: not_specified = [key for key in zone.extra.keys() if key not in extra.keys()] else: not_specified = zone.extra.keys() if ttl is None: ttl = zone.ttl params = {'DOMAIN': domain, 'TTL': ttl} for key in not_specified: params[key] = zone.extra[key] if extra is not None: params.update(extra) if ex_raw: action = '/api_dns_modify_raw.asp' if self.reseller_id is not None: action = '/api_dns_modify_raw_reseller.asp' method = 'POST' else: action = '/api_dns_modify.asp' if self.reseller_id is not None: action = '/api_dns_modify_reseller.asp' method = 'GET' response = self.connection.request(action, params=params, # noqa method=method) zone = self.get_zone(zone.id) return zone def update_record(self, record, name, type, data, extra=None): """ Update an existing record. :param record: Record to update. :type record: :class:`Record` :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. 
:type name: ``str`` :param type: DNS record type (MX, CNAME, A, NS, SRV, TXT). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: Contains 'entry' Entry position (1 thru 40) :type extra: ``dict`` :rtype: :class:`Record` """ if (extra is None) or ('entry' not in extra): raise WorldWideDNSError(value="You must enter 'entry' parameter", driver=self) record_id = extra.get('entry') if name == '': name = '@' if type not in self.RECORD_TYPE_MAP: raise RecordError(value="Record type is not allowed", driver=record.zone.driver, record_id=name) zone = record.zone extra = {'S%s' % record_id: name, 'T%s' % record_id: type, 'D%s' % record_id: data} zone = self.update_zone(zone, zone.domain, extra=extra) record = self.get_record(zone.id, record_id) return record def create_zone(self, domain, type='master', ttl=None, extra=None): """ Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` :param extra: Extra attributes (driver specific). (optional). Possible parameter in here should be *DYN* which values should be 1 for standart and 2 for dynamic. Default is 1. 
:type extra: ``dict`` :rtype: :class:`Zone` For more info, please see https://www.worldwidedns.net/dns_api_protocol_new_domain.asp or https://www.worldwidedns.net/dns_api_protocol_new_domain_reseller.asp """ if type == 'master': _type = 0 elif type == 'slave': _type = 1 if extra: dyn = extra.get('DYN') or 1 else: dyn = 1 params = {'DOMAIN': domain, 'TYPE': _type} action = '/api_dns_new_domain.asp' if self.reseller_id is not None: params['DYN'] = dyn action = '/api_dns_new_domain_reseller.asp' self.connection.request(action, params=params) zone = self.get_zone(domain) if ttl is not None: zone = self.update_zone(zone, zone.domain, ttl=ttl) return zone def create_record(self, name, zone, type, data, extra=None): """ Create a new record. We can create 40 record per domain. If all slots are full, we can replace one of them by choosing a specific entry in ``extra`` argument. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone: Zone where the requested record is created. :type zone: :class:`Zone` :param type: DNS record type (MX, CNAME, A, NS, SRV, TXT). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: Contains 'entry' Entry position (1 thru 40) :type extra: ``dict`` :rtype: :class:`Record` """ if (extra is None) or ('entry' not in extra): # If no entry is specified, we look for an available one. If all # are full, raise error. 
record_id = self._get_available_record_entry(zone) if not record_id: raise WorldWideDNSError(value="All record entries are full", driver=zone.driver) else: record_id = extra.get('entry') if name == '': name = '@' if type not in self.RECORD_TYPE_MAP: raise RecordError(value="Record type is not allowed", driver=zone.driver, record_id=record_id) extra = {'S%s' % record_id: name, 'T%s' % record_id: type, 'D%s' % record_id: data} zone = self.update_zone(zone, zone.domain, extra=extra) record = self.get_record(zone.id, record_id) return record def delete_zone(self, zone): """ Delete a zone. Note: This will delete all the records belonging to this zone. :param zone: Zone to delete. :type zone: :class:`Zone` :rtype: ``bool`` For more information, please see https://www.worldwidedns.net/dns_api_protocol_delete_domain.asp or https://www.worldwidedns.net/dns_api_protocol_delete_domain_reseller.asp """ params = {'DOMAIN': zone.domain} action = '/api_dns_delete_domain.asp' if self.reseller_id is not None: action = '/api_dns_delete_domain_reseller.asp' response = self.connection.request(action, params=params) return response.success() def delete_record(self, record): """ Delete a record. :param record: Record to delete. :type record: :class:`Record` :rtype: ``bool`` """ zone = record.zone for index in range(MAX_RECORD_ENTRIES): if record.name == zone.extra['S%s' % (index + 1)]: entry = index + 1 break extra = {'S%s' % entry: '', 'T%s' % entry: 'NONE', 'D%s' % entry: ''} self.update_zone(zone, zone.domain, extra=extra) return True def ex_view_zone(self, domain, name_server): """ View zone file from a name server :param domain: Domain name. :type domain: ``str`` :param name_server: Name server to check. 
(1, 2 or 3) :type name_server: ``int`` :rtype: ``str`` For more info, please see: https://www.worldwidedns.net/dns_api_protocol_viewzone.asp or https://www.worldwidedns.net/dns_api_protocol_viewzone_reseller.asp """ params = {'DOMAIN': domain, 'NS': name_server} action = '/api_dns_viewzone.asp' if self.reseller_id is not None: action = '/api_dns_viewzone_reseller.asp' response = self.connection.request(action, params=params) return response.object def ex_transfer_domain(self, domain, user_id): """ This command will allow you, if you are a reseller, to change the userid on a domain name to another userid in your account ONLY if that new userid is already created. :param domain: Domain name. :type domain: ``str`` :param user_id: The new userid to connect to the domain name. :type user_id: ``str`` :rtype: ``bool`` For more info, please see: https://www.worldwidedns.net/dns_api_protocol_transfer.asp """ if self.reseller_id is None: raise WorldWideDNSError("This is not a reseller account", driver=self) params = {'DOMAIN': domain, 'NEW_ID': user_id} response = self.connection.request('/api_dns_transfer.asp', params=params) return response.success() def _get_available_record_entry(self, zone): """Return an available entry to store a record.""" entries = zone.extra for entry in range(1, MAX_RECORD_ENTRIES + 1): subdomain = entries.get('S%s' % entry) _type = entries.get('T%s' % entry) data = entries.get('D%s' % entry) if not any([subdomain, _type, data]): return entry return None def _to_zones(self, data): domain_list = re.split('\r?\n', data) zones = [] for line in domain_list: zone = self._to_zone(line) zones.append(zone) return zones def _to_zone(self, line): data = line.split('\x1f') name = data[0] if data[1] == "P": type = "master" domain_data = self._get_domain_data(name) resp_lines = re.split('\r?\n', domain_data.body) soa_block = resp_lines[:6] zone_data = resp_lines[6:] extra = {'HOSTMASTER': soa_block[0], 'REFRESH': soa_block[1], 'RETRY': soa_block[2], 'EXPIRE': 
soa_block[3], 'SECURE': soa_block[5]} ttl = soa_block[4] for line in range(MAX_RECORD_ENTRIES): line_data = zone_data[line].split('\x1f') extra['S%s' % (line + 1)] = line_data[0] _type = line_data[1] extra['T%s' % (line + 1)] = _type if _type != 'NONE' else '' try: extra['D%s' % (line + 1)] = line_data[2] except IndexError: extra['D%s' % (line + 1)] = '' elif data[1] == 'S': type = 'slave' extra = {} ttl = 0 return Zone(id=name, domain=name, type=type, ttl=ttl, driver=self, extra=extra) def _get_domain_data(self, name): params = {'DOMAIN': name} data = self.connection.request('/api_dns_list_domain.asp', params=params) return data def _to_records(self, zone): records = [] for record_id in range(1, MAX_RECORD_ENTRIES + 1): subdomain = zone.extra['S%s' % (record_id)] type = zone.extra['T%s' % (record_id)] data = zone.extra['D%s' % (record_id)] if subdomain and type and data: record = self._to_record( record_id, subdomain, type, data, zone) records.append(record) return records def _to_record(self, _id, subdomain, type, data, zone): return Record(id=_id, name=subdomain, type=type, data=data, zone=zone, driver=zone.driver) apache-libcloud-2.2.1/libcloud/dns/drivers/powerdns.py0000664000175000017500000004252512705475361022712 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License.You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ PowerDNS Driver """ import json import sys from libcloud.common.base import ConnectionKey, JsonResponse from libcloud.common.exceptions import BaseHTTPError from libcloud.common.types import InvalidCredsError, MalformedResponseError from libcloud.dns.base import DNSDriver, Zone, Record from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError from libcloud.dns.types import Provider, RecordType from libcloud.utils.py3 import httplib __all__ = [ 'PowerDNSDriver', ] class PowerDNSResponse(JsonResponse): def success(self): i = int(self.status) return i >= 200 and i <= 299 def parse_error(self): if self.status == httplib.UNAUTHORIZED: raise InvalidCredsError('Invalid provider credentials') try: body = self.parse_body() except MalformedResponseError: e = sys.exc_info()[1] body = '%s: %s' % (e.value, e.body) try: errors = [body['error']] except TypeError: # parse_body() gave us a simple string, not a dict. return '%s (HTTP Code: %d)' % (body, self.status) try: errors.append(body['errors']) except KeyError: # The PowerDNS API does not return the "errors" list all the time. 
pass return '%s (HTTP Code: %d)' % (' '.join(errors), self.status) class PowerDNSConnection(ConnectionKey): responseCls = PowerDNSResponse def add_default_headers(self, headers): headers['X-API-Key'] = self.key return headers class PowerDNSDriver(DNSDriver): type = Provider.POWERDNS name = 'PowerDNS' website = 'https://www.powerdns.com/' connectionCls = PowerDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.AFSDB: 'AFSDB', RecordType.CERT: 'CERT', RecordType.CNAME: 'CNAME', RecordType.DNSKEY: 'DNSKEY', RecordType.DS: 'DS', RecordType.HINFO: 'HINFO', RecordType.KEY: 'KEY', RecordType.LOC: 'LOC', RecordType.MX: 'MX', RecordType.NAPTR: 'NAPTR', RecordType.NS: 'NS', RecordType.NSEC: 'NSEC', RecordType.OPENPGPKEY: 'OPENPGPKEY', RecordType.PTR: 'PTR', RecordType.RP: 'RP', RecordType.RRSIG: 'RRSIG', RecordType.SOA: 'SOA', RecordType.SPF: 'SPF', RecordType.SSHFP: 'SSHFP', RecordType.SRV: 'SRV', RecordType.TLSA: 'TLSA', RecordType.TXT: 'TXT', } def __init__(self, key, secret=None, secure=False, host=None, port=None, api_version='experimental', **kwargs): """ PowerDNS Driver defaulting to using PowerDNS 3.x API (ie "experimental"). :param key: API key or username to used (required) :type key: ``str`` :param secure: Whether to use HTTPS or HTTP. Note: Off by default for PowerDNS. :type secure: ``bool`` :param host: Hostname used for connections. :type host: ``str`` :param port: Port used for connections. :type port: ``int`` :param api_version: Specifies the API version to use. ``experimental`` and ``v1`` are the only valid options. Defaults to using ``experimental`` (optional) :type api_version: ``str`` :return: ``None`` """ # libcloud doesn't really have a concept of "servers". We'll just use # localhost for now. self.ex_server = 'localhost' if api_version == 'experimental': # PowerDNS 3.x has no API root prefix. self.api_root = '' elif api_version == 'v1': # PowerDNS 4.x has an '/api/v1' root prefix. 
self.api_root = '/api/v1' else: raise NotImplementedError('Unsupported API version: %s' % api_version) super(PowerDNSDriver, self).__init__(key=key, secure=secure, host=host, port=port, **kwargs) def create_record(self, name, zone, type, data, extra=None): """ Create a new record. There are two PowerDNS-specific quirks here. Firstly, this method will silently clobber any pre-existing records that might already exist. For example, if PowerDNS already contains a "test.example.com" A record, and you create that record using this function, then the old A record will be replaced with your new one. Secondly, PowerDNS requires that you provide a ttl for all new records. In other words, the "extra" parameter must be ``{'ttl': }`` at a minimum. :param name: FQDN of the new record, for example "www.example.com". :type name: ``str`` :param zone: Zone where the requested record is created. :type zone: :class:`Zone` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: Extra attributes (driver specific, e.g. 'ttl'). Note that PowerDNS *requires* a ttl value for every record. 
:type extra: ``dict`` :rtype: :class:`Record` """ action = '%s/servers/%s/zones/%s' % (self.api_root, self.ex_server, zone.id) if extra is None or extra.get('ttl', None) is None: raise ValueError('PowerDNS requires a ttl value for every record') record = { 'content': data, 'disabled': False, 'name': name, 'ttl': extra['ttl'], 'type': type, } payload = {'rrsets': [{'name': name, 'type': type, 'changetype': 'REPLACE', 'records': [record] }] } try: self.connection.request(action=action, data=json.dumps(payload), method='PATCH') except BaseHTTPError: e = sys.exc_info()[1] if e.code == httplib.UNPROCESSABLE_ENTITY and \ e.message.startswith('Could not find domain'): raise ZoneDoesNotExistError(zone_id=zone.id, driver=self, value=e.message) raise e return Record(id=None, name=name, data=data, type=type, zone=zone, driver=self, ttl=extra['ttl']) def create_zone(self, domain, type=None, ttl=None, extra={}): """ Create a new zone. There are two PowerDNS-specific quirks here. Firstly, the "type" and "ttl" parameters are ignored (no-ops). The "type" parameter is simply not implemented, and PowerDNS does not have an ability to set a zone-wide default TTL. (TTLs must be set per-record.) Secondly, PowerDNS requires that you provide a list of nameservers for the zone upon creation. In other words, the "extra" parameter must be ``{'nameservers': ['ns1.example.org']}`` at a minimum. :param name: Zone domain name (e.g. example.com) :type name: ``str`` :param domain: Zone type (master / slave). (optional). Note that the PowerDNS driver does nothing with this parameter. :type domain: :class:`Zone` :param ttl: TTL for new records. (optional). Note that the PowerDNS driver does nothing with this parameter. :type ttl: ``int`` :param extra: Extra attributes (driver specific). For example, specify ``extra={'nameservers': ['ns1.example.org']}`` to set a list of nameservers for this new zone. 
        :type extra: ``dict``

        :rtype: :class:`Zone`
        """
        action = '%s/servers/%s/zones' % (self.api_root, self.ex_server)
        if extra is None or extra.get('nameservers', None) is None:
            msg = 'PowerDNS requires a list of nameservers for every new zone'
            raise ValueError(msg)
        payload = {'name': domain, 'kind': 'Native'}
        payload.update(extra)
        # PowerDNS zone ids are the domain with a trailing period.
        zone_id = domain + '.'
        try:
            self.connection.request(action=action, data=json.dumps(payload),
                                    method='POST')
        except BaseHTTPError:
            e = sys.exc_info()[1]
            if e.code == httplib.UNPROCESSABLE_ENTITY and \
               e.message.startswith("Domain '%s' already exists" % domain):
                raise ZoneAlreadyExistsError(zone_id=zone_id, driver=self,
                                             value=e.message)
            raise e
        return Zone(id=zone_id, domain=domain, type=None, ttl=None,
                    driver=self, extra=extra)

    def delete_record(self, record):
        """
        Use this method to delete a record.

        :param record: record to delete
        :type record: `Record`

        :rtype: ``bool``
        """
        action = '%s/servers/%s/zones/%s' % (self.api_root, self.ex_server,
                                             record.zone.id)
        payload = {'rrsets': [{'name': record.name,
                               'type': record.type,
                               'changetype': 'DELETE',
                               }]
                   }
        try:
            self.connection.request(action=action, data=json.dumps(payload),
                                    method='PATCH')
        except BaseHTTPError:
            # I'm not sure if we should raise a ZoneDoesNotExistError here. The
            # base DNS API only specifies that we should return a bool. So,
            # let's ignore this code for now.
            # e = sys.exc_info()[1]
            # if e.code == httplib.UNPROCESSABLE_ENTITY and \
            #    e.message.startswith('Could not find domain'):
            #     raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
            #                                 value=e.message)
            # raise e
            return False
        return True

    def delete_zone(self, zone):
        """
        Use this method to delete a zone.

        :param zone: zone to delete
        :type zone: `Zone`

        :rtype: ``bool``
        """
        action = '%s/servers/%s/zones/%s' % (self.api_root, self.ex_server,
                                             zone.id)
        try:
            self.connection.request(action=action, method='DELETE')
        except BaseHTTPError:
            # I'm not sure if we should raise a ZoneDoesNotExistError here. The
            # base DNS API only specifies that we should return a bool. So,
            # let's ignore this code for now.
            # e = sys.exc_info()[1]
            # if e.code == httplib.UNPROCESSABLE_ENTITY and \
            #    e.message.startswith('Could not find domain'):
            #     raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
            #                                 value=e.message)
            # raise e
            return False
        return True

    def get_zone(self, zone_id):
        """
        Return a Zone instance.

        (Note that PowerDNS does not support per-zone TTL defaults, so all
        Zone objects will have ``ttl=None``.)

        :param zone_id: name of the required zone with the trailing period,
                        for example "example.com.".
        :type zone_id: ``str``

        :rtype: :class:`Zone`
        :raises: ZoneDoesNotExistError: If no zone could be found.
        """
        action = '%s/servers/%s/zones/%s' % (self.api_root, self.ex_server,
                                             zone_id)
        try:
            response = self.connection.request(action=action, method='GET')
        except BaseHTTPError:
            e = sys.exc_info()[1]
            if e.code == httplib.UNPROCESSABLE_ENTITY:
                raise ZoneDoesNotExistError(zone_id=zone_id, driver=self,
                                            value=e.message)
            raise e
        return self._to_zone(response.object)

    def list_records(self, zone):
        """
        Return a list of all records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :return: ``list`` of :class:`Record`
        """
        action = '%s/servers/%s/zones/%s' % (self.api_root, self.ex_server,
                                             zone.id)
        try:
            response = self.connection.request(action=action, method='GET')
        except BaseHTTPError:
            e = sys.exc_info()[1]
            if e.code == httplib.UNPROCESSABLE_ENTITY and \
               e.message.startswith('Could not find domain'):
                raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
                                            value=e.message)
            raise e
        return self._to_records(response, zone)

    def list_zones(self):
        """
        Return a list of zones.

        :return: ``list`` of :class:`Zone`
        """
        action = '%s/servers/%s/zones' % (self.api_root, self.ex_server)
        response = self.connection.request(action=action, method='GET')
        return self._to_zones(response)

    def update_record(self, record, name, type, data, extra=None):
        """
        Update an existing record.

        :param record: Record to update.
        :type record: :class:`Record`

        :param name: FQDN of the new record, for example "www.example.com".
        :type name: ``str``

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        action = '%s/servers/%s/zones/%s' % (self.api_root, self.ex_server,
                                             record.zone.id)
        if extra is None or extra.get('ttl', None) is None:
            raise ValueError('PowerDNS requires a ttl value for every record')
        updated_record = {
            'content': data,
            'disabled': False,
            'name': name,
            'ttl': extra['ttl'],
            'type': type,
        }
        # PowerDNS has no in-place update: the old rrset is deleted and the
        # new one is sent as a REPLACE in the same PATCH payload.
        payload = {'rrsets': [{'name': record.name,
                               'type': record.type,
                               'changetype': 'DELETE',
                               },
                              {'name': name,
                               'type': type,
                               'changetype': 'REPLACE',
                               'records': [updated_record]
                               }]
                   }
        try:
            self.connection.request(action=action, data=json.dumps(payload),
                                    method='PATCH')
        except BaseHTTPError:
            e = sys.exc_info()[1]
            if e.code == httplib.UNPROCESSABLE_ENTITY and \
               e.message.startswith('Could not find domain'):
                raise ZoneDoesNotExistError(zone_id=record.zone.id,
                                            driver=self, value=e.message)
            raise e
        return Record(id=None, name=name, data=data, type=type,
                      zone=record.zone, driver=self, ttl=extra['ttl'])

    def _to_zone(self, item):
        # Convert a raw PowerDNS API zone dict into a libcloud Zone.
        extra = {}
        for e in ['kind', 'dnssec', 'account', 'masters', 'serial',
                  'notified_serial', 'last_check']:
            extra[e] = item[e]
        # XXX: we have to hard-code "ttl" to "None" here because PowerDNS does
        # not support per-zone ttl defaults. However, I don't know what "type"
        # should be; probably not None.
        return Zone(id=item['id'], domain=item['name'], type=None,
                    ttl=None, driver=self, extra=extra)

    def _to_zones(self, items):
        zones = []
        for item in items.object:
            zones.append(self._to_zone(item))
        return zones

    def _to_record(self, item, zone):
        # NOTE(review): records get id=None because the PowerDNS API response
        # carries no per-record id — lookups are by (name, type).
        return Record(id=None, name=item['name'], data=item['content'],
                      type=item['type'], zone=zone, driver=self,
                      ttl=item['ttl'])

    def _to_records(self, items, zone):
        records = []
        for item in items.object['records']:
            records.append(self._to_record(item, zone))
        return records
apache-libcloud-2.2.1/libcloud/dns/drivers/dnspod.py0000664000175000017500000002730013153541406022322 0ustar  kamikami00000000000000
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys from libcloud.dns.types import Provider, ZoneDoesNotExistError, \ ZoneAlreadyExistsError, RecordDoesNotExistError, RecordAlreadyExistsError from libcloud.dns.base import DNSDriver, Zone, Record, RecordType from libcloud.common.dnspod import DNSPodConnection, DNSPodResponse, \ DNSPodException from libcloud.utils.py3 import urlencode __all__ = [ 'DNSPodDNSDriver' ] ZONE_ALREADY_EXISTS_ERROR_MSGS = ['Domain is exists', 'Domain already exists as ' 'an alias of another domain'] ZONE_DOES_NOT_EXIST_ERROR_MSGS = ['Domain not under you or your user', 'Domain id invalid'] RECORD_DOES_NOT_EXIST_ERRORS_MSGS = ['Record id invalid'] class DNSPodDNSResponse(DNSPodResponse): pass class DNSPodDNSConnection(DNSPodConnection): responseCls = DNSPodDNSResponse class DNSPodDNSDriver(DNSDriver): name = 'DNSPod' website = 'https://dnspod.com' type = Provider.DNSPOD connectionCls = DNSPodDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.NS: 'NS', RecordType.PTR: 'PTR', RecordType.SOA: 'SOA', RecordType.SRV: 'SRV', RecordType.TXT: 'TXT' } def _make_request(self, action, method, data=None): data = data or {} if not data.get('user_token'): data['user_token'] = self.key if not data.get('format'): data['format'] = 'json' data = urlencode(data) r = self.connection.request(action=action, method=method, data=data) return r def list_zones(self): action = '/Domain.List' try: response = self._make_request(action=action, method='POST') except DNSPodException: e = sys.exc_info()[1] if e.message == 'No domains': return [] zones = self._to_zones(items=response.object['domains']) return zones def delete_zone(self, zone): """ :param zone: Zone to be deleted. 
:type zone: :class:`Zone` :return: Boolean """ action = '/Domain.Remove' data = {'domain_id': zone.id} try: self._make_request(action=action, method='POST', data=data) except DNSPodException: e = sys.exc_info()[1] if e.message in ZONE_DOES_NOT_EXIST_ERROR_MSGS: raise ZoneDoesNotExistError(value=e.message, driver=self, zone_id=zone.id) else: raise e return True def get_zone(self, zone_id): """ :param zone_id: Zone domain name (e.g. example.com) :return: :class:`Zone` """ action = '/Domain.Info' data = {'domain_id': zone_id} try: response = self._make_request(action=action, method='POST', data=data) except DNSPodException: e = sys.exc_info()[1] if e.message in ZONE_DOES_NOT_EXIST_ERROR_MSGS: raise ZoneDoesNotExistError(value=e.message, driver=self, zone_id=zone_id) else: raise e zone = self._to_zone(response.object['domain']) return zone def create_zone(self, domain, type='master', ttl=None, extra=None): """ :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param type: Zone type (This is not really used. See API docs for extra parameters) :type type: ``str`` :param ttl: TTL for new records (This is used through the extra param) :type ttl: ``int`` :param extra: Extra attributes that are specific to the driver such as ttl. :type extra: ``dict`` :rtype: :class:`Zone` """ action = '/Domain.Create' data = {'domain': domain} if extra is not None: data.update(extra) try: response = self._make_request(action=action, method='POST', data=data) except DNSPodException: e = sys.exc_info()[1] if e.message in ZONE_ALREADY_EXISTS_ERROR_MSGS: raise ZoneAlreadyExistsError(value=e.message, driver=self, zone_id=domain) else: raise e zone = self._to_zone(response.object['domain']) return zone def list_records(self, zone): """ Return a list of records for the provided zone. :param zone: Zone to list records for. 
:type zone: :class:`Zone` :return: ``list`` of :class:`Record` """ action = '/Record.List' data = {'domain_id': zone.id} try: response = self._make_request(action=action, data=data, method='POST') except DNSPodException: e = sys.exc_info()[1] if e.message in ZONE_DOES_NOT_EXIST_ERROR_MSGS: raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone.id) else: raise e records = self._to_records(response.object['records'], zone=zone) return records def delete_record(self, record): """ Delete a record. :param record: Record to delete. :type record: :class:`Record` :rtype: ``bool`` """ action = '/Record.Remove' data = {'domain_id': record.zone.id, 'record_id': record.id} try: self._make_request(action=action, method='POST', data=data) except DNSPodException: e = sys.exc_info()[1] if e.message in RECORD_DOES_NOT_EXIST_ERRORS_MSGS: raise RecordDoesNotExistError(record_id=record.id, driver=self, value='') elif e.message in ZONE_DOES_NOT_EXIST_ERROR_MSGS: raise ZoneDoesNotExistError(zone_id=record.zone.id, driver=self, value='') else: raise e return True def get_record(self, zone_id, record_id): """ Return a Record instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :param record_id: ID of the required record :type record_id: ``str`` :rtype: :class:`Record` """ zone = self.get_zone(zone_id=zone_id) action = '/Record.Info' data = {'domain_id': zone_id, 'record_id': record_id} try: response = self._make_request(action=action, method='POST', data=data) except DNSPodException: e = sys.exc_info()[1] if e.message in RECORD_DOES_NOT_EXIST_ERRORS_MSGS: raise RecordDoesNotExistError(record_id=record_id, driver=self, value='') elif e.message in ZONE_DOES_NOT_EXIST_ERROR_MSGS: raise ZoneDoesNotExistError(zone_id=zone_id, driver=self, value='') else: raise e record = self._to_record(response.object['record'], zone=zone) return record def create_record(self, name, zone, type, data, extra=None): """ Create a record. 
:param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone: Zone which the records will be created for. :type zone: :class:`Zone` :param type: DNS record type ( 'A', 'AAAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: (optional) Extra attributes ('prio', 'ttl'). :type extra: ``dict`` :rtype: :class:`Record` """ action = '/Record.Create' data = {'sub_domain': name, 'value': data, 'record_type': type, 'domain_id': zone.id} # ttl is optional # pass it through extra like this: extra={'ttl':ttl} # record_line is a required parameter # pass it through extra like this: extra={'record_line':'default'} # when creating MX records you need to pass mx through extra # mx ranges from 1 to 20 # extra = {'ttl': '13', 'record_line': default, 'mx': 1} if extra is not None: data.update(extra) try: response = self._make_request(action=action, method='POST', data=data) except DNSPodException: e = sys.exc_info()[1] if e.message == ('Record impacted, same record exists ' 'or CNAME/URL impacted'): raise RecordAlreadyExistsError(record_id='', driver=self, value=name) raise e record_id = response.object['record'].get('id') record = self.get_record(zone_id=zone.id, record_id=record_id) return record def _to_zone(self, item): common_attr = ['name', 'id', 'ttl'] extra = {} for key in item.keys(): if key not in common_attr: extra[key] = item.get(key) zone = Zone(domain=item.get('name') or item.get('domain'), id=item.get('id'), type=None, extra=extra, ttl=item.get('ttl'), driver=self) return zone def _to_zones(self, items): zones = [] for item in items: zones.append(self._to_zone(item)) return zones def _to_record(self, item, zone): common_attr = ['id', 'value', 'name', 'type'] extra = {} for key in item: if key 
not in common_attr: extra[key] = item.get(key) record = Record(id=item.get('id'), name=item.get('name') or item.get('sub_domain'), type=item.get('type') or item.get('record_type'), data=item.get('value'), zone=zone, driver=self, extra=extra) return record def _to_records(self, items, zone): records = [] for item in items: records.append(self._to_record(item, zone)) return records apache-libcloud-2.2.1/libcloud/dns/drivers/digitalocean.py0000664000175000017500000002353312701023453023455 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Digital Ocean DNS Driver """ __all__ = [ 'DigitalOceanDNSDriver' ] from libcloud.utils.py3 import httplib from libcloud.common.digitalocean import DigitalOcean_v2_BaseDriver from libcloud.common.digitalocean import DigitalOcean_v2_Connection from libcloud.dns.types import Provider, RecordType from libcloud.dns.base import DNSDriver, Zone, Record class DigitalOceanDNSDriver(DigitalOcean_v2_BaseDriver, DNSDriver): connectionCls = DigitalOcean_v2_Connection type = Provider.DIGITAL_OCEAN name = "DigitalOcean" website = 'https://www.digitalocean.com' RECORD_TYPE_MAP = { RecordType.NS: 'NS', RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.TXT: 'TXT', RecordType.SRV: 'SRV', } def list_zones(self): """ Return a list of zones. :return: ``list`` of :class:`Zone` """ data = self._paginated_request('/v2/domains', 'domains') return list(map(self._to_zone, data)) def list_records(self, zone): """ Return a list of records for the provided zone. :param zone: Zone to list records for. :type zone: :class:`Zone` :return: ``list`` of :class:`Record` """ data = self._paginated_request('/v2/domains/%s/records' % (zone.id), 'domain_records') # TODO: Not use list comprehension to add zone to record for proper data map # functionality? This passes a reference to zone for each data currently # to _to_record which returns a Record. map() does not take keywords return list(map(self._to_record, data, [zone for z in data])) def get_zone(self, zone_id): """ Return a Zone instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :rtype: :class:`Zone` """ data = self.connection.request('/v2/domains/%s' % (zone_id)).object['domain'] return self._to_zone(data) def get_record(self, zone_id, record_id): """ Return a Record instance. 
:param zone_id: ID of the required zone :type zone_id: ``str`` :param record_id: ID of the required record :type record_id: ``str`` :rtype: :class:`Record` """ data = self.connection.request('/v2/domains/%s/records/%s' % (zone_id, record_id)).object['domain_record'] # TODO: Any way of not using get_zone which polls the API again # without breaking the DNSDriver.get_record parameters? return self._to_record(data, self.get_zone(zone_id)) def create_zone(self, domain, type='master', ttl=None, extra=None): """ Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param type: Zone type (master / slave) (does nothing). :type type: ``str`` :param ttl: TTL for new records. (does nothing) :type ttl: ``int`` :param extra: Extra attributes (to set ip). (optional) Note: This can be used to set the default A record with {"ip" : "IP.AD.DR.ESS"} otherwise 127.0.0.1 is used :type extra: ``dict`` :rtype: :class:`Zone` """ params = {'name': domain} try: params['ip_address'] = extra['ip'] except: params['ip_address'] = '127.0.0.1' res = self.connection.request('/v2/domains', params=params, method='POST') return Zone(id=res.object['domain']['name'], domain=res.object['domain']['name'], type='master', ttl=1800, driver=self, extra={}) def create_record(self, name, zone, type, data, extra=None): """ Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone: Zone where the requested record is created. :type zone: :class:`Zone` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: Extra attributes for MX and SRV. 
(Depends on record) {"priority" : 0, "port" : 443, "weight" : 100} :type extra: ``dict`` :rtype: :class:`Record` """ params = { "type": self.RECORD_TYPE_MAP[type], "name": name, "data": data } if extra: try: params['priority'] = extra['priority'] except KeyError: params['priority'] = 'null' try: params['port'] = extra['port'] except KeyError: params['port'] = 'null' try: params['weight'] = extra['weight'] except KeyError: params['weight'] = 'null' res = self.connection.request('/v2/domains/%s/records' % zone.id, params=params, method='POST') return Record(id=res.object['domain_record']['id'], name=res.object['domain_record']['name'], type=type, data=data, zone=zone, driver=self, extra=extra) def update_record(self, record, name=None, type=None, data=None, extra=None): """ Update an existing record. :param record: Record to update. :type record: :class:`Record` :param name: Record name without the domain name (e.g. www). (Ignored) Note: The value is pulled from the record being updated :type name: ``str`` :param type: DNS record type (A, AAAA, ...). (Ignored) Note: Updating records does not support changing type so this value is ignored :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: (optional) Extra attributes (driver specific). 
:type extra: ``dict`` :rtype: :class:`Record` """ params = { "type": record.type, "name": record.name, "data": data } if data is None: params['data'] = record.data if extra: try: params['priority'] = extra['priority'] except KeyError: params['priority'] = 'null' try: params['port'] = extra['port'] except KeyError: params['port'] = 'null' try: params['weight'] = extra['weight'] except KeyError: params['weight'] = 'null' res = self.connection.request('/v2/domains/%s/records/%s' % (record.zone.id, record.id), params=params, method='PUT') return Record(id=res.object['domain_record']['id'], name=res.object['domain_record']['name'], type=record.type, data=data, zone=record.zone, driver=self, extra=extra) def delete_zone(self, zone): """ Delete a zone. Note: This will delete all the records belonging to this zone. :param zone: Zone to delete. :type zone: :class:`Zone` :rtype: ``bool`` """ params = {} res = self.connection.request('/v2/domains/%s' % zone.id, params=params, method='DELETE') return res.status == httplib.NO_CONTENT def delete_record(self, record): """ Delete a record. :param record: Record to delete. 
:type record: :class:`Record` :rtype: ``bool`` """ params = {} res = self.connection.request('/v2/domains/%s/records/%s' % ( record.zone.id, record.id), params=params, method='DELETE') return res.status == httplib.NO_CONTENT def _to_record(self, data, zone=None): extra = {'port': data['port'], 'priority': data['priority'], 'weight': data['weight']} return Record(id=data['id'], name=data['name'], type=self._string_to_record_type(data['type']), data=data['data'], zone=zone, driver=self, extra=extra) def _to_zone(self, data): extra = {'zone_file': data['zone_file']} return Zone(id=data['name'], domain=data['name'], type='master', ttl=data['ttl'], driver=self, extra=extra) apache-libcloud-2.2.1/libcloud/dns/drivers/google.py0000664000175000017500000003046112705475361022321 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
__all__ = [
    'GoogleDNSDriver'
]

# API docs: https://cloud.google.com/dns/api/v1
API_VERSION = 'v1'

import re

from libcloud.common.google import GoogleResponse, GoogleBaseConnection
from libcloud.common.google import ResourceNotFoundError
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record


class GoogleDNSResponse(GoogleResponse):
    pass


class GoogleDNSConnection(GoogleBaseConnection):
    host = "www.googleapis.com"
    responseCls = GoogleDNSResponse

    def __init__(self, user_id, key, secure, auth_type=None,
                 credential_file=None, project=None, **kwargs):
        super(GoogleDNSConnection, self).\
            __init__(user_id, key, secure=secure, auth_type=auth_type,
                     credential_file=credential_file, **kwargs)
        # All requests are scoped to the configured project.
        self.request_path = '/dns/%s/projects/%s' % (API_VERSION, project)


class GoogleDNSDriver(DNSDriver):
    type = Provider.GOOGLE
    name = 'Google DNS'
    connectionCls = GoogleDNSConnection
    website = 'https://cloud.google.com/'

    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.PTR: 'PTR',
        RecordType.SOA: 'SOA',
        RecordType.SPF: 'SPF',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
    }

    def __init__(self, user_id, key, project=None, auth_type=None,
                 scopes=None, **kwargs):
        self.auth_type = auth_type
        self.project = project
        self.scopes = scopes
        if not self.project:
            raise ValueError('Project name must be specified using '
                             '"project" keyword.')
        super(GoogleDNSDriver, self).__init__(user_id, key, **kwargs)

    def iterate_zones(self):
        """
        Return a generator to iterate over available zones.

        :rtype: ``generator`` of :class:`Zone`
        """
        return self._get_more('zones')

    def iterate_records(self, zone):
        """
        Return a generator to iterate over records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :rtype: ``generator`` of :class:`Record`
        """
        return self._get_more('records', zone=zone)

    def get_zone(self, zone_id):
        """
        Return a Zone instance.

        :param zone_id: ID of the required zone
        :type  zone_id: ``str``

        :rtype: :class:`Zone`
        """
        request = '/managedZones/%s' % (zone_id)

        try:
            response = self.connection.request(request, method='GET').object
        except ResourceNotFoundError:
            raise ZoneDoesNotExistError(value='',
                                        driver=self.connection.driver,
                                        zone_id=zone_id)

        return self._to_zone(response)

    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the required zone
        :type  zone_id: ``str``

        :param record_id: ID of the required record
        :type  record_id: ``str``

        :rtype: :class:`Record`
        """
        # Record ids are synthesized as "<type>:<name>" (see _to_record).
        (record_type, record_name) = record_id.split(':', 1)

        params = {
            'name': record_name,
            'type': record_type,
        }

        request = '/managedZones/%s/rrsets' % (zone_id)

        try:
            response = self.connection.request(request, method='GET',
                                               params=params).object
        except ResourceNotFoundError:
            raise ZoneDoesNotExistError(value='',
                                        driver=self.connection.driver,
                                        zone_id=zone_id)

        if len(response['rrsets']) > 0:
            zone = self.get_zone(zone_id)
            return self._to_record(response['rrsets'][0], zone)

        raise RecordDoesNotExistError(value='',
                                      driver=self.connection.driver,
                                      record_id=record_id)

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        :param domain: Zone domain name (e.g. example.com.) with a \'.\'
                       at the end.
        :type domain: ``str``

        :param type: Zone type (master is the only one supported).
        :type  type: ``str``

        :param ttl: TTL for new records. (unused)
        :type  ttl: ``int``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Zone`
        """
        name = None
        description = ''

        if extra:
            description = extra.get('description')
            name = extra.get('name')

        if name is None:
            # Managed-zone names have a restricted character set; derive a
            # valid one from the domain when the caller supplies none.
            name = self._cleanup_domain(domain)

        data = {
            'dnsName': domain,
            'name': name,
            'description': description,
        }

        request = '/managedZones'
        response = self.connection.request(request, method='POST',
                                           data=data).object
        return self._to_zone(response)

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        :param name: Record name fully qualified, with a \'.\' at the end.
        :type  name: ``str``

        :param zone: Zone where the requested record is created.
        :type  zone: :class:`Zone`

        :param type: DNS record type (A, AAAA, ...).
        :type  type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type  data: ``str``

        :param extra: Extra attributes. (optional)
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        # NOTE(review): despite the docstring, ``data`` is treated as a dict
        # carrying 'ttl' and 'rrdatas' keys — confirm against callers.
        ttl = data.get('ttl', 0)
        rrdatas = data.get('rrdatas', [])

        data = {
            'additions': [
                {
                    'name': name,
                    'type': type,
                    'ttl': int(ttl),
                    'rrdatas': rrdatas,
                }
            ]
        }
        request = '/managedZones/%s/changes' % (zone.id)
        response = self.connection.request(request, method='POST',
                                           data=data).object
        return self._to_record(response['additions'][0], zone)

    def delete_zone(self, zone):
        """
        Delete a zone.

        Note: This will delete all the records belonging to this zone.

        :param zone: Zone to delete.
        :type  zone: :class:`Zone`

        :rtype: ``bool``
        """
        request = '/managedZones/%s' % (zone.id)
        response = self.connection.request(request, method='DELETE')
        return response.success()

    def delete_record(self, record):
        """
        Delete a record.

        :param record: Record to delete.
        :type  record: :class:`Record`

        :rtype: ``bool``
        """
        data = {
            'deletions': [
                {
                    'name': record.name,
                    'type': record.type,
                    'rrdatas': record.data['rrdatas'],
                    'ttl': record.data['ttl']
                }
            ]
        }
        request = '/managedZones/%s/changes' % (record.zone.id)
        response = self.connection.request(request, method='POST',
                                           data=data)
        return response.success()

    def ex_bulk_record_changes(self, zone, records):
        """
        Bulk add and delete records.

        :param zone: Zone where the requested record changes are done.
        :type  zone: :class:`Zone`

        :param records: Dictionary of additions list or deletions list, or
                        both of resourceRecordSets. For example:
                        {'additions': [{'rrdatas': ['127.0.0.1'],
                                        'kind': 'dns#resourceRecordSet',
                                        'type': 'A',
                                        'name': 'www.example.com.',
                                        'ttl': '300'}],
                         'deletions': [{'rrdatas': ['127.0.0.1'],
                                        'kind': 'dns#resourceRecordSet',
                                        'type': 'A',
                                        'name': 'www2.example.com.',
                                        'ttl': '300'}]}
        :type  records: ``dict``

        :return: A dictionary of Record additions and deletions.
        :rtype: ``dict`` of additions and deletions of :class:`Record`
        """
        request = '/managedZones/%s/changes' % (zone.id)
        response = self.connection.request(request, method='POST',
                                           data=records).object
        response = response or {}

        response_data = {
            'additions': self._to_records(response.get('additions', []),
                                          zone),
            'deletions': self._to_records(response.get('deletions', []),
                                          zone),
        }

        return response_data

    def _get_more(self, rtype, **kwargs):
        # Generator that follows nextPageToken pagination until exhausted.
        last_key = None
        exhausted = False
        while not exhausted:
            items, last_key, exhausted = self._get_data(rtype, last_key,
                                                        **kwargs)
            for item in items:
                yield item

    def _get_data(self, rtype, last_key, **kwargs):
        params = {}

        if last_key:
            params['pageToken'] = last_key

        if rtype == 'zones':
            request = '/managedZones'
            transform_func = self._to_zones
            r_key = 'managedZones'
        elif rtype == 'records':
            zone = kwargs['zone']
            request = '/managedZones/%s/rrsets' % (zone.id)
            transform_func = self._to_records
            r_key = 'rrsets'

        response = self.connection.request(request, method='GET',
                                           params=params,)

        if response.success():
            nextpage = response.object.get('nextPageToken', None)
            items = transform_func(response.object.get(r_key), **kwargs)
            exhausted = False if nextpage is not None else True
            return items, nextpage, exhausted
        else:
            return [], None, True

    def _ex_connection_class_kwargs(self):
        return {'auth_type': self.auth_type,
                'project': self.project,
                'scopes': self.scopes}

    def _to_zones(self, response):
        zones = []
        for r in response:
            zones.append(self._to_zone(r))
        return zones

    def _to_zone(self, r):
        extra = {}

        if 'description' in r:
            extra['description'] = r.get('description')

        extra['creationTime'] = r.get('creationTime')
        extra['nameServers'] = r.get('nameServers')
        extra['id'] = r.get('id')

        return Zone(id=r['name'], domain=r['dnsName'],
                    type='master', ttl=0, driver=self, extra=extra)

    def _to_records(self, response, zone):
        records = []
        for r in response:
            records.append(self._to_record(r, zone))
        return records

    def _to_record(self, r, zone):
        # Synthesize a stable id from type and name; the raw rrset dict is
        # kept whole in ``data``.
        record_id = '%s:%s' % (r['type'], r['name'])
        return Record(id=record_id, name=r['name'],
                      type=r['type'], data=r, zone=zone,
                      driver=self, ttl=r.get('ttl', None), extra={})

    def _cleanup_domain(self, domain):
        # name can only contain lower case alphanumeric characters and hyphens
        domain = re.sub(r'[^a-zA-Z0-9-]', '-', domain)
        if domain[-1] == '-':
            domain = domain[:-1]
        return domain
apache-libcloud-2.2.1/libcloud/dns/drivers/luadns.py0000664000175000017500000002146312701023453022316 0ustar  kamikami00000000000000
import sys

try:
    import simplejson as json
except ImportError:
    import json

from libcloud.common.luadns import LuadnsResponse, LuadnsConnection
from libcloud.common.luadns import LuadnsException
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordDoesNotExistError

__all__ = [
    'LuadnsDNSDriver'
]


class LuadnsDNSResponse(LuadnsResponse):
    pass


class
LuadnsDNSConnection(LuadnsConnection):
    responseCls = LuadnsDNSResponse


class LuadnsDNSDriver(DNSDriver):
    type = Provider.LUADNS
    name = 'Luadns'
    website = 'https://www.luadns.com'
    connectionCls = LuadnsDNSConnection

    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.PTR: 'PTR',
        RecordType.SOA: 'SOA',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT'
    }

    def list_zones(self):
        """
        Return a list of zones.

        :return: ``list`` of :class:`Zone`
        """
        action = '/v1/zones'
        response = self.connection.request(action=action, method='GET')
        zones = self._to_zones(response.parse_body())

        return zones

    def get_zone(self, zone_id):
        """
        Return a Zone instance.

        :param zone_id: ID of the required zone
        :type  zone_id: ``str``

        :rtype: :class:`Zone`
        """
        action = '/v1/zones/%s' % zone_id
        try:
            response = self.connection.request(action=action)
        except LuadnsException:
            e = sys.exc_info()[1]
            # Luadns distinguishes errors only by message text.
            if e.message in ['Zone not found.', 'Resource not found.']:
                raise ZoneDoesNotExistError(zone_id=zone_id,
                                            value='', driver=self)
            else:
                raise e

        zone = self._to_zone(response.parse_body())

        return zone

    def delete_zone(self, zone):
        """
        Delete a zone.

        Note: This will delete all the records belonging to this zone.

        :param zone: Zone to delete.
        :type  zone: :class:`Zone`

        :rtype: ``bool``
        """
        action = '/v1/zones/%s' % zone.id
        try:
            response = self.connection.request(action=action,
                                               method='DELETE')
        except LuadnsException:
            e = sys.exc_info()[1]
            if e.message in ['Resource not found.', 'Zone not found.']:
                raise ZoneDoesNotExistError(zone_id=zone.id,
                                            value='', driver=self)
            else:
                raise e

        return response.status == 200

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        :param domain: Zone domain name (e.g. example.com)
        :type domain: ``str``

        :param type: Zone type (This is not really used. See API docs for
                     extra parameters).
        :type type: ``str``

        :param ttl: TTL for new records. (This is not really used)
        :type ttl: ``int``

        :param extra: Extra attributes (driver specific).
                      ('region_support', 'zone_data')
        :type extra: ``dict``

        :rtype: :class:`Zone`
        """
        action = '/v1/zones'
        data = json.dumps({'name': domain})
        try:
            response = self.connection.request(action=action,
                                               method='POST',
                                               data=data)
        except LuadnsException:
            e = sys.exc_info()[1]
            if e.message == "Zone '%s' is taken already." % domain:
                raise ZoneAlreadyExistsError(zone_id=domain, value='',
                                             driver=self)
            else:
                raise e

        zone = self._to_zone(response.parse_body())

        return zone

    def list_records(self, zone):
        """
        Return a list of records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :return: ``list`` of :class:`Record`
        """
        action = '/v1/zones/%s/records' % zone.id
        response = self.connection.request(action=action)
        records = self._to_records(response.parse_body(), zone=zone)

        return records

    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the required zone
        :type  zone_id: ``str``

        :param record_id: ID of the required record
        :type  record_id: ``str``

        :rtype: :class:`Record`
        """
        # Extra API call so the returned Record carries a full Zone object.
        zone = self.get_zone(zone_id=zone_id)
        action = '/v1/zones/%s/records/%s' % (zone_id, record_id)
        try:
            response = self.connection.request(action=action)
        except LuadnsException:
            e = sys.exc_info()[1]
            if e.message == 'Record not found.':
                raise RecordDoesNotExistError(record_id=record_id,
                                              driver=self, value='')
            else:
                raise e

        record = self._to_record(response.parse_body(), zone=zone)

        return record

    def delete_record(self, record):
        """
        Delete a record.

        :param record: Record to delete.
        :type  record: :class:`Record`

        :rtype: ``bool``
        """
        action = '/v1/zones/%s/records/%s' % (record.zone.id, record.id)
        try:
            response = self.connection.request(action=action,
                                               method='DELETE')
        except LuadnsException:
            e = sys.exc_info()[1]
            if e.message == 'Record not found.':
                raise RecordDoesNotExistError(record_id=record.id,
                                              driver=self, value='')
            else:
                raise e

        return response.status == 200

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a record.

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param zone: Zone which the records will be created for.
        :type zone: :class:`Zone`

        :param type: DNS record type ( 'A', 'AAAA', 'CNAME', 'MX', 'NS',
                     'PTR', 'SOA', 'SRV', 'TXT').
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: (optional) Extra attributes ('prio', 'ttl').
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        action = '/v1/zones/%s/records' % zone.id
        to_post = {'name': name, 'content': data, 'type': type,
                   'zone_id': int(zone.id)}
        # ttl is required to create a record for luadns
        # pass it through extra like this: extra={'ttl':ttl}
        if extra is not None:
            to_post.update(extra)
        data = json.dumps(to_post)
        try:
            response = self.connection.request(action=action,
                                               method='POST',
                                               data=data)
        except LuadnsException:
            e = sys.exc_info()[1]
            raise e

        record = self._to_record(response.parse_body(), zone=zone)

        return record

    def _to_zone(self, item):
        # Keep everything beyond the common attributes in ``extra``.
        common_attr = ['id', 'name']
        extra = {}
        for key in item:
            if key not in common_attr:
                extra[key] = item.get(key)
        zone = Zone(domain=item['name'], id=item['id'], type=None,
                    ttl=None, driver=self, extra=extra)

        return zone

    def _to_zones(self, items):
        zones = []
        for item in items:
            zones.append(self._to_zone(item))

        return zones

    def _to_record(self, item, zone):
        common_attr = ['id', 'content', 'name', 'type']
        extra = {}
        for key in item:
            if key not in common_attr:
                extra[key] = item.get(key)
        record = Record(id=item['id'], name=item['name'], type=item['type'],
                        data=item['content'], zone=zone, driver=self,
                        extra=extra)

        return record

    def _to_records(self, items, zone):
        records = []
        for item in items:
            records.append(self._to_record(item, zone))

        return records
apache-libcloud-2.2.1/libcloud/dns/drivers/cloudflare.py0000664000175000017500000003570512705475361023163 0ustar  kamikami00000000000000
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = [
    'CloudFlareDNSDriver'
]

import copy

from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.utils.py3 import httplib
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError

API_URL = 'https://www.cloudflare.com/api_json.html'
API_HOST = 'www.cloudflare.com'
API_PATH = '/api_json.html'

# API response attributes which are exposed through Zone.extra.
ZONE_EXTRA_ATTRIBUTES = [
    'display_name', 'zone_status', 'zone_type', 'host_id', 'host_pubname',
    'host_website', 'fqdns', 'vtxt', 'step', 'zone_status_class',
    'zone_status_desc', 'orig_registrar', 'orig_dnshost', 'orig_ns_names'
]

# API response attributes which are exposed through Record.extra.
RECORD_EXTRA_ATTRIBUTES = [
    'rec_tag', 'display_name', 'pro', 'display_content', 'ttl_ceil',
    'ssl_id', 'ssl_status', 'ssl_expires_on', 'auto_ttl', 'service_mode'
]


class CloudFlareDNSResponse(JsonResponse):
    def success(self):
        # Any 2xx status the API uses counts as success.
        return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]

    def parse_body(self):
        """
        Parse the JSON body and translate API-level errors into libcloud
        exceptions. The request context (set via connection.set_context())
        supplies the record id / zone domain needed for the specific
        DoesNotExist exceptions.
        """
        body = super(CloudFlareDNSResponse, self).parse_body()
        body = body or {}

        result = body.get('result', None)
        error_code = body.get('err_code', None)
        msg = body.get('msg', None)
        is_error_result = result == 'error'

        context = self.connection.context or {}
        context_record_id = context.get('record_id', None)
        context_zone_domain = context.get('zone_domain', None)

        # Error detection relies on matching the API's message text since
        # the API does not return structured error identifiers for these.
        if (is_error_result and 'invalid record id' in msg.lower() and
                context_record_id):
            raise RecordDoesNotExistError(value=msg,
                                          driver=self.connection.driver,
                                          record_id=context_record_id)
        elif (is_error_result and 'invalid zone' in msg.lower() and
                context_zone_domain):
            raise ZoneDoesNotExistError(value=msg,
                                        driver=self.connection.driver,
                                        zone_id=context_zone_domain)

        if error_code == 'E_UNAUTH':
            raise InvalidCredsError(msg)
        elif result == 'error' or error_code is not None:
            msg = 'Request failed: %s' % (self.body)
            raise LibcloudError(value=msg, driver=self.connection.driver)

        return body


class CloudFlareDNSConnection(ConnectionUserAndKey):
    host = API_HOST
    secure = True
    responseCls = CloudFlareDNSResponse

    def request(self, action, params=None, data=None, headers=None,
                method='GET'):
        """
        Issue a request against the client API. The credentials and the
        action name are always merged into the query parameters.

        NOTE: the ``data`` argument is accepted but not forwarded --
        ``data=None`` is always passed to the base request. Every call in
        this driver sends its payload via query parameters.
        """
        params = params or {}
        data = data or {}

        base_params = {
            'email': self.user_id,
            'tkn': self.key,
            'a': action
        }

        # Deep-copy so the caller's dict is never mutated.
        params = copy.deepcopy(params)
        params.update(base_params)

        return super(CloudFlareDNSConnection, self).request(action=API_PATH,
                                                            params=params,
                                                            data=None,
                                                            method=method,
                                                            headers=headers)


class CloudFlareDNSDriver(DNSDriver):
    type = Provider.CLOUDFLARE
    name = 'CloudFlare DNS'
    website = 'https://www.cloudflare.com'
    connectionCls = CloudFlareDNSConnection

    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.TXT: 'TXT',
        RecordType.SPF: 'SPF',
        RecordType.NS: 'NS',
        RecordType.SRV: 'SRV',
        # NOTE(review): RecordType.URL is mapped to the provider type 'LOC'
        # -- confirm against the CloudFlare client API documentation.
        RecordType.URL: 'LOC'
    }

    def iterate_zones(self):
        # Return an iterator over all zones on the account.
        # TODO: Support pagination
        result = self.connection.request(action='zone_load_multi').object
        zones = self._to_zones(data=result['response']['zones']['objs'])
        return zones

    def iterate_records(self, zone):
        # Return an iterator over all records in the provided zone.
        # TODO: Support pagination
        params = {'z': zone.domain}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='rec_load_all', params=params)
        data = resp.object['response']['recs']['objs']
        records = self._to_records(zone=zone, data=data)
        return records

    def get_zone(self, zone_id):
        # The API has no single-zone lookup, so all zones are listed and
        # filtered client side.
        # TODO: This is not efficient
        zones = self.list_zones()

        try:
            zone = [z for z in zones if z.id == zone_id][0]
        except IndexError:
            raise ZoneDoesNotExistError(value='', driver=self,
                                        zone_id=zone_id)

        return zone

    def create_record(self, name, zone, type, data, extra=None):
        # Create a new record in the provided zone; 'extra' may carry
        # 'ttl' (defaults to 120) and 'priority' (for MX / SRV records).
        extra = extra or {}
        params = {'name': name, 'z': zone.domain, 'type': type,
                  'content': data}

        params['ttl'] = extra.get('ttl', 120)

        if 'priority' in extra:
            # For MX and SRV records
            params['prio'] = extra['priority']

        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='rec_new', params=params)
        item = resp.object['response']['rec']['obj']
        record = self._to_record(zone=zone, item=item)
        return record

    def update_record(self, record, name=None, type=None, data=None,
                      extra=None):
        # Update an existing record; unspecified attributes keep the
        # record's current values.
        extra = extra or {}
        params = {'z': record.zone.domain, 'id': record.id}

        params['name'] = name or record.name
        params['type'] = type or record.type
        params['content'] = data or record.data
        params['ttl'] = extra.get('ttl', None) or record.extra['ttl']

        self.connection.set_context({'zone_domain': record.zone.domain})
        self.connection.set_context({'record_id': record.id})
        resp = self.connection.request(action='rec_edit', params=params)
        item = resp.object['response']['rec']['obj']
        record = self._to_record(zone=record.zone, item=item)
        return record

    def delete_record(self, record):
        # Delete the provided record; returns True on success.
        params = {'z': record.zone.domain, 'id': record.id}
        self.connection.set_context({'zone_domain': record.zone.domain})
        self.connection.set_context({'record_id': record.id})
        resp = self.connection.request(action='rec_delete', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_get_zone_stats(self, zone, interval=30):
        # Retrieve traffic statistics for the zone over the given interval.
        params = {'z': zone.domain, 'interval': interval}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='stats', params=params)
        result = resp.object['response']['result']['objs'][0]
        return result

    def ex_zone_check(self, zones):
        # Check multiple zones in a single API call; the API takes a
        # comma-separated list of zone domains.
        zone_domains = [zone.domain for zone in zones]
        zone_domains = ','.join(zone_domains)
        params = {'zones': zone_domains}
        resp = self.connection.request(action='zone_check', params=params)
        result = resp.object['response']['zones']
        return result

    def ex_get_ip_threat_score(self, ip):
        """
        Retrieve current threat score for a given IP. Note that scores are on
        a logarithmic scale, where a higher score indicates a higher threat.
        """
        params = {'ip': ip}
        resp = self.connection.request(action='ip_lkup', params=params)
        result = resp.object['response']
        return result

    def ex_get_zone_settings(self, zone):
        """
        Retrieve all current settings for a given zone.
        """
        params = {'z': zone.domain}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='zone_settings', params=params)
        result = resp.object['response']['result']['objs'][0]
        return result

    def ex_set_zone_security_level(self, zone, level):
        """
        Set the zone Basic Security Level to I'M UNDER ATTACK! / HIGH /
        MEDIUM / LOW / ESSENTIALLY OFF.

        :param level: Security level. Valid values are: help, high, med,
                      low, eoff.
        :type level: ``str``
        """
        params = {'z': zone.domain, 'v': level}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='sec_lvl', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_set_zone_cache_level(self, zone, level):
        """
        Set the zone caching level.

        :param level: Caching level. Valid values are: agg (aggresive),
                      basic.
        :type level: ``str``
        """
        params = {'z': zone.domain, 'v': level}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='cache_lvl', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_enable_development_mode(self, zone):
        """
        Enable development mode. When Development Mode is on the cache is
        bypassed. Development mode remains on for 3 hours or until when it
        is toggled back off.
        """
        params = {'z': zone.domain, 'v': 1}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='devmode', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_disable_development_mode(self, zone):
        """
        Disable development mode.
        """
        params = {'z': zone.domain, 'v': 0}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='devmode', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_purge_cached_files(self, zone):
        """
        Purge CloudFlare of any cached files.
        """
        params = {'z': zone.domain, 'v': 1}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='fpurge_ts', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_purge_cached_file(self, zone, url):
        """
        Purge single file from CloudFlare's cache.

        :param url: URL to the file to purge from cache.
        :type url: ``str``
        """
        params = {'z': zone.domain, 'url': url}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='zone_file_purge',
                                       params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_whitelist_ip(self, zone, ip):
        """
        Whitelist the provided IP.
        """
        params = {'z': zone.domain, 'key': ip}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='wl', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_blacklist_ip(self, zone, ip):
        """
        Blacklist the provided IP.
        """
        params = {'z': zone.domain, 'key': ip}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='ban', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_unlist_ip(self, zone, ip):
        """
        Remove provided ip from the whitelist and blacklist.
        """
        params = {'z': zone.domain, 'key': ip}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='nul', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_enable_ipv6_support(self, zone):
        """
        Enable IPv6 support for the provided zone.
        """
        params = {'z': zone.domain, 'v': 3}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='ipv46', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def ex_disable_ipv6_support(self, zone):
        """
        Disable IPv6 support for the provided zone.
        """
        params = {'z': zone.domain, 'v': 0}
        self.connection.set_context({'zone_domain': zone.domain})
        resp = self.connection.request(action='ipv46', params=params)
        result = resp.object
        return result.get('result', None) == 'success'

    def _to_zones(self, data):
        zones = []
        for item in data:
            zone = self._to_zone(item=item)
            zones.append(zone)
        return zones

    def _to_zone(self, item):
        # Convert an API zone object into a Zone; selected attributes are
        # exposed through Zone.extra.
        type = 'master'

        extra = {}
        extra['props'] = item.get('props', {})
        extra['confirm_code'] = item.get('confirm_code', {})
        extra['allow'] = item.get('allow', {})

        for attribute in ZONE_EXTRA_ATTRIBUTES:
            value = item.get(attribute, None)
            extra[attribute] = value

        zone = Zone(id=str(item['zone_id']), domain=item['zone_name'],
                    type=type, ttl=None, driver=self, extra=extra)
        return zone

    def _to_records(self, zone, data):
        records = []
        for item in data:
            record = self._to_record(zone=zone, item=item)
            records.append(record)
        return records

    def _to_record(self, zone, item):
        # Convert an API record object into a Record; selected attributes
        # are exposed through Record.extra.
        name = self._get_record_name(item=item)
        type = item['type']
        data = item['content']

        if item.get('ttl', None):
            ttl = int(item['ttl'])
        else:
            ttl = None

        extra = {}
        extra['ttl'] = ttl
        extra['props'] = item.get('props', {})

        for attribute in RECORD_EXTRA_ATTRIBUTES:
            value = item.get(attribute, None)
            extra[attribute] = value

        record = Record(id=str(item['rec_id']), name=name, type=type,
                        data=data, zone=zone, driver=self, ttl=ttl,
                        extra=extra)
        return record

    def _get_record_name(self, item):
        # Strip the zone name suffix so only the relative record name
        # remains; None represents a record on the bare domain itself.
        name = item['name'].replace('.' + item['zone_name'], '') or None
        if name:
            name = name.replace(item['zone_name'], '') or None
        return name
apache-libcloud-2.2.1/libcloud/dns/drivers/dnsimple.py0000664000175000017500000002300212701023453022636 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DNSimple DNS Driver
"""

__all__ = [
    'DNSimpleDNSDriver'
]

try:
    import simplejson as json
except ImportError:
    import json

from libcloud.common.dnsimple import DNSimpleDNSConnection
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.base import DNSDriver, Zone, Record

DEFAULT_ZONE_TTL = 3600


class DNSimpleDNSDriver(DNSDriver):
    type = Provider.DNSIMPLE
    name = 'DNSimple'
    website = 'https://dnsimple.com/'
    connectionCls = DNSimpleDNSConnection

    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.ALIAS: 'ALIAS',
        RecordType.CNAME: 'CNAME',
        RecordType.HINFO: 'HINFO',
        RecordType.MX: 'MX',
        RecordType.NAPTR: 'NAPTR',
        RecordType.NS: 'NS',
        # NOTE(review): plain string key, unlike the RecordType constants
        # used elsewhere -- presumably RecordType has no POOL member;
        # confirm against libcloud.dns.types.
        'POOL': 'POOL',
        RecordType.SOA: 'SOA',
        RecordType.SPF: 'SPF',
        RecordType.SRV: 'SRV',
        RecordType.SSHFP: 'SSHFP',
        RecordType.TXT: 'TXT',
        RecordType.URL: 'URL'
    }

    def list_zones(self):
        """
        Return a list of zones.
        :return: ``list`` of :class:`Zone`
        """
        response = self.connection.request('/v1/domains')
        zones = self._to_zones(response.object)
        return zones

    def list_records(self, zone):
        """
        Return a list of records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :return: ``list`` of :class:`Record`
        """
        response = self.connection.request('/v1/domains/%s/records' % zone.id)
        records = self._to_records(response.object, zone)
        return records

    def get_zone(self, zone_id):
        """
        Return a Zone instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :rtype: :class:`Zone`
        """
        response = self.connection.request('/v1/domains/%s' % zone_id)
        zone = self._to_zone(response.object)
        return zone

    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :param record_id: ID of the required record
        :type record_id: ``str``

        :rtype: :class:`Record`
        """
        response = self.connection.request('/v1/domains/%s/records/%s' %
                                           (zone_id, record_id))
        record = self._to_record(response.object, zone_id=zone_id)
        return record

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        :param domain: Zone domain name (e.g. example.com)
        :type domain: ``str``

        :param type: Zone type (All zones are master by design).
        :type type: ``str``

        :param ttl: TTL for new records. (This is not really used)
        :type ttl: ``int``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Zone`

        For more info, please see:
        http://developer.dnsimple.com/v1/domains/
        """
        r_json = {'name': domain}
        if extra is not None:
            r_json.update(extra)
        r_data = json.dumps({'domain': r_json})
        response = self.connection.request(
            '/v1/domains', method='POST', data=r_data)
        zone = self._to_zone(response.object)
        return zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param zone: Zone where the requested record is created.
        :type zone: :class:`Zone`

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        r_json = {'name': name, 'record_type': type, 'content': data}
        if extra is not None:
            r_json.update(extra)
        r_data = json.dumps({'record': r_json})
        response = self.connection.request('/v1/domains/%s/records' % zone.id,
                                           method='POST', data=r_data)
        record = self._to_record(response.object, zone=zone)
        return record

    def update_record(self, record, name, type, data, extra=None):
        """
        Update an existing record.

        :param record: Record to update.
        :type record: :class:`Record`

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        zone = record.zone
        # NOTE: the record type is not sent; only name/content (and any
        # extra attributes) are included in the update payload.
        r_json = {'name': name, 'content': data}
        if extra is not None:
            r_json.update(extra)
        r_data = json.dumps({'record': r_json})
        response = self.connection.request('/v1/domains/%s/records/%s' %
                                           (zone.id, record.id),
                                           method='PUT', data=r_data)
        record = self._to_record(response.object, zone=zone)
        return record

    def delete_zone(self, zone):
        """
        Delete a zone.

        Note: This will delete all the records belonging to this zone.

        :param zone: Zone to delete.
        :type zone: :class:`Zone`

        :rtype: ``bool``
        """
        self.connection.request('/v1/domains/%s' % zone.id, method='DELETE')
        return True

    def delete_record(self, record):
        """
        Delete a record.

        :param record: Record to delete.
        :type record: :class:`Record`

        :rtype: ``bool``
        """
        zone_id = record.zone.id
        self.connection.request('/v1/domains/%s/records/%s' % (zone_id,
                                                               record.id),
                                method='DELETE')
        return True

    def _to_zones(self, data):
        zones = []
        for zone in data:
            _zone = self._to_zone(zone)
            zones.append(_zone)
        return zones

    def _to_zone(self, data):
        # Convert an API domain object ({'domain': {...}}) into a Zone.
        domain = data.get('domain')
        id = domain.get('id')
        name = domain.get('name')
        extra = {'registrant_id': domain.get('registrant_id'),
                 'user_id': domain.get('user_id'),
                 'unicode_name': domain.get('unicode_name'),
                 'token': domain.get('token'),
                 'state': domain.get('state'),
                 'language': domain.get('language'),
                 'lockable': domain.get('lockable'),
                 'auto_renew': domain.get('auto_renew'),
                 'whois_protected': domain.get('whois_protected'),
                 'record_count': domain.get('record_count'),
                 'service_count': domain.get('service_count'),
                 'expires_on': domain.get('expires_on'),
                 'created_at': domain.get('created_at'),
                 'updated_at': domain.get('updated_at')}

        # All zones are primary by design
        type = 'master'

        return Zone(id=id, domain=name, type=type, ttl=DEFAULT_ZONE_TTL,
                    driver=self, extra=extra)

    def _to_records(self, data, zone):
        records = []
        for item in data:
            record = self._to_record(item, zone=zone)
            records.append(record)
        return records

    def _to_record(self, data, zone_id=None, zone=None):
        # Convert an API record object ({'record': {...}}) into a Record.
        # Either a Zone instance or a zone_id must be provided; the latter
        # triggers an extra get_zone() API call.
        if not zone:  # We need zone_id or zone
            zone = self.get_zone(zone_id)
        record = data.get('record')
        id = record.get('id')
        name = record.get('name')
        type = record.get('record_type')
        data = record.get('content')
        extra = {'ttl': record.get('ttl'),
                 'created_at': record.get('created_at'),
                 'updated_at': record.get('updated_at'),
                 'domain_id': record.get('domain_id'),
                 'priority': record.get('prio')}
        return Record(id=id, name=name, type=type, data=data, zone=zone,
                      driver=self,
ttl=record.get('ttl', None), extra=extra) apache-libcloud-2.2.1/libcloud/dns/drivers/godaddy.py0000664000175000017500000003714213153541406022453 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License.You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = [ 'GoDaddyDNSDriver' ] try: import simplejson as json except: import json from libcloud.common.base import ConnectionKey, JsonResponse from libcloud.common.types import LibcloudError from libcloud.utils.py3 import httplib from libcloud.dns.types import Provider, RecordType, RecordDoesNotExistError from libcloud.dns.base import DNSDriver, Zone, Record API_HOST = 'api.godaddy.com' VALID_RECORD_EXTRA_PARAMS = ['prio', 'ttl'] class GoDaddyDNSException(LibcloudError): def __init__(self, code, message): self.code = code self.message = message self.args = (code, message) def __str__(self): return self.__repr__() def __repr__(self): return ('' % (self.code, self.message)) class GoDaddyDNSResponse(JsonResponse): valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED, httplib.NO_CONTENT] def parse_body(self): if not self.body: return None # json.loads doesn't like the regex expressions used in godaddy schema self.body = self.body.replace('\\.', '\\\\.') data = json.loads(self.body) return data def parse_error(self): data = 
self.parse_body() raise GoDaddyDNSException(code=data['code'], message=data['message']) def success(self): return self.status in self.valid_response_codes class GoDaddyDNSConnection(ConnectionKey): responseCls = GoDaddyDNSResponse host = API_HOST allow_insecure = False def __init__(self, key, secret, secure=True, shopper_id=None, host=None, port=None, url=None, timeout=None, proxy_url=None, backoff=None, retry_delay=None): super(GoDaddyDNSConnection, self).__init__( key, secure=secure, host=host, port=port, url=url, timeout=timeout, proxy_url=proxy_url, backoff=backoff, retry_delay=retry_delay) self.key = key self.secret = secret self.shopper_id = shopper_id def add_default_headers(self, headers): if self.shopper_id is not None: headers['X-Shopper-Id'] = self.shopper_id headers['Content-type'] = 'application/json' headers['Authorization'] = "sso-key %s:%s" % \ (self.key, self.secret) return headers class GoDaddyDNSDriver(DNSDriver): """ A driver for GoDaddy DNS. This is for customers of GoDaddy who wish to purchase, update existing domains and manage records for DNS zones owned by GoDaddy NS servers. 
""" type = Provider.GODADDY name = 'GoDaddy DNS' website = 'https://www.godaddy.com/' connectionCls = GoDaddyDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.NS: 'SPF', RecordType.SRV: 'SRV', RecordType.TXT: 'TXT', } def __init__(self, shopper_id, key, secret, secure=True, host=None, port=None): """ Instantiate a new `GoDaddyDNSDriver` :param shopper_id: Your customer ID or shopper ID with GoDaddy :type shopper_id: ``str`` :param key: Your access key from developer.godaddy.com :type key: ``str`` :param secret: Your access key secret :type secret: ``str`` """ self.shopper_id = shopper_id super(GoDaddyDNSDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, shopper_id=str(shopper_id)) def list_zones(self): """ Return a list of zones (purchased domains) :return: ``list`` of :class:`Zone` """ result = self.connection.request( '/v1/domains/').object zones = self._to_zones(result) return zones def list_records(self, zone): """ Return a list of records for the provided zone. :param zone: Zone to list records for. :type zone: :class:`Zone` :return: ``list`` of :class:`Record` """ result = self.connection.request( '/v1/domains/%s/records' % (zone.domain)).object records = self._to_records(items=result, zone=zone) return records def create_record(self, name, zone, type, data, extra=None): """ Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone: Zone where the requested record is created. :type zone: :class:`Zone` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: Extra attributes (driver specific). 
                      (optional)
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        new_record = self._format_record(name, type, data, extra)
        self.connection.request(
            '/v1/domains/%s/records' % (zone.domain),
            method='PATCH', data=json.dumps([new_record]))
        # GoDaddy does not return record ids; a synthetic 'name:type' id is
        # generated instead (see _get_id_of_record).
        id = self._get_id_of_record(name, type)
        return Record(
            id=id, name=name,
            type=type, data=data,
            zone=zone, driver=self,
            ttl=new_record['ttl'],
            extra=extra)

    def update_record(self, record, name, type, data, extra=None):
        """
        Update an existing record.

        :param record: Record to update.
        :type record: :class:`Record`

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        new_record = self._format_record(name, type, data, extra)
        self.connection.request(
            '/v1/domains/%s/records/%s/%s' % (record.zone.domain,
                                              record.type, record.name),
            method='PUT', data=json.dumps([new_record]))
        id = self._get_id_of_record(name, type)
        return Record(
            id=id, name=name,
            type=type, data=data,
            zone=record.zone, driver=self,
            ttl=new_record['ttl'],
            extra=extra)

    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :param record_id: ID of the required record; this is the synthetic
                          'name:type' id produced by this driver.
        :type record_id: ``str``

        :rtype: :class:`Record`
        """
        # The synthetic id is split back into its name and type parts.
        parts = record_id.split(':')
        result = self.connection.request(
            '/v1/domains/%s/records/%s/%s' % (
                zone_id, parts[1], parts[0])).object
        if len(result) == 0:
            raise RecordDoesNotExistError(record_id, driver=self,
                                          record_id=record_id)
        return self._to_record(result[0], self.get_zone(zone_id))

    def get_zone(self, zone_id):
        """
        Get a zone (by domain)

        :param zone_id: The domain, not the ID
        :type zone_id: ``str``

        :rtype: :class:`Zone`
        """
        result = self.connection.request(
            '/v1/domains/%s/' % zone_id).object
        zone = self._to_zone(result)
        return zone

    def delete_zone(self, zone):
        """
        Delete a zone.

        Note: This will CANCEL a purchased domain

        :param zone: Zone to delete.
        :type zone: :class:`Zone`

        :rtype: ``bool``
        """
        self.connection.request(
            '/v1/domains/%s' % (zone.domain),
            method='DELETE')
        # no error means ok
        return True

    def ex_check_availability(self, domain, for_transfer=False):
        """
        Check the availability of the domain

        :param domain: the domain name e.g. wazzlewobbleflooble.com
        :type domain: ``str``

        :param for_transfer: Check if domain is available for transfer
        :type for_transfer: ``bool``

        :rtype: `list` of :class:`GoDaddyAvailability`
        """
        result = self.connection.request(
            '/v1/domains/available',
            method='GET',
            params={
                'domain': domain,
                'forTransfer': str(for_transfer)
            }
        ).object
        return GoDaddyAvailability(
            domain=result['domain'],
            available=result['available'],
            price=result['price'],
            currency=result['currency'],
            period=result['period']
        )

    def ex_list_tlds(self):
        """
        List available TLDs for sale

        :rtype: ``list`` of :class:`GoDaddyTLD`
        """
        result = self.connection.request(
            '/v1/domains/tlds',
            method='GET'
        ).object
        return self._to_tlds(result)

    def ex_get_purchase_schema(self, tld):
        """
        Get the schema that needs completing to purchase a new domain
        Use this in conjunction with ex_purchase_domain

        :param tld: The top level domain e.g com, eu, uk
        :type tld: ``str``

        :rtype: `dict` the JSON Schema
        """
        result = self.connection.request(
            '/v1/domains/purchase/schema/%s' % tld,
            method='GET'
        ).object
        return result

    def ex_get_agreements(self, tld, privacy=True):
        """
        Get the legal agreements for a tld
        Use this in conjunction with ex_purchase_domain

        :param tld: The top level domain e.g com, eu, uk
        :type tld: ``str``

        :rtype: `dict` the JSON Schema
        """
        result = self.connection.request(
            '/v1/domains/agreements',
            params={
                'tlds': tld,
                'privacy': str(privacy)
            },
            method='GET'
        ).object
        agreements = []
        for item in result:
            agreements.append(
                GoDaddyLegalAgreement(
                    agreement_key=item['agreementKey'],
                    title=item['title'],
                    url=item['url'],
                    content=item['content']))
        return agreements

    def ex_purchase_domain(self, purchase_request):
        """
        Purchase a domain with GoDaddy

        :param purchase_request: The completed document
            from ex_get_purchase_schema
        :type purchase_request: ``dict``

        :rtype: :class:`GoDaddyDomainPurchaseResponse` Your order
        """
        # NOTE(review): the dict is passed as 'data' without json.dumps(),
        # unlike the other write operations in this driver -- presumably the
        # base connection serializes it; confirm against
        # libcloud.common.base.Connection.request.
        result = self.connection.request(
            '/v1/domains/purchase',
            data=purchase_request,
            method='POST'
        ).object
        return GoDaddyDomainPurchaseResponse(
            order_id=result['orderId'],
            item_count=result['itemCount'],
            total=result['total'],
            currency=result['currency']
        )

    def _format_record(self, name, type, data, extra):
        # Build the JSON payload for a record; SRV records require extra
        # service/protocol/port/weight fields.
        if extra is None:
            extra = {}
        new_record = {}
        if type == RecordType.SRV:
            new_record = {
                'type': type,
                'name': name,
                'data': data,
                'priority': 1,
                'ttl': extra.get('ttl', 5),
                'service': extra.get('service', ''),
                'protocol': extra.get('protocol', ''),
                'port': extra.get('port', ''),
                'weight': extra.get('weight', '1')
            }
        else:
            new_record = {
                'type': type,
                'name': name,
                'data': data,
                'ttl': extra.get('ttl', 5)
            }
        if type == RecordType.MX:
            new_record['priority'] = 1
        return new_record

    def _to_zones(self, items):
        zones = []
        for item in items:
            zones.append(self._to_zone(item))
        return zones

    def _to_zone(self, item):
        extra = {"expires": item['expires']}
        zone = Zone(id=item['domainId'], domain=item['domain'],
                    type='master', ttl=None, driver=self, extra=extra)
        return zone

    def _to_records(self, items, zone=None):
        records = []
        for item in items:
            records.append(self._to_record(item=item, zone=zone))
        return records

    def _to_record(self, item, zone=None):
        ttl = item['ttl']
        type = self._string_to_record_type(item['type'])
        name = item['name']
        id = self._get_id_of_record(name, type)
        record = Record(id=id, name=name,
                        type=type, data=item['data'],
                        zone=zone, driver=self,
                        ttl=ttl)
        return record

    def _to_tlds(self, items):
        tlds = []
        for item in items:
            tlds.append(self._to_tld(item))
        return tlds

    def _to_tld(self, item):
        return GoDaddyTLD(
            name=item['name'],
            tld_type=item['type']
        )

    def _get_id_of_record(self, name, type):
        # GoDaddy has no record ids; synthesize one from name and type.
        return '%s:%s' % (name, type)

    def _ex_connection_class_kwargs(self):
        # Forward the shopper id to the connection constructor.
        return {'shopper_id': self.shopper_id}


class GoDaddyAvailability(object):
    # Availability result returned by ex_check_availability().
    def __init__(self, domain, available, price, currency, period):
        self.domain = domain
        self.available = bool(available)
        # currency comes in micro-units, convert to dollars.
self.price = float(price) / 1000000 self.currency = currency self.period = int(period) class GoDaddyTLD(object): def __init__(self, name, tld_type): self.name = name self.type = tld_type class GoDaddyDomainPurchaseResponse(object): def __init__(self, order_id, item_count, total, currency): self.order_id = order_id self.item_count = item_count self.total = total self.current = currency class GoDaddyLegalAgreement(object): def __init__(self, agreement_key, title, url, content): self.agreement_key = agreement_key self.title = title self.url = url self.content = content apache-libcloud-2.2.1/libcloud/dns/drivers/dummy.py0000664000175000017500000001715512701023453022170 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.dns.base import DNSDriver, Zone, Record from libcloud.dns.types import RecordType from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError from libcloud.dns.types import RecordDoesNotExistError from libcloud.dns.types import RecordAlreadyExistsError class DummyDNSDriver(DNSDriver): """ Dummy DNS driver. 
>>> from libcloud.dns.drivers.dummy import DummyDNSDriver >>> driver = DummyDNSDriver('key', 'secret') >>> driver.name 'Dummy DNS Provider' """ name = 'Dummy DNS Provider' website = 'http://example.com' def __init__(self, api_key, api_secret): """ :param api_key: API key or username to used (required) :type api_key: ``str`` :param api_secret: Secret password to be used (required) :type api_secret: ``str`` :rtype: ``None`` """ self._zones = {} def list_record_types(self): """ >>> driver = DummyDNSDriver('key', 'secret') >>> driver.list_record_types() ['A'] @inherits: :class:`DNSDriver.list_record_types` """ return [RecordType.A] def list_zones(self): """ >>> driver = DummyDNSDriver('key', 'secret') >>> driver.list_zones() [] @inherits: :class:`DNSDriver.list_zones` """ return [zone['zone'] for zone in list(self._zones.values())] def list_records(self, zone): """ >>> driver = DummyDNSDriver('key', 'secret') >>> zone = driver.create_zone(domain='apache.org', type='master', ... ttl=100) >>> list(zone.list_records()) [] >>> record = driver.create_record(name='libcloud', zone=zone, ... type=RecordType.A, data='127.0.0.1') >>> list(zone.list_records()) #doctest: +ELLIPSIS [] """ return self._zones[zone.id]['records'].values() def get_zone(self, zone_id): """ >>> driver = DummyDNSDriver('key', 'secret') >>> driver.get_zone(zone_id='foobar') ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ZoneDoesNotExistError: @inherits: :class:`DNSDriver.get_zone` """ if zone_id not in self._zones: raise ZoneDoesNotExistError(driver=self, value=None, zone_id=zone_id) return self._zones[zone_id]['zone'] def get_record(self, zone_id, record_id): """ >>> driver = DummyDNSDriver('key', 'secret') >>> driver.get_record(zone_id='doesnotexist', record_id='exists') ... 
#doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ZoneDoesNotExistError: @inherits: :class:`DNSDriver.get_record` """ self.get_zone(zone_id=zone_id) zone_records = self._zones[zone_id]['records'] if record_id not in zone_records: raise RecordDoesNotExistError(record_id=record_id, value=None, driver=self) return zone_records[record_id] def create_zone(self, domain, type='master', ttl=None, extra=None): """ >>> driver = DummyDNSDriver('key', 'secret') >>> zone = driver.create_zone(domain='apache.org', type='master', ... ttl=100) >>> zone >>> zone = driver.create_zone(domain='apache.org', type='master', ... ttl=100) ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ZoneAlreadyExistsError: @inherits: :class:`DNSDriver.create_zone` """ id = 'id-%s' % (domain) if id in self._zones: raise ZoneAlreadyExistsError(zone_id=id, value=None, driver=self) zone = Zone(id=id, domain=domain, type=type, ttl=ttl, extra={}, driver=self) self._zones[id] = {'zone': zone, 'records': {}} return zone def create_record(self, name, zone, type, data, extra=None): """ >>> driver = DummyDNSDriver('key', 'secret') >>> zone = driver.create_zone(domain='apache.org', type='master', ... ttl=100) >>> record = driver.create_record(name='libcloud', zone=zone, ... type=RecordType.A, data='127.0.0.1') >>> record #doctest: +ELLIPSIS >>> record = driver.create_record(name='libcloud', zone=zone, ... type=RecordType.A, data='127.0.0.1') ... 
#doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): RecordAlreadyExistsError: @inherits: :class:`DNSDriver.create_record` """ id = 'id-%s' % (name) zone = self.get_zone(zone_id=zone.id) if id in self._zones[zone.id]['records']: raise RecordAlreadyExistsError(record_id=id, value=None, driver=self) record = Record(id=id, name=name, type=type, data=data, extra=extra, zone=zone, driver=self) self._zones[zone.id]['records'][id] = record return record def delete_zone(self, zone): """ >>> driver = DummyDNSDriver('key', 'secret') >>> zone = driver.create_zone(domain='apache.org', type='master', ... ttl=100) >>> driver.delete_zone(zone) True >>> driver.delete_zone(zone) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ZoneDoesNotExistError: @inherits: :class:`DNSDriver.delete_zone` """ self.get_zone(zone_id=zone.id) del self._zones[zone.id] return True def delete_record(self, record): """ >>> driver = DummyDNSDriver('key', 'secret') >>> zone = driver.create_zone(domain='apache.org', type='master', ... ttl=100) >>> record = driver.create_record(name='libcloud', zone=zone, ... type=RecordType.A, data='127.0.0.1') >>> driver.delete_record(record) True >>> driver.delete_record(record) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): RecordDoesNotExistError: @inherits: :class:`DNSDriver.delete_record` """ self.get_record(zone_id=record.zone.id, record_id=record.id) del self._zones[record.zone.id]['records'][record.id] return True if __name__ == "__main__": import doctest doctest.testmod() apache-libcloud-2.2.1/libcloud/dns/drivers/softlayer.py0000664000175000017500000001647012705475361023061 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=unexpected-keyword-arg

__all__ = [
    'SoftLayerDNSDriver'
]

from libcloud.common.softlayer import SoftLayerConnection
from libcloud.common.softlayer import SoftLayerObjectDoesntExist
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record

VALID_RECORD_EXTRA_PARAMS = ['priority', 'ttl']


class SoftLayerDNSDriver(DNSDriver):
    type = Provider.SOFTLAYER
    name = 'Softlayer DNS'
    website = 'https://www.softlayer.com'
    connectionCls = SoftLayerConnection

    RECORD_TYPE_MAP = {
        RecordType.A: 'a',
        RecordType.AAAA: 'aaaa',
        RecordType.CNAME: 'cname',
        RecordType.MX: 'mx',
        RecordType.NS: 'ns',
        RecordType.PTR: 'ptr',
        RecordType.SOA: 'soa',
        RecordType.SPF: 'spf',
        RecordType.SRV: 'srv',
        RecordType.TXT: 'txt',
    }

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        :param domain: Zone domain name (e.g. example.com)
        :type  domain: ``str``

        :param type: Zone type.  SoftLayer only supports master zones; the
            parameter exists for signature compatibility with the base
            :class:`DNSDriver` (it was previously missing, which broke the
            common driver interface) and is otherwise ignored.
        :type  type: ``str``

        :param ttl: TTL for new records (not used by this call).
        :type  ttl: ``int``

        :rtype: :class:`Zone`
        """
        self.connection.set_context({'resource': 'zone', 'id': domain})
        data = {
            'name': domain,
            'resourceRecords': []
        }
        response = self.connection.request(
            'SoftLayer_Dns_Domain', 'createObject', data
        ).object
        zone = Zone(id=response['id'], domain=domain,
                    type='master', ttl=3600, driver=self)
        return zone

    def get_zone(self, zone_id):
        """
        Return the zone with the given SoftLayer domain id.

        :raises ZoneDoesNotExistError: when the API reports no such object.
        """
        self.connection.set_context({'resource': 'zone', 'id': zone_id})
        try:
            response = self.connection.request(
                'SoftLayer_Dns_Domain', 'getObject', id=zone_id
            ).object
        except SoftLayerObjectDoesntExist:
            raise ZoneDoesNotExistError(value='', driver=self,
                                        zone_id=zone_id)
        return self._to_zone(response)

    def delete_zone(self, zone):
        """
        Delete a zone.

        :raises ZoneDoesNotExistError: when the API reports no such object.
        """
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        try:
            self.connection.request(
                'SoftLayer_Dns_Domain', 'deleteObject', id=zone.id
            ).object
        except SoftLayerObjectDoesntExist:
            raise ZoneDoesNotExistError(value='', driver=self,
                                        zone_id=zone.id)
        else:
            return True

    def iterate_zones(self):
        # '.' matches every domain on the account.
        zones_list = self.connection.request(
            'SoftLayer_Dns_Domain', 'getByDomainName', '.'
        ).object
        for item in zones_list:
            yield self._to_zone(item)

    def iterate_records(self, zone):
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        records_list = self.connection.request(
            'SoftLayer_Dns_Domain', 'getResourceRecords', id=zone.id
        ).object
        for item in records_list:
            yield self._to_record(item, zone=zone)

    def get_record(self, zone_id, record_id):
        try:
            record = self.connection.request(
                'SoftLayer_Dns_Domain_ResourceRecord', 'getObject',
                id=record_id
            ).object
            return self._to_record(record, zone=self.get_zone(zone_id))
        except SoftLayerObjectDoesntExist:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record_id)

    def delete_record(self, record):
        try:
            self.connection.request(
                'SoftLayer_Dns_Domain_ResourceRecord', 'deleteObject',
                id=record.id
            ).object
        except SoftLayerObjectDoesntExist:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record.id)
        else:
            return True

    def create_record(self, name, zone, type, data, extra=None):
        params = {
            'domainId': zone.id,
            'type': self.RECORD_TYPE_MAP[type],
            'host': name,
            'data': data
        }
        # Optional SoftLayer-specific attributes; 'priority' is mapped to
        # the API's 'mxPriority' field.
        if extra:
            if extra.get('ttl'):
                params['ttl'] = extra['ttl']
            if extra.get('refresh'):
                params['refresh'] = extra['refresh']
            if extra.get('retry'):
                params['retry'] = extra['retry']
            if extra.get('expire'):
                params['expire'] = extra['expire']
            if extra.get('priority'):
                params['mxPriority'] = extra['priority']

        response = self.connection.request(
            'SoftLayer_Dns_Domain_ResourceRecord', 'createObject', params
        ).object
        return self._to_record(response, zone=zone)

    def update_record(
            self, record, name=None, type=None, data=None, extra=None):
        params = {}
        if type:
            params['type'] = self.RECORD_TYPE_MAP[type]
        if name:
            params['host'] = name
        if data:
            params['data'] = data
        if extra:
            if extra.get('ttl'):
                params['ttl'] = extra['ttl']
            if extra.get('refresh'):
                params['refresh'] = extra['refresh']
            if extra.get('retry'):
                params['retry'] = extra['retry']
            if extra.get('expire'):
                params['expire'] = extra['expire']
            if extra.get('priority'):
                params['mxPriority'] = extra['priority']
        response = self.connection.request(
            'SoftLayer_Dns_Domain_ResourceRecord', 'editObject',
            params,
            id=record.id,
        ).object
        if response:
            # editObject only returns a boolean, so re-fetch the record to
            # return its updated state.
            changed_record = self.connection.request(
                'SoftLayer_Dns_Domain_ResourceRecord', 'getObject',
                id=record.id,
            ).object
            return self._to_record(changed_record, zone=record.zone)
        else:
            return False

    def _to_zone(self, item):
        ttl = item.get('ttl', 3600)
        zone = Zone(id=item['id'], domain=item['name'],
                    type='master', ttl=ttl, driver=self)
        return zone

    def _to_record(self, item, zone=None):
        extra = {
            'ttl': item['ttl'],
            'expire': item['expire'],
            'mxPriority': item['mxPriority'],
            'refresh': item['refresh'],
            'retry': item['retry'],
        }
        record = Record(
            id=item['id'],
            name=item['host'],
            type=self._string_to_record_type(item['type']),
            data=item['data'],
            zone=zone,
            driver=self,
            ttl=item['ttl'],
            extra=extra
        )
        return record

# apache-libcloud-2.2.1/libcloud/dns/drivers/buddyns.py (tar member header)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
BuddyNS DNS Driver
"""

import sys

try:
    import simplejson as json
except ImportError:
    import json

from libcloud.dns.types import Provider, ZoneDoesNotExistError,\
    ZoneAlreadyExistsError
from libcloud.dns.base import DNSDriver, Zone
from libcloud.common.buddyns import BuddyNSConnection, BuddyNSResponse,\
    BuddyNSException

__all__ = [
    'BuddyNSDNSDriver'
]


class BuddyNSDNSResponse(BuddyNSResponse):
    pass


class BuddyNSDNSConnection(BuddyNSConnection):
    responseCls = BuddyNSDNSResponse


class BuddyNSDNSDriver(DNSDriver):
    name = 'BuddyNS DNS'
    website = 'https://www.buddyns.com'
    type = Provider.BUDDYNS
    connectionCls = BuddyNSDNSConnection

    def list_zones(self):
        """List every zone delegated to BuddyNS."""
        response = self.connection.request(action='/api/v2/zone/',
                                           method='GET')
        return self._to_zones(items=response.parse_body())

    def get_zone(self, zone_id):
        """
        Look up a single zone.

        :param zone_id: Zone domain name (e.g. example.com)
        :return: :class:`Zone`
        """
        action = '/api/v2/zone/%s' % zone_id
        try:
            response = self.connection.request(action=action, method='GET')
        except BuddyNSException:
            exc = sys.exc_info()[1]
            if exc.message == 'Not found':
                raise ZoneDoesNotExistError(value=exc.message, driver=self,
                                            zone_id=zone_id)
            raise exc

        return self._to_zone(response.parse_body())

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        :param domain: Zone domain name (e.g. example.com)
        :type domain: ``str``

        :param type: Zone type (This is not really used. See API docs for
            extra parameters)
        :type type: ``str``

        :param ttl: TTL for new records (This is used through the extra
            param)
        :type ttl: ``int``

        :param extra: Extra attributes that are specific to the driver
            such as ttl.
        :type extra: ``dict``

        :rtype: :class:`Zone`

        Do not forget to pass the master in extra,
        extra = {'master':'65.55.37.62'} for example.
        """
        payload = {'name': domain}
        if extra is not None:
            payload.update(extra)
        try:
            response = self.connection.request(action='/api/v2/zone/',
                                               method='POST',
                                               data=json.dumps(payload))
        except BuddyNSException:
            exc = sys.exc_info()[1]
            if exc.message == 'Invalid zone submitted for addition.':
                raise ZoneAlreadyExistsError(value=exc.message, driver=self,
                                             zone_id=domain)
            raise exc

        return self._to_zone(response.parse_body())

    def delete_zone(self, zone):
        """
        Remove a zone from BuddyNS.

        :param zone: Zone to be deleted.
        :type zone: :class:`Zone`

        :return: Boolean
        """
        action = '/api/v2/zone/%s' % zone.domain
        try:
            self.connection.request(action=action, method='DELETE')
        except BuddyNSException:
            exc = sys.exc_info()[1]
            if exc.message == 'Not found':
                raise ZoneDoesNotExistError(value=exc.message, driver=self,
                                            zone_id=zone.id)
            raise exc

        return True

    def _to_zone(self, item):
        # Everything other than the zone name is preserved in ``extra``.
        extra = {key: item.get(key) for key in item if key not in ('name',)}
        return Zone(domain=item['name'], id=item['name'], type=None,
                    extra=extra, ttl=None, driver=self)

    def _to_zones(self, items):
        return [self._to_zone(entry) for entry in items]

# apache-libcloud-2.2.1/libcloud/dns/drivers/route53.py (tar member header)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = [
    'Route53DNSDriver'
]

import base64
import hmac
import datetime
import uuid
import copy

from libcloud.utils.py3 import httplib
from hashlib import sha1

from libcloud.utils.py3 import ET
from libcloud.utils.py3 import b, urlencode

from libcloud.utils.xml import findtext, findall, fixxpath
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.common.types import LibcloudError
from libcloud.common.aws import AWSGenericResponse, AWSTokenConnection
from libcloud.common.base import ConnectionUserAndKey

API_VERSION = '2012-02-29'
API_HOST = 'route53.amazonaws.com'
API_ROOT = '/%s/' % (API_VERSION)

NAMESPACE = 'https://%s/doc%s' % (API_HOST, API_ROOT)


class InvalidChangeBatch(LibcloudError):
    pass


class Route53DNSResponse(AWSGenericResponse):
    """
    Amazon Route53 response class.
    """
    namespace = NAMESPACE
    xpath = 'Error'

    exceptions = {
        'NoSuchHostedZone': ZoneDoesNotExistError,
        'InvalidChangeBatch': InvalidChangeBatch,
    }


class BaseRoute53Connection(ConnectionUserAndKey):
    host = API_HOST
    responseCls = Route53DNSResponse

    def pre_connect_hook(self, params, headers):
        # Route53 uses the AWS3-HTTPS scheme: sign the RFC 1123 date header
        # with the secret key and send the result in X-Amzn-Authorization.
        time_string = datetime.datetime.utcnow() \
                              .strftime('%a, %d %b %Y %H:%M:%S GMT')
        headers['Date'] = time_string
        tmp = []

        signature = self._get_aws_auth_b64(self.key, time_string)
        auth = {'AWSAccessKeyId': self.user_id, 'Signature': signature,
                'Algorithm': 'HmacSHA1'}

        for k, v in auth.items():
            tmp.append('%s=%s' % (k, v))

        headers['X-Amzn-Authorization'] = 'AWS3-HTTPS ' + ','.join(tmp)

        return params, headers

    def _get_aws_auth_b64(self, secret_key, time_string):
        b64_hmac = base64.b64encode(
            hmac.new(b(secret_key), b(time_string), digestmod=sha1).digest()
        )
        return b64_hmac.decode('utf-8')


class Route53Connection(AWSTokenConnection, BaseRoute53Connection):
    pass


class Route53DNSDriver(DNSDriver):
    type = Provider.ROUTE53
    name = 'Route53 DNS'
    website = 'http://aws.amazon.com/route53/'
    connectionCls = Route53Connection

    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.PTR: 'PTR',
        RecordType.SOA: 'SOA',
        RecordType.SPF: 'SPF',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
    }

    def __init__(self, *args, **kwargs):
        self.token = kwargs.pop('token', None)
        super(Route53DNSDriver, self).__init__(*args, **kwargs)

    def iterate_zones(self):
        return self._get_more('zones')

    def iterate_records(self, zone):
        return self._get_more('records', zone=zone)

    def get_zone(self, zone_id):
        self.connection.set_context({'zone_id': zone_id})
        uri = API_ROOT + 'hostedzone/' + zone_id
        data = self.connection.request(uri).object
        elem = findall(element=data, xpath='HostedZone',
                       namespace=NAMESPACE)[0]
        return self._to_zone(elem)

    def get_record(self, zone_id, record_id):
        zone = self.get_zone(zone_id=zone_id)
        # Record IDs have the shape "<TYPE>:<name>".
        record_type, name = record_id.split(':', 1)
        if name:
            full_name = ".".join((name, zone.domain))
        else:
            full_name = zone.domain
        self.connection.set_context({'zone_id': zone_id})
        params = urlencode({
            'name': full_name,
            'type': record_type,
            'maxitems': '1'
        })
        uri = API_ROOT + 'hostedzone/' + zone_id + '/rrset?' + params
        data = self.connection.request(uri).object

        record = self._to_records(data=data, zone=zone)[0]

        # A cute aspect of the /rrset filters is that they are more
        # pagination hints than filters!!
        # So will return a result even if its not what you asked for.
        record_type_num = self._string_to_record_type(record_type)
        if record.name != name or record.type != record_type_num:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record_id)

        return record

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        zone = ET.Element('CreateHostedZoneRequest', {'xmlns': NAMESPACE})
        ET.SubElement(zone, 'Name').text = domain
        # CallerReference must be unique per request; a random UUID does it.
        ET.SubElement(zone, 'CallerReference').text = str(uuid.uuid4())

        if extra and 'Comment' in extra:
            hzg = ET.SubElement(zone, 'HostedZoneConfig')
            ET.SubElement(hzg, 'Comment').text = extra['Comment']

        uri = API_ROOT + 'hostedzone'
        data = ET.tostring(zone)
        rsp = self.connection.request(uri, method='POST', data=data).object

        elem = findall(element=rsp, xpath='HostedZone',
                       namespace=NAMESPACE)[0]
        return self._to_zone(elem=elem)

    def delete_zone(self, zone, ex_delete_records=False):
        self.connection.set_context({'zone_id': zone.id})

        if ex_delete_records:
            self.ex_delete_all_records(zone=zone)

        uri = API_ROOT + 'hostedzone/%s' % (zone.id)
        response = self.connection.request(uri, method='DELETE')
        return response.status in [httplib.OK]

    def create_record(self, name, zone, type, data, extra=None):
        if type in (RecordType.TXT, RecordType.SPF):
            data = self._quote_data(data)

        extra = extra or {}
        batch = [('CREATE', name, type, data, extra)]
        self._post_changeset(zone, batch)
        id = ':'.join((self.RECORD_TYPE_MAP[type], name))
        return Record(id=id, name=name, type=type, data=data, zone=zone,
                      driver=self, ttl=extra.get('ttl', None), extra=extra)

    def update_record(self, record, name=None, type=None, data=None,
                      extra=None):
        name = name or record.name
        type = type or record.type
        extra = extra or record.extra

        # Multiple value records need to be handled specially - we need to
        # pass values for other records as well
        multiple_value_record = record.extra.get('_multi_value', False)
        other_records = record.extra.get('_other_records', [])

        if multiple_value_record and other_records:
            self._update_multi_value_record(record=record, name=name,
                                            type=type, data=data,
                                            extra=extra)
        else:
            self._update_single_value_record(record=record, name=name,
                                             type=type, data=data,
                                             extra=extra)

        id = ':'.join((self.RECORD_TYPE_MAP[type], name))
        return Record(id=id, name=name, type=type, data=data,
                      zone=record.zone, driver=self,
                      ttl=extra.get('ttl', None), extra=extra)

    def delete_record(self, record):
        try:
            r = record
            batch = [('DELETE', r.name, r.type, r.data, r.extra)]
            self._post_changeset(record.zone, batch)
        except InvalidChangeBatch:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=r.id)
        return True

    def ex_create_multi_value_record(self, name, zone, type, data,
                                     extra=None):
        """
        Create a record with multiple values with a single call.

        :return: A list of created records.
        :rtype: ``list`` of :class:`libcloud.dns.base.Record`
        """
        extra = extra or {}

        attrs = {'xmlns': NAMESPACE}
        changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
        batch = ET.SubElement(changeset, 'ChangeBatch')
        changes = ET.SubElement(batch, 'Changes')

        change = ET.SubElement(changes, 'Change')
        ET.SubElement(change, 'Action').text = 'CREATE'

        rrs = ET.SubElement(change, 'ResourceRecordSet')
        ET.SubElement(rrs, 'Name').text = name + '.' + zone.domain
        ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type]
        ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))

        rrecs = ET.SubElement(rrs, 'ResourceRecords')

        # Value is provided as a multi line string
        values = [value.strip() for value in data.split('\n') if
                  value.strip()]

        for value in values:
            rrec = ET.SubElement(rrecs, 'ResourceRecord')
            ET.SubElement(rrec, 'Value').text = value

        uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset'
        data = ET.tostring(changeset)
        self.connection.set_context({'zone_id': zone.id})
        self.connection.request(uri, method='POST', data=data)

        id = ':'.join((self.RECORD_TYPE_MAP[type], name))

        records = []
        for value in values:
            record = Record(id=id, name=name, type=type, data=value,
                            zone=zone, driver=self,
                            ttl=extra.get('ttl', None), extra=extra)
            records.append(record)

        return records

    def ex_delete_all_records(self, zone):
        """
        Remove all the records for the provided zone.

        :param zone: Zone to delete records for.
        :type  zone: :class:`Zone`
        """
        deletions = []
        for r in zone.list_records():
            if r.type in (RecordType.NS, RecordType.SOA):
                continue
            deletions.append(('DELETE', r.name, r.type, r.data, r.extra))

        if deletions:
            self._post_changeset(zone, deletions)

    def _update_single_value_record(self, record, name=None, type=None,
                                    data=None, extra=None):
        batch = [
            ('DELETE', record.name, record.type, record.data, record.extra),
            ('CREATE', name, type, data, extra)
        ]

        return self._post_changeset(record.zone, batch)

    def _update_multi_value_record(self, record, name=None, type=None,
                                   data=None, extra=None):
        other_records = record.extra.get('_other_records', [])

        attrs = {'xmlns': NAMESPACE}
        changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
        batch = ET.SubElement(changeset, 'ChangeBatch')
        changes = ET.SubElement(batch, 'Changes')

        # Delete existing records
        change = ET.SubElement(changes, 'Change')
        ET.SubElement(change, 'Action').text = 'DELETE'

        rrs = ET.SubElement(change, 'ResourceRecordSet')

        if record.name:
            record_name = record.name + '.' + record.zone.domain
        else:
            record_name = record.zone.domain

        ET.SubElement(rrs, 'Name').text = record_name
        ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[record.type]
        ET.SubElement(rrs, 'TTL').text = str(record.extra.get('ttl', '0'))

        rrecs = ET.SubElement(rrs, 'ResourceRecords')

        rrec = ET.SubElement(rrecs, 'ResourceRecord')
        ET.SubElement(rrec, 'Value').text = record.data

        for other_record in other_records:
            rrec = ET.SubElement(rrecs, 'ResourceRecord')
            ET.SubElement(rrec, 'Value').text = other_record['data']

        # Re-create new (updated) records. Since we are updating a multi
        # value record, only a single record is updated and others are left
        # as is.
        change = ET.SubElement(changes, 'Change')
        ET.SubElement(change, 'Action').text = 'CREATE'

        rrs = ET.SubElement(change, 'ResourceRecordSet')

        if name:
            record_name = name + '.' + record.zone.domain
        else:
            record_name = record.zone.domain

        ET.SubElement(rrs, 'Name').text = record_name
        ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type]
        ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))

        rrecs = ET.SubElement(rrs, 'ResourceRecords')

        rrec = ET.SubElement(rrecs, 'ResourceRecord')
        ET.SubElement(rrec, 'Value').text = data

        for other_record in other_records:
            rrec = ET.SubElement(rrecs, 'ResourceRecord')
            ET.SubElement(rrec, 'Value').text = other_record['data']

        uri = API_ROOT + 'hostedzone/' + record.zone.id + '/rrset'
        data = ET.tostring(changeset)
        self.connection.set_context({'zone_id': record.zone.id})
        response = self.connection.request(uri, method='POST', data=data)

        return response.status == httplib.OK

    def _post_changeset(self, zone, changes_list):
        attrs = {'xmlns': NAMESPACE}
        changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
        batch = ET.SubElement(changeset, 'ChangeBatch')
        changes = ET.SubElement(batch, 'Changes')

        for action, name, type_, data, extra in changes_list:
            change = ET.SubElement(changes, 'Change')
            ET.SubElement(change, 'Action').text = action

            rrs = ET.SubElement(change, 'ResourceRecordSet')

            if name:
                record_name = name + '.' + zone.domain
            else:
                record_name = zone.domain

            ET.SubElement(rrs, 'Name').text = record_name
            ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type_]
            ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))

            rrecs = ET.SubElement(rrs, 'ResourceRecords')
            rrec = ET.SubElement(rrecs, 'ResourceRecord')

            if 'priority' in extra:
                data = '%s %s' % (extra['priority'], data)

            ET.SubElement(rrec, 'Value').text = data

        uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset'
        data = ET.tostring(changeset)
        self.connection.set_context({'zone_id': zone.id})
        response = self.connection.request(uri, method='POST', data=data)

        return response.status == httplib.OK

    def _to_zones(self, data):
        zones = []
        for element in data.findall(fixxpath(xpath='HostedZones/HostedZone',
                                             namespace=NAMESPACE)):
            zones.append(self._to_zone(element))

        return zones

    def _to_zone(self, elem):
        name = findtext(element=elem, xpath='Name', namespace=NAMESPACE)
        id = findtext(element=elem, xpath='Id',
                      namespace=NAMESPACE).replace('/hostedzone/', '')
        comment = findtext(element=elem, xpath='Config/Comment',
                           namespace=NAMESPACE)
        resource_record_count = int(findtext(element=elem,
                                             xpath='ResourceRecordSetCount',
                                             namespace=NAMESPACE))

        extra = {'Comment': comment,
                 'ResourceRecordSetCount': resource_record_count}

        zone = Zone(id=id, domain=name, type='master', ttl=0, driver=self,
                    extra=extra)
        return zone

    def _to_records(self, data, zone):
        records = []
        elems = data.findall(
            fixxpath(xpath='ResourceRecordSets/ResourceRecordSet',
                     namespace=NAMESPACE))
        for elem in elems:
            record_set = elem.findall(fixxpath(
                xpath='ResourceRecords/ResourceRecord',
                namespace=NAMESPACE))
            record_count = len(record_set)
            multiple_value_record = (record_count > 1)

            record_set_records = []

            for index, record in enumerate(record_set):
                # Need to special handling for records with multiple values
                # for update to work correctly
                record = self._to_record(elem=elem, zone=zone, index=index)
                record.extra['_multi_value'] = multiple_value_record

                if multiple_value_record:
                    record.extra['_other_records'] = []

                record_set_records.append(record)

            # Store reference to other records so update works correctly
            if multiple_value_record:
                for index in range(0, len(record_set_records)):
                    record = record_set_records[index]

                    for other_index, other_record in \
                            enumerate(record_set_records):
                        if index == other_index:
                            # Skip current record
                            continue

                        extra = copy.deepcopy(other_record.extra)
                        extra.pop('_multi_value')
                        extra.pop('_other_records')

                        item = {'name': other_record.name,
                                'data': other_record.data,
                                'type': other_record.type,
                                'extra': extra}
                        record.extra['_other_records'].append(item)

            records.extend(record_set_records)

        return records

    def _to_record(self, elem, zone, index=0):
        name = findtext(element=elem, xpath='Name',
                        namespace=NAMESPACE)
        # Strip the trailing ".<zone domain>." suffix off the FQDN.
        name = name[:-len(zone.domain) - 1]

        type = self._string_to_record_type(findtext(element=elem,
                                                    xpath='Type',
                                                    namespace=NAMESPACE))
        ttl = findtext(element=elem, xpath='TTL', namespace=NAMESPACE)

        if ttl is not None:
            ttl = int(ttl)

        value_elem = elem.findall(
            fixxpath(xpath='ResourceRecords/ResourceRecord',
                     namespace=NAMESPACE))[index]
        data = findtext(element=(value_elem), xpath='Value',
                        namespace=NAMESPACE)

        extra = {'ttl': ttl}

        if type == 'MX':
            split = data.split()
            priority, data = split
            extra['priority'] = int(priority)
        elif type == 'SRV':
            split = data.split()
            priority, weight, port, data = split
            extra['priority'] = int(priority)
            extra['weight'] = int(weight)
            extra['port'] = int(port)

        id = ':'.join((self.RECORD_TYPE_MAP[type], name))
        record = Record(id=id, name=name, type=type, data=data, zone=zone,
                        driver=self, ttl=extra.get('ttl', None), extra=extra)
        return record

    def _get_more(self, rtype, **kwargs):
        # Generator that transparently follows Route53's pagination.
        exhausted = False
        last_key = None
        while not exhausted:
            items, last_key, exhausted = self._get_data(rtype, last_key,
                                                        **kwargs)
            for item in items:
                yield item

    def _get_data(self, rtype, last_key, **kwargs):
        params = {}
        if last_key:
            params['name'] = last_key

        path = API_ROOT + 'hostedzone'

        if rtype == 'zones':
            response = self.connection.request(path, params=params)
            transform_func = self._to_zones
        elif rtype == 'records':
            zone = kwargs['zone']
            path += '/%s/rrset' % (zone.id)
            self.connection.set_context({'zone_id': zone.id})
            response = self.connection.request(path, params=params)
            transform_func = self._to_records

        if response.status == httplib.OK:
            is_truncated = findtext(element=response.object,
                                    xpath='IsTruncated',
                                    namespace=NAMESPACE)
            exhausted = is_truncated != 'true'
            last_key = findtext(element=response.object,
                                xpath='NextRecordName',
                                namespace=NAMESPACE)
            items = transform_func(data=response.object, **kwargs)
            return items, last_key, exhausted
        else:
            return [], None, True

    def _ex_connection_class_kwargs(self):
        kwargs = super(Route53DNSDriver, self)._ex_connection_class_kwargs()
        kwargs['token'] = self.token
        return kwargs

    def _quote_data(self, data):
        # TXT and SPF values must be wrapped in double quotes.  If the
        # caller already quoted the value, pass it through untouched.
        if data[0] == '"' and data[-1] == '"':
            return data

        # Bug fix: the previous implementation replaced '"' with '\"',
        # which in Python source is the same one-character string '"' -
        # i.e. a no-op.  Escape embedded double quotes with a real
        # backslash before wrapping the value.
        return '"{0}"'.format(data.replace('"', '\\"'))

# apache-libcloud-2.2.1/libcloud/dns/drivers/onapp.py (tar member header)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OnApp DNS Driver """ __all__ = [ 'OnAppDNSDriver' ] import json from libcloud.common.onapp import OnAppConnection from libcloud.dns.types import Provider, RecordType from libcloud.dns.base import DNSDriver, Zone, Record DEFAULT_ZONE_TTL = 1200 class OnAppDNSDriver(DNSDriver): type = Provider.ONAPP name = 'OnApp' website = 'http://onapp.com/' connectionCls = OnAppConnection RECORD_TYPE_MAP = { RecordType.SOA: 'SOA', RecordType.NS: 'NS', RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.TXT: 'TXT', RecordType.SRV: 'SRV', } def list_zones(self): """ Return a list of zones. :return: ``list`` of :class:`Zone` """ response = self.connection.request('/dns_zones.json') zones = self._to_zones(response.object) return zones def get_zone(self, zone_id): """ Return a Zone instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :rtype: :class:`Zone` """ response = self.connection.request('/dns_zones/%s.json' % zone_id) zone = self._to_zone(response.object) return zone def create_zone(self, domain, type='master', ttl=None, extra=None): """ Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param type: Zone type (All zones are master by design). :type type: ``str`` :param ttl: TTL for new records. (This is not really used) :type ttl: ``int`` :param extra: Extra attributes (set auto_populate: 0 if you don't want to auto populate with existing DNS records). (optional) :type extra: ``dict`` :rtype: :class:`Zone` For more info, please see: https://docs.onapp.com/display/52API/Add+DNS+Zone """ dns_zone = {'name': domain} if extra is not None: dns_zone.update(extra) dns_zone_data = json.dumps({'dns_zone': dns_zone}) response = self.connection.request( '/dns_zones.json', method='POST', headers={"Content-type": "application/json"}, data=dns_zone_data) zone = self._to_zone(response.object) return zone def delete_zone(self, zone): """ Delete a zone. 
Note: This will also delete all the records belonging to this zone. :param zone: Zone to delete. :type zone: :class:`Zone` :rtype: ``bool`` """ self.connection.request( '/dns_zones/%s.json' % zone.id, method='DELETE') return True def list_records(self, zone): """ Return a list of records for the provided zone. :param zone: Zone to list records for. :type zone: :class:`Zone` :return: ``list`` of :class:`Record` """ response = self.connection.request( '/dns_zones/%s/records.json' % zone.id) dns_records = response.object['dns_zone']['records'] records = self._to_records(dns_records, zone) return records def get_record(self, zone_id, record_id): """ Return a Record instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :param record_id: ID of the required record :type record_id: ``str`` :rtype: :class:`Record` """ response = self.connection.request('/dns_zones/%s/records/%s.json' % (zone_id, record_id)) record = self._to_record(response.object, zone_id=zone_id) return record def create_record(self, name, zone, type, data, extra=None): """ Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone: Zone where the requested record is created. :type zone: :class:`Zone` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). Used only for A and AAAA record types. :type data: ``str`` :param extra: Extra attributes (driver specific). 
(optional) :type extra: ``dict`` :rtype: :class:`Record` For more info, please see: https://docs.onapp.com/display/52API/Add+DNS+Record """ dns_record = self._format_record(name, type, data, extra) dns_record_data = json.dumps({'dns_record': dns_record}) response = self.connection.request( '/dns_zones/%s/records.json' % zone.id, method='POST', headers={"Content-type": "application/json"}, data=dns_record_data) record = self._to_record(response.object, zone=zone) return record def update_record(self, record, name, type, data, extra=None): """ Update an existing record. :param record: Record to update. :type record: :class:`Record` :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). Used only for A and AAAA record types. :type data: ``str`` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: :class:`Record` For more info, please see: https://docs.onapp.com/display/52API/Edit+DNS+Records """ zone = record.zone dns_record = self._format_record(name, type, data, extra) dns_record_data = json.dumps({'dns_record': dns_record}) self.connection.request( '/dns_zones/%s/records/%s.json' % (zone.id, record.id), method='PUT', headers={"Content-type": "application/json"}, data=dns_record_data) record = self.get_record(zone.id, record.id) return record def delete_record(self, record): """ Delete a record. :param record: Record to delete. 
:type record: :class:`Record` :rtype: ``bool`` For more info, please see: https://docs.onapp.com/display/52API/Delete+DNS+Record """ zone_id = record.zone.id self.connection.request('/dns_zones/%s/records/%s.json' % (zone_id, record.id), method='DELETE') return True # # Helper methods # def _format_record(self, name, type, data, extra): if name is '': name = '@' if extra is None: extra = {} record_type = self.RECORD_TYPE_MAP[type] new_record = { 'name': name, 'ttl': extra.get('ttl', DEFAULT_ZONE_TTL), 'type': record_type } if type == RecordType.MX: additions = { 'priority': extra.get('priority', 1), 'hostname': extra.get('hostname') } elif type == RecordType.SRV: additions = { 'port': extra.get('port'), 'weight': extra.get('weight', 1), 'priority': extra.get('priority', 1), 'hostname': extra.get('hostname') } elif type == RecordType.A: additions = {'ip': data} elif type == RecordType.CNAME: additions = {'hostname': extra.get('hostname')} elif type == RecordType.AAAA: additions = {'ip': data} elif type == RecordType.TXT: additions = {'txt': extra.get('txt')} elif type == RecordType.NS: additions = {'hostname': extra.get('hostname')} new_record.update(additions) return new_record def _to_zones(self, data): zones = [] for zone in data: _zone = self._to_zone(zone) zones.append(_zone) return zones def _to_zone(self, data): dns_zone = data.get('dns_zone') id = dns_zone.get('id') name = dns_zone.get('name') extra = {'user_id': dns_zone.get('user_id'), 'cdn_reference': dns_zone.get('cdn_reference'), 'created_at': dns_zone.get('created_at'), 'updated_at': dns_zone.get('updated_at')} type = 'master' return Zone(id=id, domain=name, type=type, ttl=DEFAULT_ZONE_TTL, driver=self, extra=extra) def _to_records(self, data, zone): records = [] data = data.values() for data_type in data: for item in data_type: record = self._to_record(item, zone=zone) records.append(record) records.sort(key=lambda x: x.id, reverse=False) return records def _to_record(self, data, zone_id=None, 
zone=None): if not zone: # We need zone_id or zone zone = self.get_zone(zone_id) record = data.get('dns_record') id = record.get('id') name = record.get('name') type = record.get('type') ttl = record.get('ttl', None) return Record(id=id, name=name, type=type, data=record, zone=zone, driver=self, ttl=ttl, extra={}) apache-libcloud-2.2.1/libcloud/dns/drivers/vultr.py0000664000175000017500000002732112701023453022205 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Vultr DNS Driver """ from libcloud.utils.py3 import urlencode from libcloud.common.vultr import VultrConnection, VultrResponse from libcloud.dns.base import DNSDriver, Zone, Record from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError from libcloud.dns.types import ZoneAlreadyExistsError, RecordAlreadyExistsError from libcloud.dns.types import Provider, RecordType __all__ = [ 'ZoneRequiredException', 'VultrDNSResponse', 'VultrDNSConnection', 'VultrDNSDriver', ] class ZoneRequiredException(Exception): pass class VultrDNSResponse(VultrResponse): pass class VultrDNSConnection(VultrConnection): responseCls = VultrDNSResponse class VultrDNSDriver(DNSDriver): type = Provider.VULTR name = 'Vultr DNS' website = 'http://www.vultr.com/' connectionCls = VultrDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.TXT: 'TXT', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.NS: 'NS', RecordType.SRV: 'SRV', } def list_zones(self): """ Return a list of records for the provided zone. :param zone: Zone to list records for. :type zone: :class:`Zone` :return: ``list`` of :class:`Record` """ action = '/v1/dns/list' params = {'api_key': self.key} response = self.connection.request(action=action, params=params) zones = self._to_zones(response.objects[0]) return zones def list_records(self, zone): """ Returns a list of records for the provided zone. 
:param zone: zone to list records for :type zone: `Zone` :rtype: list of :class: `Record` """ if not isinstance(zone, Zone): raise ZoneRequiredException('zone should be of type Zone') zones = self.list_zones() if not self.ex_zone_exists(zone.domain, zones): raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone.domain) action = '/v1/dns/records' params = {'domain': zone.domain} response = self.connection.request(action=action, params=params) records = self._to_records(response.objects[0], zone=zone) return records def get_zone(self, zone_id): """ Returns a `Zone` instance. :param zone_id: name of the zone user wants to get. :type zone_id: ``str`` :rtype: :class:`Zone` """ ret_zone = None action = '/v1/dns/list' params = {'api_key': self.key} response = self.connection.request(action=action, params=params) zones = self._to_zones(response.objects[0]) if not self.ex_zone_exists(zone_id, zones): raise ZoneDoesNotExistError(value=None, zone_id=zone_id, driver=self) for zone in zones: if zone_id == zone.domain: ret_zone = zone return ret_zone def get_record(self, zone_id, record_id): """ Returns a Record instance. :param zone_id: name of the required zone :type zone_id: ``str`` :param record_id: ID of the required record :type record_id: ``str`` :rtype: :class: `Record` """ ret_record = None zone = self.get_zone(zone_id=zone_id) records = self.list_records(zone=zone) if not self.ex_record_exists(record_id, records): raise RecordDoesNotExistError(value='', driver=self, record_id=record_id) for record in records: if record_id == record.id: ret_record = record return ret_record def create_zone(self, domain, type='master', ttl=None, extra=None): """ Returns a `Zone` object. :param domain: Zone domain name, (e.g. example.com). :type domain: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` :param extra: (optional) Extra attributes (driver specific). (e.g. 
{'serverip':'127.0.0.1'}) """ extra = extra or {} if extra and extra.get('serverip'): serverip = extra['serverip'] params = {'api_key': self.key} data = urlencode({'domain': domain, 'serverip': serverip}) action = '/v1/dns/create_domain' zones = self.list_zones() if self.ex_zone_exists(domain, zones): raise ZoneAlreadyExistsError(value='', driver=self, zone_id=domain) self.connection.request(params=params, action=action, data=data, method='POST') zone = Zone(id=domain, domain=domain, type=type, ttl=ttl, driver=self, extra=extra) return zone def create_record(self, name, zone, type, data, extra=None): """ Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone: Zone where the requested record is created. :type zone: :class:`Zone` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: Extra attributes (driver specific). 
(optional) :type extra: ``dict`` :rtype: :class:`Record` """ extra = extra or {} ret_record = None old_records_list = self.list_records(zone=zone) # check if record already exists # if exists raise RecordAlreadyExistsError for record in old_records_list: if record.name == name and record.data == data: raise RecordAlreadyExistsError(value='', driver=self, record_id=record.id) MX = self.RECORD_TYPE_MAP.get('MX') SRV = self.RECORD_TYPE_MAP.get('SRV') if extra and extra.get('priority'): priority = int(extra['priority']) post_data = {'domain': zone.domain, 'name': name, 'type': self.RECORD_TYPE_MAP.get(type), 'data': data} if type == MX or type == SRV: post_data['priority'] = priority encoded_data = urlencode(post_data) params = {'api_key': self.key} action = '/v1/dns/create_record' self.connection.request(action=action, params=params, data=encoded_data, method='POST') updated_zone_records = zone.list_records() for record in updated_zone_records: if record.name == name and record.data == data: ret_record = record return ret_record def delete_zone(self, zone): """ Delete a zone. Note: This will delete all the records belonging to this zone. :param zone: Zone to delete. :type zone: :class:`Zone` :rtype: ``bool`` """ action = '/v1/dns/delete_domain' params = {'api_key': self.key} data = urlencode({'domain': zone.domain}) zones = self.list_zones() if not self.ex_zone_exists(zone.domain, zones): raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone.domain) response = self.connection.request(params=params, action=action, data=data, method='POST') return response.status == 200 def delete_record(self, record): """ Delete a record. :param record: Record to delete. 
:type record: :class:`Record` :rtype: ``bool`` """ action = '/v1/dns/delete_record' params = {'api_key': self.key} data = urlencode({'RECORDID': record.id, 'domain': record.zone.domain}) zone_records = self.list_records(record.zone) if not self.ex_record_exists(record.id, zone_records): raise RecordDoesNotExistError(value='', driver=self, record_id=record.id) response = self.connection.request(action=action, params=params, data=data, method='POST') return response.status == 200 def ex_zone_exists(self, zone_id, zones_list): """ Function to check if a `Zone` object exists. :param zone_id: Name of the `Zone` object. :type zone_id: ``str`` :param zones_list: A list containing `Zone` objects :type zones_list: ``list`` :rtype: Returns `True` or `False` """ zone_ids = [] for zone in zones_list: zone_ids.append(zone.domain) return zone_id in zone_ids def ex_record_exists(self, record_id, records_list): """ :param record_id: Name of the `Record` object. :type record_id: ``str`` :param records_list: A list containing `Record` objects :type records_list: ``list`` :rtype: ``bool`` """ record_ids = [] for record in records_list: record_ids.append(record.id) return record_id in record_ids def _to_zone(self, item): """ Build an object `Zone` from the item dictionary :param item: item to build the zone from :type item: `dictionary` :rtype: :instance: `Zone` """ type = 'master' extra = {'date_created': item['date_created']} zone = Zone(id=item['domain'], domain=item['domain'], driver=self, type=type, ttl=None, extra=extra) return zone def _to_zones(self, items): """ Returns a list of `Zone` objects. :param: items: a list that contains dictionary objects to be passed to the _to_zone function. 
:type items: ``list`` """ zones = [] for item in items: zones.append(self._to_zone(item)) return zones def _to_record(self, item, zone): extra = {} if item.get('priority'): extra['priority'] = item['priority'] type = self._string_to_record_type(item['type']) record = Record(id=item['RECORDID'], name=item['name'], type=type, data=item['data'], zone=zone, driver=self, extra=extra) return record def _to_records(self, items, zone): records = [] for item in items: records.append(self._to_record(item, zone=zone)) return records apache-libcloud-2.2.1/libcloud/dns/drivers/nfsn.py0000664000175000017500000001613512701023453021776 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License.You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ NFSN DNS Driver """ import re import sys from libcloud.common.exceptions import BaseHTTPError from libcloud.common.nfsn import NFSNConnection from libcloud.dns.base import DNSDriver, Zone, Record from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError from libcloud.dns.types import RecordAlreadyExistsError from libcloud.dns.types import Provider, RecordType from libcloud.utils.py3 import httplib __all__ = [ 'NFSNDNSDriver', ] # The NFSN API does not return any internal "ID" strings for any DNS records. 
# This means that we must set all returned Record objects' id properties to # None. It also means that we cannot implement libcloud APIs that rely on # record_id, such as get_record(). Instead, the NFSN-specific # ex_get_records_by() method will return the desired Record objects. # # Additionally, the NFSN API does not provide ways to create, delete, or list # all zones, so create_zone(), delete_zone(), and list_zones() are not # implemented. class NFSNDNSDriver(DNSDriver): type = Provider.NFSN name = 'NFSN DNS' website = 'https://www.nearlyfreespeech.net' connectionCls = NFSNConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.NS: 'NS', RecordType.SRV: 'SRV', RecordType.TXT: 'TXT', RecordType.PTR: 'PTR', } def list_records(self, zone): """ Return a list of all records for the provided zone. :param zone: Zone to list records for. :type zone: :class:`Zone` :return: ``list`` of :class:`Record` """ # Just use ex_get_records_by() with no name or type filters. return self.ex_get_records_by(zone) def get_zone(self, zone_id): """ Return a Zone instance. :param zone_id: name of the required zone, for example "example.com". :type zone_id: ``str`` :rtype: :class:`Zone` :raises: ZoneDoesNotExistError: If no zone could be found. """ # We will check if there is a serial property for this zone. If so, # then the zone exists. try: self.connection.request(action='/dns/%s/serial' % zone_id) except BaseHTTPError: e = sys.exc_info()[1] if e.code == httplib.NOT_FOUND: raise ZoneDoesNotExistError(zone_id=None, driver=self, value=e.message) raise e return Zone(id=None, domain=zone_id, type='master', ttl=3600, driver=self) def ex_get_records_by(self, zone, name=None, type=None): """ Return a list of records for the provided zone, filtered by name and/or type. :param zone: Zone to list records for. :type zone: :class:`Zone` :param zone: Zone where the requested records are found. 
:type zone: :class:`Zone` :param name: name of the records, for example "www". (optional) :type name: ``str`` :param type: DNS record type (A, MX, TXT). (optional) :type type: :class:`RecordType` :return: ``list`` of :class:`Record` """ payload = {} if name is not None: payload['name'] = name if type is not None: payload['type'] = type action = '/dns/%s/listRRs' % zone.domain response = self.connection.request(action=action, data=payload, method='POST') return self._to_records(response, zone) def create_record(self, name, zone, type, data, extra=None): """ Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone: Zone where the requested record is created. :type zone: :class:`Zone` :param type: DNS record type (A, MX, TXT). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: Extra attributes (driver specific, e.g. 'ttl'). (optional) :type extra: ``dict`` :rtype: :class:`Record` """ action = '/dns/%s/addRR' % zone.domain payload = {'name': name, 'data': data, 'type': type} if extra is not None and extra.get('ttl', None) is not None: payload['ttl'] = extra['ttl'] try: self.connection.request(action=action, data=payload, method='POST') except BaseHTTPError: e = sys.exc_info()[1] exists_re = re.compile(r'That RR already exists on the domain') if e.code == httplib.BAD_REQUEST and \ re.search(exists_re, e.message) is not None: value = '"%s" already exists in %s' % (name, zone.domain) raise RecordAlreadyExistsError(value=value, driver=self, record_id=None) raise e return self.ex_get_records_by(zone=zone, name=name, type=type)[0] def delete_record(self, record): """ Use this method to delete a record. 
:param record: record to delete :type record: `Record` :rtype: Bool """ action = '/dns/%s/removeRR' % record.zone.domain payload = {'name': record.name, 'data': record.data, 'type': record.type} try: self.connection.request(action=action, data=payload, method='POST') except BaseHTTPError: e = sys.exc_info()[1] if e.code == httplib.NOT_FOUND: raise RecordDoesNotExistError(value=e.message, driver=self, record_id=None) raise e return True def _to_record(self, item, zone): ttl = int(item['ttl']) return Record(id=None, name=item['name'], data=item['data'], type=item['type'], zone=zone, driver=self, ttl=ttl) def _to_records(self, items, zone): records = [] for item in items.object: records.append(self._to_record(item, zone)) return records apache-libcloud-2.2.1/libcloud/dns/drivers/durabledns.py0000664000175000017500000006370112704474244023171 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" DurableDNS Driver """ import sys from libcloud.utils.py3 import httplib from libcloud.utils.py3 import ensure_string from libcloud.dns.types import Provider, RecordType from libcloud.dns.base import Record, Zone from libcloud.dns.base import DNSDriver from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError from libcloud.dns.types import RecordDoesNotExistError from xml.etree.ElementTree import tostring from libcloud.common.durabledns import DurableConnection, DurableResponse from libcloud.common.durabledns import DurableDNSException from libcloud.common.durabledns import _schema_builder as api_schema_builder from libcloud.common.durabledns import SCHEMA_BUILDER_MAP __all__ = [ 'ZONE_EXTRA_PARAMS_DEFAULT_VALUES', 'RECORD_EXTRA_PARAMS_DEFAULT_VALUES', 'DEFAULT_TTL', 'DurableDNSResponse', 'DurableDNSConnection', 'DurableDNSDriver' ] # This will be the default values for each extra attributes when are not # specified in the 'extra' parameter ZONE_EXTRA_PARAMS_DEFAULT_VALUES = { 'ns': 'ns1.durabledns.com.', 'mbox': 'support.durabledns.com', 'refresh': '28800', 'retry': 7200, 'expire': 604800, 'minimum': 82000, 'xfer': '', 'update_acl': '' } RECORD_EXTRA_PARAMS_DEFAULT_VALUES = {'aux': 0, 'ttl': 3600} DEFAULT_TTL = 3600 class DurableDNSResponse(DurableResponse): pass class DurableDNSConnection(DurableConnection): responseCls = DurableDNSResponse class DurableDNSDriver(DNSDriver): type = Provider.DURABLEDNS name = 'DurableDNS' website = 'https://durabledns.com' connectionCls = DurableDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.HINFO: 'HINFO', RecordType.MX: 'MX', RecordType.NS: 'NS', RecordType.PTR: 'PTR', RecordType.RP: 'RP', RecordType.SRV: 'SRV', RecordType.TXT: 'TXT' } def list_zones(self): """ Return a list of zones. 
:return: ``list`` of :class:`Zone` """ schema_params = SCHEMA_BUILDER_MAP.get('list_zones') attributes = schema_params.get('attributes') schema = api_schema_builder(schema_params.get('urn_nid'), schema_params.get('method'), attributes) params = {'apiuser': self.key, 'apikey': self.secret} urn = schema.getchildren()[0] for child in urn: key = child.tag.split(':')[2] if key in attributes: child.text = str(params.get(key)) req_data = tostring(schema) action = '/services/dns/listZones.php' params = {} headers = {"SOAPAction": "urn:listZoneswsdl#listZones"} response = self.connection.request(action=action, params=params, data=req_data, method="POST", headers=headers) # listZones method doens't return full data in zones as getZone # method does. zones = [] for data in response.objects: zone = self.get_zone(data.get('id')) zones.append(zone) return zones def list_records(self, zone): """ Return a list of records for the provided zone. :param zone: Zone to list records for. :type zone: :class:`Zone` :return: ``list`` of :class:`Record` """ schema_params = SCHEMA_BUILDER_MAP.get('list_records') attributes = schema_params.get('attributes') schema = api_schema_builder(schema_params.get('urn_nid'), schema_params.get('method'), attributes) params = {'apiuser': self.key, 'apikey': self.secret, 'zonename': zone.id} urn = schema.getchildren()[0] for child in urn: key = child.tag.split(':')[2] if key in attributes: child.text = str(params.get(key)) req_data = tostring(schema) action = '/services/dns/listRecords.php?' params = {} headers = {"SOAPAction": "urn:listRecordswsdl#listRecords"} try: response = self.connection.request(action=action, params=params, data=req_data, method="POST", headers=headers) except DurableDNSException: e = sys.exc_info()[1] if 'Zone does not exist' in e.message: raise ZoneDoesNotExistError(zone_id=zone.id, driver=self, value=e.message) raise e # listRecords method doens't return full data in records as getRecord # method does. 
        # Tail of list_records(): one extra GET per record because the list
        # call does not return full record data.
        records = []
        for data in response.objects:
            record = self.get_record(zone.id, data.get('id'))
            records.append(record)
        return records

    def get_zone(self, zone_id):
        """
        Return a Zone instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :rtype: :class:`Zone`

        :raises: :class:`ZoneDoesNotExistError` if the API reports
                 'Zone does not exist'.
        """
        # Build the SOAP envelope skeleton for this API method.
        schema_params = SCHEMA_BUILDER_MAP.get('get_zone')
        attributes = schema_params.get('attributes')
        schema = api_schema_builder(schema_params.get('urn_nid'),
                                    schema_params.get('method'),
                                    attributes)
        params = {'apiuser': self.key, 'apikey': self.secret,
                  'zonename': zone_id}
        # NOTE(review): Element.getchildren() was removed in Python 3.9;
        # list(schema)[0] is the modern equivalent — confirm target versions.
        urn = schema.getchildren()[0]
        for child in urn:
            # Tag is namespaced as 'ns:urn:attr'; the attribute name is the
            # third colon-separated component.
            key = child.tag.split(':')[2]
            if key in attributes:
                child.text = str(params.get(key))
        req_data = tostring(schema)
        action = '/services/dns/getZone.php?'
        params = {}
        headers = {"SOAPAction": "urn:getZonewsdl#getZone"}
        try:
            response = self.connection.request(action=action,
                                               params=params,
                                               data=req_data,
                                               method="POST",
                                               headers=headers)
        except DurableDNSException:
            # sys.exc_info()[1] keeps this compatible with both Python 2
            # and 3 exception syntax.
            e = sys.exc_info()[1]
            if 'Zone does not exist' in e.message:
                raise ZoneDoesNotExistError(zone_id=zone_id, driver=self,
                                            value=e.message)
            raise e
        zones = self._to_zones(response.objects)
        return zones[0]

    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :param record_id: ID of the required record
        :type record_id: ``str``

        :rtype: :class:`Record`

        :raises: :class:`ZoneDoesNotExistError` or
                 :class:`RecordDoesNotExistError` depending on the API
                 error message.
        """
        schema_params = SCHEMA_BUILDER_MAP.get('get_record')
        attributes = schema_params.get('attributes')
        schema = api_schema_builder(schema_params.get('urn_nid'),
                                    schema_params.get('method'),
                                    attributes)
        params = {'apiuser': self.key, 'apikey': self.secret,
                  'zonename': zone_id, 'recordid': record_id}
        urn = schema.getchildren()[0]
        for child in urn:
            key = child.tag.split(':')[2]
            if key in attributes:
                child.text = str(params.get(key))
        req_data = tostring(schema)
        action = '/services/dns/getRecord.php?'
        params = {}
        headers = {"SOAPAction": "urn:getRecordwsdl#getRecord"}
        try:
            response = self.connection.request(action=action,
                                               params=params,
                                               data=req_data,
                                               method="POST",
                                               headers=headers)
        except DurableDNSException:
            e = sys.exc_info()[1]
            if 'Zone does not exist' in e.message:
                raise ZoneDoesNotExistError(zone_id=zone_id, driver=self,
                                            value=e.message)
            if 'Record does not exist' in e.message:
                raise RecordDoesNotExistError(record_id=record_id,
                                              driver=self, value=e.message)
            raise e
        # A second round-trip: the record payload does not carry zone data.
        zone = self.get_zone(zone_id)
        record = self._to_record(response.objects[0], zone)
        return record

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        :param domain: Name of zone, followed by a dot (.)
                       (e.g. example.com.)
        :type domain: ``str``

        :param type: Zone type (Only master available). (optional)
        :type type: ``str``

        :param ttl: TTL for new records. (optional)
        :type ttl: ``int``

        :param extra: Extra attributes ('mbox', 'ns', 'minimum', 'refresh',
                      'expire', 'update_acl', 'xfer'). (optional)
        :type extra: ``dict``

        :rtype: :class:`Zone`
        """
        # Fill in provider defaults for any extra attribute the caller
        # omitted; the SOAP API requires the full attribute set.
        if extra is None:
            extra = ZONE_EXTRA_PARAMS_DEFAULT_VALUES
        else:
            extra_fields = ZONE_EXTRA_PARAMS_DEFAULT_VALUES.keys()
            missing = set(extra_fields).difference(set(extra.keys()))
            for field in missing:
                extra[field] = ZONE_EXTRA_PARAMS_DEFAULT_VALUES.get(field)

        schema_params = SCHEMA_BUILDER_MAP.get('create_zone')
        attributes = schema_params.get('attributes')
        schema = api_schema_builder(schema_params.get('urn_nid'),
                                    schema_params.get('method'),
                                    attributes)
        params = {'apiuser': self.key, 'apikey': self.secret,
                  'zonename': domain, 'ttl': ttl or DEFAULT_TTL}
        params.update(extra)
        urn = schema.getchildren()[0]
        for child in urn:
            key = child.tag.split(':')[2]
            if key in attributes:
                # Insert printf-style placeholders instead of values here;
                # the real values are interpolated below.
                if isinstance(params.get(key), int):
                    child.text = "%d"
                else:
                    child.text = "%s"
        # We can't insert values directly in child.text because the API
        # raises an exception for values that need to be integers, and the
        # tostring method from ElementTree can't handle int values.
        skel = ensure_string(tostring(schema))  # Deal with PY3
        # NOTE(review): the argument order here must match the placeholder
        # order produced by api_schema_builder for 'create_zone'.
        req_data = skel % (self.key, self.secret, domain, extra.get('ns'),
                           extra.get('mbox'), extra.get('refresh'),
                           extra.get('retry'), extra.get('expire'),
                           extra.get('minimum'), ttl or DEFAULT_TTL,
                           extra.get('xfer'), extra.get('update_acl'))
        action = '/services/dns/createZone.php?'
        params = {}
        headers = {"SOAPAction": "urn:createZonewsdl#createZone"}
        try:
            self.connection.request(action=action, params=params,
                                    data=req_data, method="POST",
                                    headers=headers)
        except DurableDNSException:
            e = sys.exc_info()[1]
            if 'Zone Already Exist' in e.message:
                raise ZoneAlreadyExistsError(zone_id=domain, driver=self,
                                             value=e.message)
            raise e
        # The create call does not return the zone; fetch it explicitly.
        zone = self.get_zone(domain)
        return zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param zone: Zone where the requested record is created.
        :type zone: :class:`Zone`

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: Extra attributes (e.g. 'aux', 'ttl'). (optional)
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        if extra is None:
            extra = RECORD_EXTRA_PARAMS_DEFAULT_VALUES
        else:
            if 'aux' not in extra:
                extra['aux'] = RECORD_EXTRA_PARAMS_DEFAULT_VALUES.get('aux')
            if 'ttl' not in extra:
                extra['ttl'] = RECORD_EXTRA_PARAMS_DEFAULT_VALUES.get('ttl')
        # Dynamic DNS is always disabled for records created via this driver.
        extra['ddns_enabled'] = 'N'

        schema_params = SCHEMA_BUILDER_MAP.get('create_record')
        attributes = schema_params.get('attributes')
        schema = api_schema_builder(schema_params.get('urn_nid'),
                                    schema_params.get('method'),
                                    attributes)
        params = {'apiuser': self.key, 'apikey': self.secret,
                  'zonename': zone.id, 'name': name, 'type': type,
                  'data': data}
        params.update(extra)
        urn = schema.getchildren()[0]
        for child in urn:
            key = child.tag.split(':')[2]
            if key in attributes:
                if isinstance(params.get(key), int):
                    child.text = "%d"
                else:
                    child.text = "%s"
        # We can't insert values directly in child.text because the API
        # raises an exception for values that need to be integers, and the
        # tostring method from ElementTree can't handle int values.
        skel = ensure_string(tostring(schema))  # Deal with PY3
        req_data = skel % (self.key, self.secret, zone.id, name, type, data,
                           extra.get('aux'), extra.get('ttl'),
                           extra.get('ddns_enabled'))
        action = '/services/dns/createRecord.php?'
        headers = {"SOAPAction": "urn:createRecordwsdl#createRecord"}
        try:
            response = self.connection.request(action=action, data=req_data,
                                               method="POST",
                                               headers=headers)
            objects = response.objects
        except DurableDNSException:
            e = sys.exc_info()[1]
            # In DurableDNS it is possible to create records with the same
            # data. Their IDs will be different, but the API does not
            # implement a RecordAlreadyExist exception. Only ZoneDoesNotExist
            # will be handled.
            if 'Zone does not exist' in e.message:
                raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
                                            value=e.message)
            raise e
        # The API response only carries the new id; merge in the request
        # values so the returned Record is fully populated.
        record_item = objects[0]
        record_item['name'] = name
        record_item['type'] = type
        record_item['data'] = data
        record_item['ttl'] = extra.get('ttl')
        record_item['aux'] = extra.get('aux')
        record = self._to_record(record_item, zone)
        return record

    def update_zone(self, zone, domain, type='master', ttl=None, extra=None):
        """
        Update an existing zone.

        :param zone: Zone to update.
        :type zone: :class:`Zone`

        :param domain: Name of zone, followed by a dot (.)
                       (e.g. example.com.)
        :type domain: ``str``

        :param type: Zone type (master / slave).
        :type type: ``str``

        :param ttl: TTL for new records. (optional)
        :type ttl: ``int``

        :param extra: Extra attributes ('ns', 'mbox', 'refresh', 'retry',
                      'expire', 'minimum', 'xfer', 'update_acl'). (optional)
        :type extra: ``dict``

        :rtype: :class:`Zone`
        """
        # Missing values fall back to the zone's current settings, not to
        # the global defaults — an update must not silently reset fields.
        if ttl is None:
            ttl = zone.ttl
        if extra is None:
            extra = zone.extra
        else:
            extra_fields = ZONE_EXTRA_PARAMS_DEFAULT_VALUES.keys()
            missing = set(extra_fields).difference(set(extra.keys()))
            for field in missing:
                extra[field] = zone.extra.get(field)

        schema_params = SCHEMA_BUILDER_MAP.get('update_zone')
        attributes = schema_params.get('attributes')
        schema = api_schema_builder(schema_params.get('urn_nid'),
                                    schema_params.get('method'),
                                    attributes)
        params = {'apiuser': self.key, 'apikey': self.secret,
                  'zonename': domain, 'ttl': ttl}
        params.update(extra)
        urn = schema.getchildren()[0]
        for child in urn:
            key = child.tag.split(':')[2]
            if key in attributes:
                if isinstance(params.get(key), int):
                    child.text = "%d"
                else:
                    child.text = "%s"
        # We can't insert values directly in child.text because the API
        # raises an exception for values that need to be integers, and the
        # tostring method from ElementTree can't handle int values.
        skel = ensure_string(tostring(schema))  # Deal with PY3
        req_data = skel % (self.key, self.secret, domain, extra['ns'],
                           extra['mbox'], extra['refresh'], extra['retry'],
                           extra['expire'], extra['minimum'], ttl,
                           extra['xfer'], extra['update_acl'])
        action = '/services/dns/updateZone.php?'
        headers = {"SOAPAction": "urn:updateZonewsdl#updateZone"}
        try:
            self.connection.request(action=action, data=req_data,
                                    method="POST", headers=headers)
        except DurableDNSException:
            e = sys.exc_info()[1]
            if 'Zone does not exist' in e.message:
                raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
                                            value=e.message)
            raise e
        # After updating the zone, the serial number changes. In order to
        # have it updated, we need to fetch the zone data again.
        zone = self.get_zone(zone.id)
        return zone

    def update_record(self, record, name, type, data, extra=None):
        """
        Update an existing record.

        :param record: Record to update.
        :type record: :class:`Record`

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        zone = record.zone
        if extra is None:
            extra = record.extra
        else:
            extra_fields = ['aux', 'ttl']
            missing = set(extra_fields).difference(set(extra.keys()))
            for field in missing:
                extra[field] = record.extra.get(field)
        extra['ddns_enabled'] = 'N'

        schema_params = SCHEMA_BUILDER_MAP.get('update_record')
        attributes = schema_params.get('attributes')
        schema = api_schema_builder(schema_params.get('urn_nid'),
                                    schema_params.get('method'),
                                    attributes)
        params = {'apiuser': self.key, 'apikey': self.secret,
                  'zonename': zone.id, 'id': record.id, 'name': name,
                  'data': data}
        params.update(extra)
        urn = schema.getchildren()[0]
        for child in urn:
            key = child.tag.split(':')[2]
            if key in attributes:
                if isinstance(params.get(key), int):
                    child.text = "%d"
                else:
                    child.text = "%s"
        # We can't insert values directly in child.text because the API
        # raises an exception for values that need to be integers, and the
        # tostring method from ElementTree can't handle int values.
        skel = ensure_string(tostring(schema))  # Deal with PY3
        # NOTE(review): 'aux' is interpolated between name and data here —
        # presumably matching the updateRecord schema attribute order.
        req_data = skel % (self.key, self.secret, zone.id, record.id, name,
                           extra.get('aux'), data, extra.get('ttl'),
                           extra.get('ddns_enabled'))
        action = '/services/dns/updateRecord.php?'
        headers = {"SOAPAction": "urn:updateRecordwsdl#updateRecord"}
        try:
            self.connection.request(action=action, data=req_data,
                                    method="POST", headers=headers)
        except DurableDNSException:
            e = sys.exc_info()[1]
            if 'Zone does not exist' in e.message:
                raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
                                            value=e.message)
            raise e
        # Build the returned Record locally from the request values; the
        # update call does not echo the record back.
        record_item = {}
        record_item['id'] = record.id
        record_item['name'] = name
        record_item['type'] = type
        record_item['data'] = data
        record_item['ttl'] = extra.get('ttl')
        record_item['aux'] = extra.get('aux')
        record = self._to_record(record_item, zone)
        return record

    def delete_zone(self, zone):
        """
        Delete a zone.

        Note: This will delete all the records belonging to this zone.

        :param zone: Zone to delete.
        :type zone: :class:`Zone`

        :rtype: ``bool``
        """
        schema_params = SCHEMA_BUILDER_MAP.get('delete_zone')
        attributes = schema_params.get('attributes')
        schema = api_schema_builder(schema_params.get('urn_nid'),
                                    schema_params.get('method'),
                                    attributes)
        params = {'apiuser': self.key, 'apikey': self.secret,
                  'zonename': zone.id}
        urn = schema.getchildren()[0]
        for child in urn:
            key = child.tag.split(':')[2]
            if key in attributes:
                child.text = str(params.get(key))
        req_data = tostring(schema)
        action = '/services/dns/deleteZone.php?'
        headers = {"SOAPAction": "urn:deleteZonewsdl#deleteZone"}
        try:
            response = self.connection.request(action=action, data=req_data,
                                               method="POST",
                                               headers=headers)
        except DurableDNSException:
            e = sys.exc_info()[1]
            if 'Zone does not exist' in e.message:
                raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
                                            value=e.message)
            raise e
        return response.status in [httplib.OK]

    def delete_record(self, record):
        """
        Delete a record.

        :param record: Record to delete.
        :type record: :class:`Record`

        :rtype: ``bool``
        """
        schema_params = SCHEMA_BUILDER_MAP.get('delete_record')
        attributes = schema_params.get('attributes')
        schema = api_schema_builder(schema_params.get('urn_nid'),
                                    schema_params.get('method'),
                                    attributes)
        params = {'apiuser': self.key, 'apikey': self.secret,
                  'zonename': record.zone.id, 'id': record.id}
        urn = schema.getchildren()[0]
        for child in urn:
            key = child.tag.split(':')[2]
            if key in attributes:
                child.text = str(params.get(key))
        req_data = tostring(schema)
        action = '/services/dns/deleteRecord.php?'
        headers = {"SOAPAction": "urn:deleteRecordwsdl#deleteRecord"}
        try:
            response = self.connection.request(action=action, data=req_data,
                                               headers=headers,
                                               method="POST")
        except DurableDNSException:
            e = sys.exc_info()[1]
            # NOTE(review): the API message here is 'Record does not exists'
            # (with trailing 's'), unlike get_record's check above.
            if 'Record does not exists' in e.message:
                raise RecordDoesNotExistError(record_id=record.id,
                                              driver=self, value=e.message)
            if 'Zone does not exist' in e.message:
                raise ZoneDoesNotExistError(zone_id=record.zone.id,
                                            driver=self, value=e.message)
            raise e
        return response.status in [httplib.OK]

    def _to_zone(self, item):
        # Convert one API zone dict into a Zone object.
        extra = item.get('extra')
        # DurableDNS does not return information about zone type. This will
        # be set as master by default.
        zone = Zone(id=item.get('id'), type='master', domain=item.get('id'),
                    ttl=item.get('ttl'), driver=self, extra=extra)
        return zone

    def _to_zones(self, items):
        zones = []
        for item in items:
            zones.append(self._to_zone(item))
        return zones

    def _to_record(self, item, zone=None):
        # 'aux' and 'ttl' arrive as strings from the API; normalize to int.
        extra = {'aux': int(item.get('aux')), 'ttl': int(item.get('ttl'))}
        record = Record(id=item.get('id'), type=item.get('type'), zone=zone,
                        name=item.get('name'), data=item.get('data'),
                        driver=self, ttl=item.get('ttl', None), extra=extra)
        return record

    def _to_records(self, items, zone=None):
        records = []
        for item in items:
            records.append(self._to_record(item, zone))
        return records
apache-libcloud-2.2.1/libcloud/dns/drivers/pointdns.py0000664000175000017500000007034112704474244022672 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Point DNS Driver """ __all__ = [ 'PointDNSException', 'Redirect', 'MailRedirect', 'PointDNSDriver' ] import sys try: import simplejson as json except ImportError: import json from libcloud.utils.py3 import httplib from libcloud.common.types import ProviderError from libcloud.common.types import MalformedResponseError from libcloud.common.pointdns import PointDNSConnection from libcloud.common.exceptions import BaseHTTPError from libcloud.dns.types import Provider, RecordType from libcloud.dns.types import ZoneDoesNotExistError from libcloud.dns.types import RecordDoesNotExistError from libcloud.dns.base import DNSDriver, Zone, Record class PointDNSException(ProviderError): def __init__(self, value, http_code, driver=None): super(PointDNSException, self).__init__(value=value, http_code=http_code, driver=driver) self.args = (http_code, value) class Redirect(object): """ Point DNS redirect. """ def __init__(self, id, name, data, type, driver, zone, iframe=None, query=False): """ :param id: Redirect id. :type id: ``str`` :param name: The FQDN for the record. :type name: ``str`` :param data: The data field. (redirect_to) :type data: ``str`` :param type: The type of redirects 301, 302 or 0 for iframes. :type type: ``str`` :param driver: DNSDriver instance. :type driver: :class:`DNSDriver` :param zone: Zone where redirect belongs. :type zone: :class:`Zone` :param iframe: Title of iframe (optional). :type iframe: ``str`` :param query: boolean Information about including query string when redirecting. (optional). 
:type query: ``bool`` """ self.id = str(id) if id else None self.name = name self.data = data self.type = str(type) if type else None self.driver = driver self.zone = zone self.iframe = iframe self.query = query def update(self, data, name=None, type=None, iframe=None, query=None): return self.driver.ex_update_redirect(redirect=self, name=name, data=data, type=type, iframe=iframe, query=query) def delete(self): return self.driver.ex_delete_redirect(redirect=self) def __repr__(self): return ('' % (self.name, self.data, self.type)) class MailRedirect(object): """ Point DNS mail redirect. """ def __init__(self, id, source, destination, zone, driver): """ :param id: MailRedirect id. :type id: ``str`` :param source: The source address of mail redirect. :type source: ``str`` :param destination: The destination address of mail redirect. :type destination: ``str`` :param zone: Zone where mail redirect belongs. :type zone: :class:`Zone` :param driver: DNSDriver instance. :type driver: :class:`DNSDriver` """ self.id = str(id) if id else None self.source = source self.destination = destination self.zone = zone self.driver = driver def update(self, destination, source=None): return self.driver.ex_update_mail_redirect(mail_r=self, destination=destination, source=None) def delete(self): return self.driver.ex_delete_mail_redirect(mail_r=self) def __repr__(self): return ('' % (self.source, self.destination, self.zone.id)) class PointDNSDriver(DNSDriver): type = Provider.POINTDNS name = 'Point DNS' website = 'https://pointhq.com/' connectionCls = PointDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.ALIAS: 'ALIAS', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.NS: 'NS', RecordType.PTR: 'PTR', RecordType.SRV: 'SRV', RecordType.SSHFP: 'SSHFP', RecordType.TXT: 'TXT' } def list_zones(self): """ Return a list of zones. 
:return: ``list`` of :class:`Zone` """ response = self.connection.request('/zones') zones = self._to_zones(response.object) return zones def list_records(self, zone): """ Return a list of records for the provided zone. :param zone: Zone to list records for. :type zone: :class:`Zone` :return: ``list`` of :class:`Record` """ response = self.connection.request('/zones/%s/records' % zone.id) records = self._to_records(response.object, zone) return records def get_zone(self, zone_id): """ Return a Zone instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :rtype: :class:`Zone` """ try: response = self.connection.request('/zones/%s' % zone_id) except MalformedResponseError: e = sys.exc_info()[1] if e.body == 'Not found': raise ZoneDoesNotExistError(driver=self, value="The zone doesn't exists", zone_id=zone_id) raise e zone = self._to_zone(response.object) return zone def get_record(self, zone_id, record_id): """ Return a Record instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :param record_id: ID of the required record :type record_id: ``str`` :rtype: :class:`Record` """ try: response = self.connection.request('/zones/%s/records/%s' % (zone_id, record_id)) except MalformedResponseError: e = sys.exc_info()[1] if e.body == 'Not found': raise RecordDoesNotExistError(value="Record doesn't exists", driver=self, record_id=record_id) raise e record = self._to_record(response.object, zone_id=zone_id) return record def create_zone(self, domain, type='master', ttl=None, extra=None): """ Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param type: Zone type (All zones are master by design). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` :param extra: Extra attributes (driver specific). 
(optional) :type extra: ``dict`` :rtype: :class:`Zone` """ r_json = {'name': domain} if ttl is not None: r_json['ttl'] = ttl if extra is not None: r_json.update(extra) r_data = json.dumps({'zone': r_json}) try: response = self.connection.request('/zones', method='POST', data=r_data) except BaseHTTPError: e = sys.exc_info()[1] raise PointDNSException(value=e.message, http_code=e.code, driver=self) zone = self._to_zone(response.object) return zone def create_record(self, name, zone, type, data, extra=None): """ Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone: Zone where the requested record is created. :type zone: :class:`Zone` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: Extra attributes (driver specific). (optional) :type extra: ``dict`` :rtype: :class:`Record` """ r_json = {'name': name, 'data': data, 'record_type': type} if extra is not None: r_json.update(extra) r_data = json.dumps({'zone_record': r_json}) try: response = self.connection.request('/zones/%s/records' % zone.id, method='POST', data=r_data) except BaseHTTPError: e = sys.exc_info()[1] raise PointDNSException(value=e.message, http_code=e.code, driver=self) record = self._to_record(response.object, zone=zone) return record def update_zone(self, zone, domain, type='master', ttl=None, extra=None): """ Update an existing zone. :param zone: Zone to update. :type zone: :class:`Zone` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param type: Zone type (All zones are master by design). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` :param extra: Extra attributes (group, user-id). 
(optional) :type extra: ``dict`` :rtype: :class:`Zone` """ r_json = {'name': domain} if extra is not None: r_json.update(extra) r_data = json.dumps({'zone': r_json}) try: response = self.connection.request('/zones/%s' % zone.id, method='PUT', data=r_data) except (BaseHTTPError, MalformedResponseError): e = sys.exc_info()[1] if isinstance(e, MalformedResponseError) and e.body == 'Not found': raise ZoneDoesNotExistError(value="Zone doesn't exists", driver=self, zone_id=zone.id) raise PointDNSException(value=e.message, http_code=e.code, driver=self) zone = self._to_zone(response.object) return zone def update_record(self, record, name, type, data, extra=None): """ Update an existing record. :param record: Record to update. :type record: :class:`Record` :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: :class:`Record` """ zone = record.zone r_json = {'name': name, 'data': data, 'record_type': type} if extra is not None: r_json.update(extra) r_data = json.dumps({'zone_record': r_json}) try: response = self.connection.request('/zones/%s/records/%s' % (zone.id, record.id), method='PUT', data=r_data) except (BaseHTTPError, MalformedResponseError): e = sys.exc_info()[1] if isinstance(e, MalformedResponseError) and e.body == 'Not found': raise RecordDoesNotExistError(value="Record doesn't exists", driver=self, record_id=record.id) raise PointDNSException(value=e.message, http_code=e.code, driver=self) record = self._to_record(response.object, zone=zone) return record def delete_zone(self, zone): """ Delete a zone. Note: This will delete all the records belonging to this zone. 
:param zone: Zone to delete. :type zone: :class:`Zone` :rtype: ``bool`` """ try: self.connection.request('/zones/%s' % zone.id, method='DELETE') except MalformedResponseError: e = sys.exc_info()[1] if e.body == 'Not found': raise ZoneDoesNotExistError(driver=self, value="The zone doesn't exists", zone_id=zone.id) raise e return True def delete_record(self, record): """ Delete a record. :param record: Record to delete. :type record: :class:`Record` :rtype: ``bool`` """ zone_id = record.zone.id record_id = record.id try: self.connection.request('/zones/%s/records/%s' % (zone_id, record_id), method='DELETE') except MalformedResponseError: e = sys.exc_info()[1] if e.body == 'Not found': raise RecordDoesNotExistError(value="Record doesn't exists", driver=self, record_id=record_id) raise e return True def ex_list_redirects(self, zone): """ :param zone: Zone to list redirects for. :type zone: :class:`Zone` :rtype: ``list`` of :class:`Record` """ response = self.connection.request('/zones/%s/redirects' % zone.id) redirects = self._to_redirects(response.object, zone) return redirects def ex_list_mail_redirects(self, zone): """ :param zone: Zone to list redirects for. :type zone: :class:`Zone` :rtype: ``list`` of :class:`MailRedirect` """ response = self.connection.request('/zones/%s/mail_redirects' % zone.id) mail_redirects = self._to_mail_redirects(response.object, zone) return mail_redirects def ex_create_redirect(self, redirect_to, name, type, zone, iframe=None, query=None): """ :param redirect_to: The data field. (redirect_to) :type redirect_to: ``str`` :param name: The FQDN for the record. :type name: ``str`` :param type: The type of redirects 301, 302 or 0 for iframes. :type type: ``str`` :param zone: Zone to list redirects for. :type zone: :class:`Zone` :param iframe: Title of iframe (optional). :type iframe: ``str`` :param query: boolean Information about including query string when redirecting. (optional). 
:type query: ``bool`` :rtype: :class:`Record` """ r_json = {'name': name, 'redirect_to': redirect_to} if type is not None: r_json['redirect_type'] = type if iframe is not None: r_json['iframe_title'] = iframe if query is not None: r_json['redirect_query_string'] = query r_data = json.dumps({'zone_redirect': r_json}) try: response = self.connection.request('/zones/%s/redirects' % zone.id, method='POST', data=r_data) except (BaseHTTPError, MalformedResponseError): e = sys.exc_info()[1] raise PointDNSException(value=e.message, http_code=e.code, driver=self) redirect = self._to_redirect(response.object, zone=zone) return redirect def ex_create_mail_redirect(self, destination, source, zone): """ :param destination: The destination address of mail redirect. :type destination: ``str`` :param source: The source address of mail redirect. :type source: ``str`` :param zone: Zone to list redirects for. :type zone: :class:`Zone` :rtype: ``list`` of :class:`MailRedirect` """ r_json = {'destination_address': destination, 'source_address': source} r_data = json.dumps({'zone_mail_redirect': r_json}) try: response = self.connection.request('/zones/%s/mail_redirects' % zone.id, method='POST', data=r_data) except (BaseHTTPError, MalformedResponseError): e = sys.exc_info()[1] raise PointDNSException(value=e.message, http_code=e.code, driver=self) mail_redirect = self._to_mail_redirect(response.object, zone=zone) return mail_redirect def ex_get_redirect(self, zone_id, redirect_id): """ :param zone: Zone to list redirects for. :type zone: :class:`Zone` :param redirect_id: Redirect id. 
:type redirect_id: ``str`` :rtype: ``list`` of :class:`Redirect` """ try: response = self.connection.request('/zones/%s/redirects/%s' % (zone_id, redirect_id)) except (BaseHTTPError, MalformedResponseError): e = sys.exc_info()[1] if isinstance(e, MalformedResponseError) and e.body == 'Not found': raise PointDNSException(value='Couldn\'t found redirect', http_code=httplib.NOT_FOUND, driver=self) raise PointDNSException(value=e.message, http_code=e.code, driver=self) redirect = self._to_redirect(response.object, zone_id=zone_id) return redirect def ex_get_mail_redirects(self, zone_id, mail_r_id): """ :param zone: Zone to list redirects for. :type zone: :class:`Zone` :param mail_r_id: Mail redirect id. :type mail_r_id: ``str`` :rtype: ``list`` of :class:`MailRedirect` """ try: response = self.connection.request('/zones/%s/mail_redirects/%s' % (zone_id, mail_r_id)) except (BaseHTTPError, MalformedResponseError): e = sys.exc_info()[1] if isinstance(e, MalformedResponseError) and e.body == 'Not found': raise PointDNSException(value='Couldn\'t found mail redirect', http_code=httplib.NOT_FOUND, driver=self) raise PointDNSException(value=e.message, http_code=e.code, driver=self) mail_redirect = self._to_mail_redirect(response.object, zone_id=zone_id) return mail_redirect def ex_update_redirect(self, redirect, redirect_to=None, name=None, type=None, iframe=None, query=None): """ :param redirect: Record to update :type id: :class:`Redirect` :param redirect_to: The data field. (optional). :type redirect_to: ``str`` :param name: The FQDN for the record. :type name: ``str`` :param type: The type of redirects 301, 302 or 0 for iframes. (optional). :type type: ``str`` :param iframe: Title of iframe (optional). :type iframe: ``str`` :param query: boolean Information about including query string when redirecting. (optional). 
:type query: ``bool`` :rtype: ``list`` of :class:`Redirect` """ zone_id = redirect.zone.id r_json = {} if redirect_to is not None: r_json['redirect_to'] = redirect_to if name is not None: r_json['name'] = name if type is not None: r_json['record_type'] = type if iframe is not None: r_json['iframe_title'] = iframe if query is not None: r_json['redirect_query_string'] = query r_data = json.dumps({'zone_redirect': r_json}) try: response = self.connection.request('/zones/%s/redirects/%s' % (zone_id, redirect.id), method='PUT', data=r_data) except (BaseHTTPError, MalformedResponseError): e = sys.exc_info()[1] if isinstance(e, MalformedResponseError) and e.body == 'Not found': raise PointDNSException(value='Couldn\'t found redirect', http_code=httplib.NOT_FOUND, driver=self) raise PointDNSException(value=e.message, http_code=e.code, driver=self) redirect = self._to_redirect(response.object, zone=redirect.zone) return redirect def ex_update_mail_redirect(self, mail_r, destination, source=None): """ :param mail_r: Mail redirect to update :type mail_r: :class:`MailRedirect` :param destination: The destination address of mail redirect. :type destination: ``str`` :param source: The source address of mail redirect. 
(optional) :type source: ``str`` :rtype: ``list`` of :class:`MailRedirect` """ zone_id = mail_r.zone.id r_json = {'destination_address': destination} if source is not None: r_json['source_address'] = source r_data = json.dumps({'zone_redirect': r_json}) try: response = self.connection.request('/zones/%s/mail_redirects/%s' % (zone_id, mail_r.id), method='PUT', data=r_data) except (BaseHTTPError, MalformedResponseError): e = sys.exc_info()[1] if isinstance(e, MalformedResponseError) and e.body == 'Not found': raise PointDNSException(value='Couldn\'t found mail redirect', http_code=httplib.NOT_FOUND, driver=self) raise PointDNSException(value=e.message, http_code=e.code, driver=self) mail_redirect = self._to_mail_redirect(response.object, zone=mail_r.zone) return mail_redirect def ex_delete_redirect(self, redirect): """ :param mail_r: Redirect to delete :type mail_r: :class:`Redirect` :rtype: ``bool`` """ zone_id = redirect.zone.id redirect_id = redirect.id try: self.connection.request('/zones/%s/redirects/%s' % (zone_id, redirect_id), method='DELETE') except (BaseHTTPError, MalformedResponseError): e = sys.exc_info()[1] if isinstance(e, MalformedResponseError) and e.body == 'Not found': raise PointDNSException(value='Couldn\'t found redirect', http_code=httplib.NOT_FOUND, driver=self) raise PointDNSException(value=e.message, http_code=e.code, driver=self) return True def ex_delete_mail_redirect(self, mail_r): """ :param mail_r: Mail redirect to update :type mail_r: :class:`MailRedirect` :rtype: ``bool`` """ zone_id = mail_r.zone.id mail_r_id = mail_r.id try: self.connection.request('/zones/%s/mail_redirects/%s' % (zone_id, mail_r_id), method='DELETE') except (BaseHTTPError, MalformedResponseError): e = sys.exc_info()[1] if isinstance(e, MalformedResponseError) and e.body == 'Not found': raise PointDNSException(value='Couldn\'t found mail redirect', http_code=httplib.NOT_FOUND, driver=self) raise PointDNSException(value=e.message, http_code=e.code, driver=self) return 
True def _to_zones(self, data): zones = [] for zone in data: _zone = self._to_zone(zone) zones.append(_zone) return zones def _to_zone(self, data): zone = data.get('zone') id = zone.get('id') name = zone.get('name') ttl = zone.get('ttl') extra = {'group': zone.get('group'), 'user-id': zone.get('user-id')} # All zones are a primary ones by design, so they # assume that are the master source of info about the # zone, which is the case when domain DNS records # points to PointDNS nameservers. type = 'master' return Zone(id=id, domain=name, type=type, ttl=ttl, driver=self, extra=extra) def _to_records(self, data, zone): records = [] for item in data: record = self._to_record(item, zone=zone) records.append(record) return records def _to_record(self, data, zone_id=None, zone=None): if not zone: # We need zone_id or zone zone = self.get_zone(zone_id) record = data.get('zone_record') id = record.get('id') name = record.get('name') type = record.get('record_type') data = record.get('data') extra = {'ttl': record.get('ttl'), 'zone_id': record.get('zone_id'), 'aux': record.get('aux')} return Record(id=id, name=name, type=type, data=data, zone=zone, driver=self, ttl=record.get('ttl', None), extra=extra) def _to_redirects(self, data, zone): redirects = [] for item in data: redirect = self._to_redirect(item, zone=zone) redirects.append(redirect) return redirects def _to_redirect(self, data, zone_id=None, zone=None): if not zone: # We need zone_id or zone zone = self.get_zone(zone_id) record = data.get('zone_redirect') id = record.get('id') name = record.get('name') redirect_to = record.get('redirect_to') type = record.get('redirect_type') iframe = record.get('iframe_title') query = record.get('redirect_query_string') return Redirect(id, name, redirect_to, type, self, zone, iframe=iframe, query=query) def _to_mail_redirects(self, data, zone): mail_redirects = [] for item in data: mail_redirect = self._to_mail_redirect(item, zone=zone) mail_redirects.append(mail_redirect) return 
mail_redirects def _to_mail_redirect(self, data, zone_id=None, zone=None): if not zone: # We need zone_id or zone zone = self.get_zone(zone_id) record = data.get('zone_mail_redirect') id = record.get('id') destination = record.get('destination_address') source = record.get('source_address') return MailRedirect(id, source, destination, zone, self) apache-libcloud-2.2.1/libcloud/dns/base.py0000664000175000017500000003522012704474244020275 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement import datetime from libcloud import __version__ from libcloud.common.base import ConnectionUserAndKey, BaseDriver from libcloud.dns.types import RecordType __all__ = [ 'Zone', 'Record', 'DNSDriver' ] class Zone(object): """ DNS zone. """ def __init__(self, id, domain, type, ttl, driver, extra=None): """ :param id: Zone id. :type id: ``str`` :param domain: The name of the domain. :type domain: ``str`` :param type: Zone type (master, slave). :type type: ``str`` :param ttl: Default TTL for records in this zone (in seconds). :type ttl: ``int`` :param driver: DNSDriver instance. :type driver: :class:`DNSDriver` :param extra: (optional) Extra attributes (driver specific). 
:type extra: ``dict`` """ self.id = str(id) if id else None self.domain = domain self.type = type self.ttl = ttl or None self.driver = driver self.extra = extra or {} def list_records(self): return self.driver.list_records(zone=self) def create_record(self, name, type, data, extra=None): return self.driver.create_record(name=name, zone=self, type=type, data=data, extra=extra) def update(self, domain=None, type=None, ttl=None, extra=None): return self.driver.update_zone(zone=self, domain=domain, type=type, ttl=ttl, extra=extra) def delete(self): return self.driver.delete_zone(zone=self) def export_to_bind_format(self): return self.driver.export_zone_to_bind_format(zone=self) def export_to_bind_zone_file(self, file_path): self.driver.export_zone_to_bind_zone_file(zone=self, file_path=file_path) def __repr__(self): return ('' % (self.domain, self.ttl, self.driver.name)) class Record(object): """ Zone record / resource. """ def __init__(self, id, name, type, data, zone, driver, ttl=None, extra=None): """ :param id: Record id :type id: ``str`` :param name: Hostname or FQDN. :type name: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param zone: Zone instance. :type zone: :class:`Zone` :param driver: DNSDriver instance. :type driver: :class:`DNSDriver` :param ttl: Record TTL. :type ttl: ``int`` :param extra: (optional) Extra attributes (driver specific). 
:type extra: ``dict`` """ self.id = str(id) if id else None self.name = name self.type = type self.data = data self.zone = zone self.driver = driver self.ttl = ttl self.extra = extra or {} def update(self, name=None, type=None, data=None, extra=None): return self.driver.update_record(record=self, name=name, type=type, data=data, extra=extra) def delete(self): return self.driver.delete_record(record=self) def _get_numeric_id(self): record_id = self.id if record_id.isdigit(): record_id = int(record_id) return record_id def __repr__(self): zone = self.zone.domain if self.zone.domain else self.zone.id return ('' % (zone, self.name, self.type, self.data, self.driver.name, self.ttl)) class DNSDriver(BaseDriver): """ A base DNSDriver class to derive from This class is always subclassed by a specific driver. """ connectionCls = ConnectionUserAndKey name = None website = None # Map libcloud record type enum to provider record type name RECORD_TYPE_MAP = {} def __init__(self, key, secret=None, secure=True, host=None, port=None, **kwargs): """ :param key: API key or username to used (required) :type key: ``str`` :param secret: Secret password to be used (required) :type secret: ``str`` :param secure: Whether to use HTTPS or HTTP. Note: Some providers only support HTTPS, and it is on by default. :type secure: ``bool`` :param host: Override hostname used for connections. :type host: ``str`` :param port: Override port used for connections. :type port: ``int`` :return: ``None`` """ super(DNSDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, **kwargs) def list_record_types(self): """ Return a list of RecordType objects supported by the provider. :return: ``list`` of :class:`RecordType` """ return list(self.RECORD_TYPE_MAP.keys()) def iterate_zones(self): """ Return a generator to iterate over available zones. 
:rtype: ``generator`` of :class:`Zone` """ raise NotImplementedError( 'iterate_zones not implemented for this driver') def list_zones(self): """ Return a list of zones. :return: ``list`` of :class:`Zone` """ return list(self.iterate_zones()) def iterate_records(self, zone): """ Return a generator to iterate over records for the provided zone. :param zone: Zone to list records for. :type zone: :class:`Zone` :rtype: ``generator`` of :class:`Record` """ raise NotImplementedError( 'iterate_records not implemented for this driver') def list_records(self, zone): """ Return a list of records for the provided zone. :param zone: Zone to list records for. :type zone: :class:`Zone` :return: ``list`` of :class:`Record` """ return list(self.iterate_records(zone)) def get_zone(self, zone_id): """ Return a Zone instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :rtype: :class:`Zone` """ raise NotImplementedError( 'get_zone not implemented for this driver') def get_record(self, zone_id, record_id): """ Return a Record instance. :param zone_id: ID of the required zone :type zone_id: ``str`` :param record_id: ID of the required record :type record_id: ``str`` :rtype: :class:`Record` """ raise NotImplementedError( 'get_record not implemented for this driver') def create_zone(self, domain, type='master', ttl=None, extra=None): """ Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` :param extra: Extra attributes (driver specific). (optional) :type extra: ``dict`` :rtype: :class:`Zone` """ raise NotImplementedError( 'create_zone not implemented for this driver') def update_zone(self, zone, domain, type='master', ttl=None, extra=None): """ Update an existing zone. :param zone: Zone to update. :type zone: :class:`Zone` :param domain: Zone domain name (e.g. 
example.com) :type domain: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` :param extra: Extra attributes (driver specific). (optional) :type extra: ``dict`` :rtype: :class:`Zone` """ raise NotImplementedError( 'update_zone not implemented for this driver') def create_record(self, name, zone, type, data, extra=None): """ Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone: Zone where the requested record is created. :type zone: :class:`Zone` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: Extra attributes (driver specific). (optional) :type extra: ``dict`` :rtype: :class:`Record` """ raise NotImplementedError( 'create_record not implemented for this driver') def update_record(self, record, name, type, data, extra=None): """ Update an existing record. :param record: Record to update. :type record: :class:`Record` :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: :class:`RecordType` :param data: Data for the record (depends on the record type). :type data: ``str`` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: :class:`Record` """ raise NotImplementedError( 'update_record not implemented for this driver') def delete_zone(self, zone): """ Delete a zone. Note: This will delete all the records belonging to this zone. :param zone: Zone to delete. 
:type zone: :class:`Zone` :rtype: ``bool`` """ raise NotImplementedError( 'delete_zone not implemented for this driver') def delete_record(self, record): """ Delete a record. :param record: Record to delete. :type record: :class:`Record` :rtype: ``bool`` """ raise NotImplementedError( 'delete_record not implemented for this driver') def export_zone_to_bind_format(self, zone): """ Export Zone object to the BIND compatible format. :param zone: Zone to export. :type zone: :class:`Zone` :return: Zone data in BIND compatible format. :rtype: ``str`` """ if zone.type != 'master': raise ValueError('You can only generate BIND out for master zones') lines = [] # For consistent output, records are sorted based on the id records = zone.list_records() records = sorted(records, key=Record._get_numeric_id) date = datetime.datetime.now().strftime('%Y-%m-%d %H:%m:%S') values = {'version': __version__, 'date': date} lines.append('; Generated by Libcloud v%(version)s on %(date)s' % values) lines.append('$ORIGIN %(domain)s.' % {'domain': zone.domain}) lines.append('$TTL %(domain_ttl)s\n' % {'domain_ttl': zone.ttl}) for record in records: line = self._get_bind_record_line(record=record) lines.append(line) output = '\n'.join(lines) return output def export_zone_to_bind_zone_file(self, zone, file_path): """ Export Zone object to the BIND compatible format and write result to a file. :param zone: Zone to export. :type zone: :class:`Zone` :param file_path: File path where the output will be saved. :type file_path: ``str`` """ result = self.export_zone_to_bind_format(zone=zone) with open(file_path, 'w') as fp: fp.write(result) def _get_bind_record_line(self, record): """ Generate BIND record line for the provided record. :param record: Record to generate the line for. :type record: :class:`Record` :return: Bind compatible record line. 
:rtype: ``str`` """ parts = [] if record.name: name = '%(name)s.%(domain)s' % {'name': record.name, 'domain': record.zone.domain} else: name = record.zone.domain name += '.' ttl = record.extra['ttl'] if 'ttl' in record.extra else record.zone.ttl ttl = str(ttl) data = record.data if record.type in [RecordType.CNAME, RecordType.DNAME, RecordType.MX, RecordType.PTR, RecordType.SRV]: # Make sure trailing dot is present if data[len(data) - 1] != '.': data += '.' if record.type in [RecordType.TXT, RecordType.SPF] and ' ' in data: # Escape the quotes data = data.replace('"', '\\"') # Quote the string data = '"%s"' % (data) if record.type in [RecordType.MX, RecordType.SRV]: priority = str(record.extra['priority']) parts = [name, ttl, 'IN', record.type, priority, data] else: parts = [name, ttl, 'IN', record.type, data] line = '\t'.join(parts) return line def _string_to_record_type(self, string): """ Return a string representation of a DNS record type to a libcloud RecordType ENUM. :rtype: ``str`` """ string = string.upper() record_type = getattr(RecordType, string) return record_type apache-libcloud-2.2.1/libcloud/pricing.py0000664000175000017500000001533313153541406020227 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import with_statement """ A class which handles loading the pricing files. """ import os.path from os.path import join as pjoin try: import simplejson as json try: JSONDecodeError = json.JSONDecodeError except AttributeError: # simplejson < 2.1.0 does not have the JSONDecodeError exception class JSONDecodeError = ValueError except ImportError: import json JSONDecodeError = ValueError from libcloud.utils.connection import get_response_object __all__ = [ 'get_pricing', 'get_size_price', 'set_pricing', 'clear_pricing_data', 'download_pricing_file' ] # Default URL to the pricing file DEFAULT_FILE_URL = 'https://git-wip-us.apache.org/repos/asf?p=libcloud.git;a=blob_plain;f=libcloud/data/pricing.json' # NOQA CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__)) DEFAULT_PRICING_FILE_PATH = pjoin(CURRENT_DIRECTORY, 'data/pricing.json') CUSTOM_PRICING_FILE_PATH = os.path.expanduser('~/.libcloud/pricing.json') # Pricing data cache PRICING_DATA = { 'compute': {}, 'storage': {} } VALID_PRICING_DRIVER_TYPES = ['compute', 'storage'] def get_pricing_file_path(file_path=None): if os.path.exists(CUSTOM_PRICING_FILE_PATH) and \ os.path.isfile(CUSTOM_PRICING_FILE_PATH): # Custom pricing file is available, use it return CUSTOM_PRICING_FILE_PATH return DEFAULT_PRICING_FILE_PATH def get_pricing(driver_type, driver_name, pricing_file_path=None): """ Return pricing for the provided driver. :type driver_type: ``str`` :param driver_type: Driver type ('compute' or 'storage') :type driver_name: ``str`` :param driver_name: Driver name :type pricing_file_path: ``str`` :param pricing_file_path: Custom path to a price file. If not provided it uses a default path. :rtype: ``dict`` :return: Dictionary with pricing where a key name is size ID and the value is a price. 
""" if driver_type not in VALID_PRICING_DRIVER_TYPES: raise AttributeError('Invalid driver type: %s', driver_type) if driver_name in PRICING_DATA[driver_type]: return PRICING_DATA[driver_type][driver_name] if not pricing_file_path: pricing_file_path = get_pricing_file_path(file_path=pricing_file_path) with open(pricing_file_path) as fp: content = fp.read() pricing_data = json.loads(content) size_pricing = pricing_data[driver_type][driver_name] for driver_type in VALID_PRICING_DRIVER_TYPES: # pylint: disable=maybe-no-member pricing = pricing_data.get(driver_type, None) if pricing: PRICING_DATA[driver_type] = pricing return size_pricing def set_pricing(driver_type, driver_name, pricing): """ Populate the driver pricing dictionary. :type driver_type: ``str`` :param driver_type: Driver type ('compute' or 'storage') :type driver_name: ``str`` :param driver_name: Driver name :type pricing: ``dict`` :param pricing: Dictionary where a key is a size ID and a value is a price. """ PRICING_DATA[driver_type][driver_name] = pricing def get_size_price(driver_type, driver_name, size_id): """ Return price for the provided size. :type driver_type: ``str`` :param driver_type: Driver type ('compute' or 'storage') :type driver_name: ``str`` :param driver_name: Driver name :type size_id: ``str`` or ``int`` :param size_id: Unique size ID (can be an integer or a string - depends on the driver) :rtype: ``float`` :return: Size price. """ pricing = get_pricing(driver_type=driver_type, driver_name=driver_name) try: price = float(pricing[size_id]) except KeyError: # Price not available price = None return price def invalidate_pricing_cache(): """ Invalidate pricing cache for all the drivers. """ PRICING_DATA['compute'] = {} PRICING_DATA['storage'] = {} def clear_pricing_data(): """ Invalidate pricing cache for all the drivers. Note: This method does the same thing as invalidate_pricing_cache and is here for backward compatibility reasons. 
""" invalidate_pricing_cache() def invalidate_module_pricing_cache(driver_type, driver_name): """ Invalidate the cache for the specified driver. :type driver_type: ``str`` :param driver_type: Driver type ('compute' or 'storage') :type driver_name: ``str`` :param driver_name: Driver name """ if driver_name in PRICING_DATA[driver_type]: del PRICING_DATA[driver_type][driver_name] def download_pricing_file(file_url=DEFAULT_FILE_URL, file_path=CUSTOM_PRICING_FILE_PATH): """ Download pricing file from the file_url and save it to file_path. :type file_url: ``str`` :param file_url: URL pointing to the pricing file. :type file_path: ``str`` :param file_path: Path where a download pricing file will be saved. """ dir_name = os.path.dirname(file_path) if not os.path.exists(dir_name): # Verify a valid path is provided msg = ('Can\'t write to %s, directory %s, doesn\'t exist' % (file_path, dir_name)) raise ValueError(msg) if os.path.exists(file_path) and os.path.isdir(file_path): msg = ('Can\'t write to %s file path because it\'s a' ' directory' % (file_path)) raise ValueError(msg) response = get_response_object(file_url) body = response.body # Verify pricing file is valid try: data = json.loads(body) except JSONDecodeError: msg = 'Provided URL doesn\'t contain valid pricing data' raise Exception(msg) # pylint: disable=maybe-no-member if not data.get('updated', None): msg = 'Provided URL doesn\'t contain valid pricing data' raise Exception(msg) # No need to stream it since file is small with open(file_path, 'w') as file_handle: file_handle.write(body) apache-libcloud-2.2.1/libcloud/security.py0000664000175000017500000000560613153541406020445 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Security (SSL) Settings Usage: import libcloud.security libcloud.security.VERIFY_SSL_CERT = True # Optional. libcloud.security.CA_CERTS_PATH.append('/path/to/cacert.txt') """ import os import ssl __all__ = [ 'VERIFY_SSL_CERT', 'SSL_VERSION', 'CA_CERTS_PATH' ] VERIFY_SSL_CERT = True SSL_VERSION = ssl.PROTOCOL_TLSv1 # True to use certifi CA bundle path when certifi library is available USE_CERTIFI = os.environ.get('LIBCLOUD_SSL_USE_CERTIFI', True) USE_CERTIFI = str(USE_CERTIFI).lower() in ['true', '1'] # File containing one or more PEM-encoded CA certificates # concatenated together. 
CA_CERTS_PATH = None # Insert certifi CA bundle path to the front of Libcloud CA bundle search # path if certifi is available try: import certifi except ImportError: has_certifi = False else: has_certifi = True if has_certifi and USE_CERTIFI: certifi_ca_bundle_path = certifi.where() CA_CERTS_PATH = certifi_ca_bundle_path # Allow user to explicitly specify which CA bundle to use, using an environment # variable environment_cert_file = os.getenv('SSL_CERT_FILE', None) if environment_cert_file is not None: # Make sure the file exists if not os.path.exists(environment_cert_file): raise ValueError('Certificate file %s doesn\'t exist' % (environment_cert_file)) if not os.path.isfile(environment_cert_file): raise ValueError('Certificate file can\'t be a directory') # If a provided file exists we ignore other common paths because we # don't want to fall-back to a potentially less restrictive bundle CA_CERTS_PATH = [environment_cert_file] CA_CERTS_UNAVAILABLE_ERROR_MSG = ( 'No CA Certificates were found in CA_CERTS_PATH. For information on ' 'how to get required certificate files, please visit ' 'https://libcloud.readthedocs.org/en/latest/other/' 'ssl-certificate-validation.html' ) VERIFY_SSL_DISABLED_MSG = ( 'SSL certificate verification is disabled, this can pose a ' 'security risk. For more information how to enable the SSL ' 'certificate verification, please visit the libcloud ' 'documentation.' ) apache-libcloud-2.2.1/libcloud/storage/0000775000175000017500000000000013160535107017660 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/storage/providers.py0000664000175000017500000001026613153541406022255 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.storage.types import Provider
from libcloud.storage.types import OLD_CONSTANT_TO_NEW_MAPPING
from libcloud.common.providers import get_driver as _get_provider_driver
from libcloud.common.providers import set_driver as _set_provider_driver

# Maps a Provider constant to the (module path, class name) of the driver
# implementing it.  Modules are imported lazily by get_driver().
DRIVERS = {
    Provider.DUMMY:
    ('libcloud.storage.drivers.dummy', 'DummyStorageDriver'),
    Provider.CLOUDFILES:
    ('libcloud.storage.drivers.cloudfiles', 'CloudFilesStorageDriver'),
    Provider.OPENSTACK_SWIFT:
    ('libcloud.storage.drivers.cloudfiles', 'OpenStackSwiftStorageDriver'),
    Provider.S3:
    ('libcloud.storage.drivers.s3', 'S3StorageDriver'),
    Provider.S3_US_EAST2:
    ('libcloud.storage.drivers.s3', 'S3USEast2StorageDriver'),
    Provider.S3_US_WEST:
    ('libcloud.storage.drivers.s3', 'S3USWestStorageDriver'),
    Provider.S3_US_WEST_OREGON:
    ('libcloud.storage.drivers.s3', 'S3USWestOregonStorageDriver'),
    Provider.S3_US_GOV_WEST:
    ('libcloud.storage.drivers.s3', 'S3USGovWestStorageDriver'),
    Provider.S3_CN_NORTH:
    ('libcloud.storage.drivers.s3', 'S3CNNorthStorageDriver'),
    Provider.S3_EU_WEST:
    ('libcloud.storage.drivers.s3', 'S3EUWestStorageDriver'),
    Provider.S3_EU_WEST2:
    ('libcloud.storage.drivers.s3', 'S3EUWest2StorageDriver'),
    Provider.S3_EU_CENTRAL:
    ('libcloud.storage.drivers.s3', 'S3EUCentralStorageDriver'),
    Provider.S3_AP_SOUTH:
    ('libcloud.storage.drivers.s3', 'S3APSouthStorageDriver'),
    Provider.S3_AP_SOUTHEAST:
    ('libcloud.storage.drivers.s3', 'S3APSEStorageDriver'),
    Provider.S3_AP_SOUTHEAST2:
    ('libcloud.storage.drivers.s3', 'S3APSE2StorageDriver'),
    # NOTE: S3_AP_NORTHEAST and S3_AP_NORTHEAST1 intentionally share the
    # same driver class (the unsuffixed constant is the legacy spelling).
    Provider.S3_AP_NORTHEAST:
    ('libcloud.storage.drivers.s3', 'S3APNE1StorageDriver'),
    Provider.S3_AP_NORTHEAST1:
    ('libcloud.storage.drivers.s3', 'S3APNE1StorageDriver'),
    Provider.S3_AP_NORTHEAST2:
    ('libcloud.storage.drivers.s3', 'S3APNE2StorageDriver'),
    Provider.S3_SA_EAST:
    ('libcloud.storage.drivers.s3', 'S3SAEastStorageDriver'),
    Provider.S3_CA_CENTRAL:
    ('libcloud.storage.drivers.s3', 'S3CACentralStorageDriver'),
    Provider.S3_RGW:
    ('libcloud.storage.drivers.rgw', 'S3RGWStorageDriver'),
    Provider.S3_RGW_OUTSCALE:
    ('libcloud.storage.drivers.rgw', 'S3RGWOutscaleStorageDriver'),
    Provider.NINEFOLD:
    ('libcloud.storage.drivers.ninefold', 'NinefoldStorageDriver'),
    Provider.GOOGLE_STORAGE:
    ('libcloud.storage.drivers.google_storage', 'GoogleStorageDriver'),
    Provider.NIMBUS:
    ('libcloud.storage.drivers.nimbus', 'NimbusStorageDriver'),
    Provider.LOCAL:
    ('libcloud.storage.drivers.local', 'LocalStorageDriver'),
    Provider.AZURE_BLOBS:
    ('libcloud.storage.drivers.azure_blobs', 'AzureBlobsStorageDriver'),
    Provider.KTUCLOUD:
    ('libcloud.storage.drivers.ktucloud', 'KTUCloudStorageDriver'),
    Provider.AURORAOBJECTS:
    ('libcloud.storage.drivers.auroraobjects', 'AuroraObjectsStorageDriver'),
    Provider.BACKBLAZE_B2:
    ('libcloud.storage.drivers.backblaze_b2', 'BackblazeB2StorageDriver'),
    Provider.ALIYUN_OSS:
    ('libcloud.storage.drivers.oss', 'OSSStorageDriver'),
}


def get_driver(provider):
    # Resolve a Provider constant (including deprecated aliases) to the
    # concrete storage driver class.
    deprecated_constants = OLD_CONSTANT_TO_NEW_MAPPING
    return _get_provider_driver(drivers=DRIVERS, provider=provider,
                                deprecated_constants=deprecated_constants)


def set_driver(provider, module, klass):
    # Register (or override) the driver class used for the given provider.
    return _set_provider_driver(drivers=DRIVERS, provider=provider,
                                module=module, klass=klass)
apache-libcloud-2.2.1/libcloud/storage/__init__.py0000664000175000017500000000005012701023453021760 0ustar kamikami00000000000000"""
Module for working with Storage
"""
apache-libcloud-2.2.1/libcloud/storage/types.py0000664000175000017500000001177713153541406021414 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.common.types import LibcloudError __all__ = [ 'Provider', 'ContainerError', 'ObjectError', 'ContainerAlreadyExistsError', 'ContainerDoesNotExistError', 'ContainerIsNotEmptyError', 'ObjectDoesNotExistError', 'ObjectHashMismatchError', 'InvalidContainerNameError', 'OLD_CONSTANT_TO_NEW_MAPPING' ] class Provider(object): """ Defines for each of the supported providers Non-Dummy drivers are sorted in alphabetical order. Please preserve this ordering when adding new drivers. 
:cvar DUMMY: Example provider :cvar ALIYUN_OSS: Aliyun OSS storage driver :cvar AURORAOBJECTS: AuroraObjects storage driver :cvar CLOUDFILES: CloudFiles :cvar GOOGLE_STORAGE Google Storage :cvar LOCAL: Local storage driver :cvar NIMBUS: Nimbus.io driver :cvar NINEFOLD: Ninefold :cvar S3: Amazon S3 US :cvar S3_AP_NORTHEAST_HOST: Amazon S3 Asia South East (Tokyo) :cvar S3_AP_SOUTHEAST_HOST: Amazon S3 Asia South East (Singapore) :cvar S3_AP_SOUTHEAST2_HOST: Amazon S3 Asia South East 2 (Sydney) :cvar S3_CN_NORTH: Amazon S3 CN North (Beijing) :cvar S3_EU_WEST: Amazon S3 EU West (Ireland) :cvar S3_US_WEST: Amazon S3 US West (Northern California) :cvar S3_US_WEST_OREGON: Amazon S3 US West 2 (Oregon) :cvar S3_RGW: S3 RGW :cvar S3_RGW_OUTSCALE: OUTSCALE S3 RGW """ DUMMY = 'dummy' ALIYUN_OSS = 'aliyun_oss' AURORAOBJECTS = 'auroraobjects' AZURE_BLOBS = 'azure_blobs' BACKBLAZE_B2 = 'backblaze_b2' CLOUDFILES = 'cloudfiles' GOOGLE_STORAGE = 'google_storage' KTUCLOUD = 'ktucloud' LOCAL = 'local' NIMBUS = 'nimbus' NINEFOLD = 'ninefold' OPENSTACK_SWIFT = 'openstack_swift' S3 = 's3' S3_AP_NORTHEAST = 's3_ap_northeast' S3_AP_NORTHEAST1 = 's3_ap_northeast_1' S3_AP_NORTHEAST2 = 's3_ap_northeast_2' S3_AP_SOUTH = 's3_ap_south' S3_AP_SOUTHEAST = 's3_ap_southeast' S3_AP_SOUTHEAST2 = 's3_ap_southeast2' S3_CA_CENTRAL = 's3_ca_central' S3_CN_NORTH = 's3_cn_north' S3_EU_WEST = 's3_eu_west' S3_EU_WEST2 = 's3_eu_west_2' S3_EU_CENTRAL = 's3_eu_central' S3_SA_EAST = 's3_sa_east' S3_US_EAST2 = 's3_us_east_2' S3_US_WEST = 's3_us_west' S3_US_WEST_OREGON = 's3_us_west_oregon' S3_US_GOV_WEST = 's3_us_gov_west' S3_RGW = 's3_rgw' S3_RGW_OUTSCALE = 's3_rgw_outscale' # Deperecated CLOUDFILES_US = 'cloudfiles_us' CLOUDFILES_UK = 'cloudfiles_uk' CLOUDFILES_SWIFT = 'cloudfiles_swift' OLD_CONSTANT_TO_NEW_MAPPING = { # CloudFiles Provider.CLOUDFILES_US: Provider.CLOUDFILES, Provider.CLOUDFILES_UK: Provider.CLOUDFILES_UK, Provider.CLOUDFILES_SWIFT: Provider.OPENSTACK_SWIFT } class ContainerError(LibcloudError): 
error_type = 'ContainerError' def __init__(self, value, driver, container_name): self.container_name = container_name super(ContainerError, self).__init__(value=value, driver=driver) def __str__(self): return ('<%s in %s, container=%s, value=%s>' % (self.error_type, repr(self.driver), self.container_name, self.value)) class ObjectError(LibcloudError): error_type = 'ContainerError' def __init__(self, value, driver, object_name): self.object_name = object_name super(ObjectError, self).__init__(value=value, driver=driver) def __str__(self): return self.__repr__() def __repr__(self): return '<%s in %s, value=%s, object = %s>' % (self.error_type, repr(self.driver), self.value, self.object_name) class ContainerAlreadyExistsError(ContainerError): error_type = 'ContainerAlreadyExistsError' class ContainerDoesNotExistError(ContainerError): error_type = 'ContainerDoesNotExistError' class ContainerIsNotEmptyError(ContainerError): error_type = 'ContainerIsNotEmptyError' class ObjectDoesNotExistError(ObjectError): error_type = 'ObjectDoesNotExistError' class ObjectHashMismatchError(ObjectError): error_type = 'ObjectHashMismatchError' class InvalidContainerNameError(ContainerError): error_type = 'InvalidContainerNameError' apache-libcloud-2.2.1/libcloud/storage/drivers/0000775000175000017500000000000013160535107021336 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/storage/drivers/cloudfiles.py0000664000175000017500000011076413153541406024053 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from hashlib import sha1 import hmac import os from time import time from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlencode try: import simplejson as json except ImportError: import json from libcloud.utils.py3 import PY3 from libcloud.utils.py3 import b from libcloud.utils.py3 import urlquote if PY3: from io import FileIO as file from libcloud.utils.files import read_in_chunks from libcloud.common.types import MalformedResponseError, LibcloudError from libcloud.common.base import Response, RawResponse from libcloud.storage.providers import Provider from libcloud.storage.base import Object, Container, StorageDriver from libcloud.storage.types import ContainerAlreadyExistsError from libcloud.storage.types import ContainerDoesNotExistError from libcloud.storage.types import ContainerIsNotEmptyError from libcloud.storage.types import ObjectDoesNotExistError from libcloud.storage.types import ObjectHashMismatchError from libcloud.storage.types import InvalidContainerNameError from libcloud.common.openstack import OpenStackBaseConnection from libcloud.common.openstack import OpenStackDriverMixin from libcloud.common.rackspace import AUTH_URL CDN_HOST = 'cdn.clouddrive.com' API_VERSION = 'v1.0' # Keys which are used to select a correct endpoint from the service catalog. 
INTERNAL_ENDPOINT_KEY = 'internalURL' PUBLIC_ENDPOINT_KEY = 'publicURL' class CloudFilesResponse(Response): valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT] def success(self): i = int(self.status) return i >= 200 and i <= 299 or i in self.valid_response_codes def parse_body(self): if not self.body: return None if 'content-type' in self.headers: key = 'content-type' elif 'Content-Type' in self.headers: key = 'Content-Type' else: raise LibcloudError('Missing content-type header') content_type = self.headers[key] if content_type.find(';') != -1: content_type = content_type.split(';')[0] if content_type == 'application/json': try: data = json.loads(self.body) except: raise MalformedResponseError('Failed to parse JSON', body=self.body, driver=CloudFilesStorageDriver) elif content_type == 'text/plain': data = self.body else: data = self.body return data class CloudFilesRawResponse(CloudFilesResponse, RawResponse): pass class OpenStackSwiftConnection(OpenStackBaseConnection): """ Connection class for the OpenStack Swift endpoint. 
""" responseCls = CloudFilesResponse rawResponseCls = CloudFilesRawResponse auth_url = AUTH_URL _auth_version = '1.0' # TODO: Reverse the relationship - Swift -> CloudFiles def __init__(self, user_id, key, secure=True, **kwargs): # Ignore this for now kwargs.pop('use_internal_url', None) super(OpenStackSwiftConnection, self).__init__(user_id, key, secure=secure, **kwargs) self.api_version = API_VERSION self.accept_format = 'application/json' self._service_type = self._ex_force_service_type or 'object-store' self._service_name = self._ex_force_service_name or 'swift' if self._ex_force_service_region: self._service_region = self._ex_force_service_region else: self._service_region = None def get_endpoint(self, *args, **kwargs): if ('2.0' in self._auth_version) or ('3.x' in self._auth_version): endpoint = self.service_catalog.get_endpoint( service_type=self._service_type, name=self._service_name, region=self._service_region) elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): endpoint = self.service_catalog.get_endpoint( name=self._service_name, region=self._service_region) else: endpoint = None if endpoint: return endpoint.url else: raise LibcloudError('Could not find specified endpoint') def request(self, action, params=None, data='', headers=None, method='GET', raw=False, cdn_request=False): if not headers: headers = {} if not params: params = {} self.cdn_request = cdn_request params['format'] = 'json' if method in ['POST', 'PUT'] and 'Content-Type' not in headers: headers.update({'Content-Type': 'application/json; charset=UTF-8'}) return super(OpenStackSwiftConnection, self).request( action=action, params=params, data=data, method=method, headers=headers, raw=raw) class CloudFilesConnection(OpenStackSwiftConnection): """ Base connection class for the Cloudfiles driver. 
""" responseCls = CloudFilesResponse rawResponseCls = CloudFilesRawResponse auth_url = AUTH_URL _auth_version = '2.0' def __init__(self, user_id, key, secure=True, use_internal_url=False, **kwargs): super(CloudFilesConnection, self).__init__(user_id, key, secure=secure, **kwargs) self.api_version = API_VERSION self.accept_format = 'application/json' self.cdn_request = False self.use_internal_url = use_internal_url def get_endpoint(self): region = self._ex_force_service_region.upper() if self.use_internal_url: endpoint_type = 'internal' else: endpoint_type = 'external' if '2.0' in self._auth_version: ep = self.service_catalog.get_endpoint( service_type='object-store', name='cloudFiles', region=region, endpoint_type=endpoint_type) cdn_ep = self.service_catalog.get_endpoint( service_type='rax:object-cdn', name='cloudFilesCDN', region=region, endpoint_type=endpoint_type) else: raise LibcloudError( 'Auth version "%s" not supported' % (self._auth_version)) # if this is a CDN request, return the cdn url instead if self.cdn_request: ep = cdn_ep if not ep or not ep.url: raise LibcloudError('Could not find specified endpoint') return ep.url def request(self, action, params=None, data='', headers=None, method='GET', raw=False, cdn_request=False): if not headers: headers = {} if not params: params = {} self.cdn_request = cdn_request params['format'] = 'json' if method in ['POST', 'PUT'] and 'Content-Type' not in headers: headers.update({'Content-Type': 'application/json; charset=UTF-8'}) return super(CloudFilesConnection, self).request( action=action, params=params, data=data, method=method, headers=headers, raw=raw, cdn_request=cdn_request) class CloudFilesStorageDriver(StorageDriver, OpenStackDriverMixin): """ CloudFiles driver. 
""" name = 'CloudFiles' website = 'http://www.rackspace.com/' connectionCls = CloudFilesConnection hash_type = 'md5' supports_chunked_encoding = True def __init__(self, key, secret=None, secure=True, host=None, port=None, region='ord', use_internal_url=False, **kwargs): """ @inherits: :class:`StorageDriver.__init__` :param region: ID of the region which should be used. :type region: ``str`` """ # This is here for backard compatibility if 'ex_force_service_region' in kwargs: region = kwargs['ex_force_service_region'] self.use_internal_url = use_internal_url OpenStackDriverMixin.__init__(self, **kwargs) super(CloudFilesStorageDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, region=region, **kwargs) @classmethod def list_regions(cls): return ['ord', 'dfw', 'iad', 'lon', 'hkg', 'syd'] def iterate_containers(self): response = self.connection.request('') if response.status == httplib.NO_CONTENT: return [] elif response.status == httplib.OK: return self._to_container_list(json.loads(response.body)) raise LibcloudError('Unexpected status code: %s' % (response.status)) def get_container(self, container_name): container_name_encoded = self._encode_container_name(container_name) response = self.connection.request('/%s' % (container_name_encoded), method='HEAD') if response.status == httplib.NO_CONTENT: container = self._headers_to_container( container_name, response.headers) return container elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(None, self, container_name) raise LibcloudError('Unexpected status code: %s' % (response.status)) def get_object(self, container_name, object_name): container = self.get_container(container_name) container_name_encoded = self._encode_container_name(container_name) object_name_encoded = self._encode_object_name(object_name) response = self.connection.request('/%s/%s' % (container_name_encoded, object_name_encoded), method='HEAD') if response.status in [httplib.OK, 
httplib.NO_CONTENT]: obj = self._headers_to_object( object_name, container, response.headers) return obj elif response.status == httplib.NOT_FOUND: raise ObjectDoesNotExistError(None, self, object_name) raise LibcloudError('Unexpected status code: %s' % (response.status)) def get_container_cdn_url(self, container, ex_ssl_uri=False): # pylint: disable=unexpected-keyword-arg container_name_encoded = self._encode_container_name(container.name) response = self.connection.request('/%s' % (container_name_encoded), method='HEAD', cdn_request=True) if response.status == httplib.NO_CONTENT: if ex_ssl_uri: cdn_url = response.headers['x-cdn-ssl-uri'] else: cdn_url = response.headers['x-cdn-uri'] return cdn_url elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value='', container_name=container.name, driver=self) raise LibcloudError('Unexpected status code: %s' % (response.status)) def get_object_cdn_url(self, obj): container_cdn_url = self.get_container_cdn_url(container=obj.container) return '%s/%s' % (container_cdn_url, obj.name) def enable_container_cdn(self, container, ex_ttl=None): """ @inherits: :class:`StorageDriver.enable_container_cdn` :param ex_ttl: cache time to live :type ex_ttl: ``int`` """ container_name = self._encode_container_name(container.name) headers = {'X-CDN-Enabled': 'True'} if ex_ttl: headers['X-TTL'] = ex_ttl # pylint: disable=unexpected-keyword-arg response = self.connection.request('/%s' % (container_name), method='PUT', headers=headers, cdn_request=True) return response.status in [httplib.CREATED, httplib.ACCEPTED] def create_container(self, container_name): container_name_encoded = self._encode_container_name(container_name) response = self.connection.request( '/%s' % (container_name_encoded), method='PUT') if response.status == httplib.CREATED: # Accepted mean that container is not yet created but it will be # eventually extra = {'object_count': 0} container = Container(name=container_name, extra=extra, driver=self) 
return container elif response.status == httplib.ACCEPTED: error = ContainerAlreadyExistsError(None, self, container_name) raise error raise LibcloudError('Unexpected status code: %s' % (response.status)) def delete_container(self, container): name = self._encode_container_name(container.name) # Only empty container can be deleted response = self.connection.request('/%s' % (name), method='DELETE') if response.status == httplib.NO_CONTENT: return True elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value='', container_name=name, driver=self) elif response.status == httplib.CONFLICT: # @TODO: Add "delete_all_objects" parameter? raise ContainerIsNotEmptyError(value='', container_name=name, driver=self) def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): container_name = obj.container.name object_name = obj.name response = self.connection.request('/%s/%s' % (container_name, object_name), method='GET', raw=True) return self._get_object( obj=obj, callback=self._save_object, response=response, callback_kwargs={'obj': obj, 'response': response.response, 'destination_path': destination_path, 'overwrite_existing': overwrite_existing, 'delete_on_failure': delete_on_failure}, success_status_code=httplib.OK) def download_object_as_stream(self, obj, chunk_size=None): container_name = obj.container.name object_name = obj.name response = self.connection.request('/%s/%s' % (container_name, object_name), method='GET', raw=True) return self._get_object(obj=obj, callback=read_in_chunks, response=response, callback_kwargs={'iterator': response.response, 'chunk_size': chunk_size}, success_status_code=httplib.OK) def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, headers=None): """ Upload an object. Note: This will override file with a same name if it already exists. 
""" return self._put_object(container=container, object_name=object_name, extra=extra, file_path=file_path, verify_hash=verify_hash, headers=headers) def upload_object_via_stream(self, iterator, container, object_name, extra=None, headers=None): if isinstance(iterator, file): iterator = iter(iterator) return self._put_object(container=container, object_name=object_name, extra=extra, stream=iterator, headers=headers) def delete_object(self, obj): container_name = self._encode_container_name(obj.container.name) object_name = self._encode_object_name(obj.name) response = self.connection.request( '/%s/%s' % (container_name, object_name), method='DELETE') if response.status == httplib.NO_CONTENT: return True elif response.status == httplib.NOT_FOUND: raise ObjectDoesNotExistError(value='', object_name=object_name, driver=self) raise LibcloudError('Unexpected status code: %s' % (response.status)) def ex_purge_object_from_cdn(self, obj, email=None): """ Purge edge cache for the specified object. :param email: Email where a notification will be sent when the job completes. 
        (optional)
        :type email: ``str``
        """
        container_name = self._encode_container_name(obj.container.name)
        object_name = self._encode_object_name(obj.name)
        # Purge notification email is optional.
        headers = {'X-Purge-Email': email} if email else {}

        # pylint: disable=unexpected-keyword-arg
        response = self.connection.request('/%s/%s' % (container_name,
                                                       object_name),
                                           method='DELETE',
                                           headers=headers,
                                           cdn_request=True)

        return response.status == httplib.NO_CONTENT

    def ex_get_meta_data(self):
        """
        Get meta data

        :rtype: ``dict``
        """
        # Account-level HEAD exposes aggregate usage counters as headers.
        response = self.connection.request('', method='HEAD')

        if response.status == httplib.NO_CONTENT:
            container_count = response.headers.get(
                'x-account-container-count', 'unknown')
            object_count = response.headers.get(
                'x-account-object-count', 'unknown')
            bytes_used = response.headers.get(
                'x-account-bytes-used', 'unknown')
            temp_url_key = response.headers.get(
                'x-account-meta-temp-url-key', None)

            # NOTE(review): if any counter header is missing, the 'unknown'
            # fallback makes int() raise ValueError here — confirm whether
            # the headers are guaranteed by the API.
            return {'container_count': int(container_count),
                    'object_count': int(object_count),
                    'bytes_used': int(bytes_used),
                    'temp_url_key': temp_url_key}

        raise LibcloudError('Unexpected status code: %s' % (response.status))

    def ex_multipart_upload_object(self, file_path, container, object_name,
                                   chunk_size=33554432, extra=None,
                                   verify_hash=True):
        """
        Upload a large object in ``chunk_size`` pieces plus a manifest
        object; small files fall back to a single upload_object call.
        """
        object_size = os.path.getsize(file_path)
        if object_size < chunk_size:
            return self.upload_object(file_path, container, object_name,
                                      extra=extra, verify_hash=verify_hash)

        iter_chunk_reader = FileChunkReader(file_path, chunk_size)

        for index, iterator in enumerate(iter_chunk_reader):
            self._upload_object_part(container=container,
                                     object_name=object_name,
                                     part_number=index,
                                     iterator=iterator,
                                     verify_hash=verify_hash)

        # The manifest object ties all the uploaded parts together.
        return self._upload_object_manifest(container=container,
                                            object_name=object_name,
                                            extra=extra,
                                            verify_hash=verify_hash)

    def ex_enable_static_website(self, container, index_file='index.html'):
        """
        Enable serving a static website.
        :param container: Container instance
        :type container: :class:`Container`

        :param index_file: Name of the object which becomes an index page for
        every sub-directory in this container.
        :type index_file: ``str``

        :rtype: ``bool``
        """
        container_name = container.name
        headers = {'X-Container-Meta-Web-Index': index_file}

        # pylint: disable=unexpected-keyword-arg
        response = self.connection.request('/%s' % (container_name),
                                           method='POST',
                                           headers=headers,
                                           cdn_request=False)

        return response.status in [httplib.CREATED, httplib.ACCEPTED]

    def ex_set_error_page(self, container, file_name='error.html'):
        """
        Set a custom error page which is displayed if file is not found and
        serving of a static website is enabled.

        :param container: Container instance
        :type container: :class:`Container`

        :param file_name: Name of the object which becomes the error page.
        :type file_name: ``str``

        :rtype: ``bool``
        """
        container_name = container.name
        headers = {'X-Container-Meta-Web-Error': file_name}

        # pylint: disable=unexpected-keyword-arg
        response = self.connection.request('/%s' % (container_name),
                                           method='POST',
                                           headers=headers,
                                           cdn_request=False)

        return response.status in [httplib.CREATED, httplib.ACCEPTED]

    def ex_set_account_metadata_temp_url_key(self, key):
        """
        Set the metadata header X-Account-Meta-Temp-URL-Key on your Cloud
        Files account.

        :param key: X-Account-Meta-Temp-URL-Key
        :type key: ``str``

        :rtype: ``bool``
        """
        headers = {'X-Account-Meta-Temp-URL-Key': key}

        # pylint: disable=unexpected-keyword-arg
        response = self.connection.request('',
                                           method='POST',
                                           headers=headers,
                                           cdn_request=False)

        return response.status in [httplib.OK, httplib.NO_CONTENT,
                                   httplib.CREATED, httplib.ACCEPTED]

    def ex_get_object_temp_url(self, obj, method='GET', timeout=60):
        """
        Create a temporary URL to allow others to retrieve or put objects
        in your Cloud Files account for as long or as short a time as you
        wish.  This method is specifically for allowing users to retrieve
        or update an object.
        :param obj: The object that you wish to make temporarily public
        :type obj: :class:`Object`

        :param method: Which method you would like to allow, 'PUT' or 'GET'
        :type method: ``str``

        :param timeout: Time (in seconds) after which you want the TempURL
        to expire.
        :type timeout: ``int``

        :rtype: ``bool``
        """
        # pylint: disable=no-member
        self.connection._populate_hosts_and_request_paths()
        expires = int(time() + timeout)
        path = '%s/%s/%s' % (self.connection.request_path,
                             obj.container.name, obj.name)
        try:
            key = self.ex_get_meta_data()['temp_url_key']
            assert key is not None
        except Exception:
            raise KeyError('You must first set the ' +
                           'X-Account-Meta-Temp-URL-Key header on your ' +
                           'Cloud Files account using ' +
                           'ex_set_account_metadata_temp_url_key before ' +
                           'you can use this method.')
        # Signature is HMAC-SHA1 over "<method>\n<expires>\n<path>" keyed by
        # the account temp-url key (Swift TempURL scheme).
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(b(key), b(hmac_body), sha1).hexdigest()
        params = urlencode({'temp_url_sig': sig,
                            'temp_url_expires': expires})

        temp_url = 'https://%s/%s/%s?%s' %\
                   (self.connection.host + self.connection.request_path,
                    obj.container.name, obj.name, params)

        return temp_url

    def _upload_object_part(self, container, object_name, part_number,
                            iterator, verify_hash=True):
        # Part objects are named "<object>/<8-digit part number>" so the
        # manifest prefix below picks them up in order.
        part_name = object_name + '/%08d' % part_number
        extra = {'content_type': 'application/octet-stream'}

        self._put_object(container=container,
                         object_name=part_name,
                         extra=extra, stream=iterator,
                         verify_hash=verify_hash)

    def _upload_object_manifest(self, container, object_name, extra=None,
                                verify_hash=True):
        """
        Create the zero-byte manifest object whose X-Object-Manifest header
        stitches together the previously uploaded parts.
        """
        extra = extra or {}
        meta_data = extra.get('meta_data')

        container_name_encoded = self._encode_container_name(container.name)
        object_name_encoded = self._encode_object_name(object_name)
        request_path = '/%s/%s' % (container_name_encoded,
                                   object_name_encoded)

        # pylint: disable=no-member
        headers = {'X-Auth-Token': self.connection.auth_token,
                   'X-Object-Manifest': '%s/%s/' %
                                        (container_name_encoded,
                                         object_name_encoded)}

        data = ''
        response = self.connection.request(request_path,
                                           method='PUT', data=data,
                                           headers=headers, raw=True)

        object_hash = None

        if verify_hash:
            # The manifest body is empty, so its etag is the hash of ''.
            hash_function = self._get_hash_function()
            hash_function.update(b(data))
            data_hash = hash_function.hexdigest()
            object_hash = response.headers.get('etag')

            if object_hash != data_hash:
                raise ObjectHashMismatchError(
                    value=('MD5 hash checksum does not match (expected=%s, ' +
                           'actual=%s)') % (data_hash, object_hash),
                    object_name=object_name, driver=self)

        obj = Object(name=object_name, size=0, hash=object_hash, extra=None,
                     meta_data=meta_data, container=container, driver=self)

        return obj

    def list_container_objects(self, container, ex_prefix=None):
        """
        Return a list of objects for the given container.

        :param container: Container instance.
        :type container: :class:`Container`

        :param ex_prefix: Only get objects with names starting with ex_prefix
        :type ex_prefix: ``str``

        :return: A list of Object instances.
        :rtype: ``list`` of :class:`Object`
        """
        return list(self.iterate_container_objects(container,
                                                   ex_prefix=ex_prefix))

    def iterate_container_objects(self, container, ex_prefix=None):
        """
        Return a generator of objects for the given container.

        :param container: Container instance
        :type container: :class:`Container`

        :param ex_prefix: Only get objects with names starting with ex_prefix
        :type ex_prefix: ``str``

        :return: A generator of Object instances.
        :rtype: ``generator`` of :class:`Object`
        """
        params = {}

        if ex_prefix:
            params['prefix'] = ex_prefix

        # Page through the listing using "marker" until an empty page is
        # returned.
        while True:
            container_name_encoded = \
                self._encode_container_name(container.name)
            response = self.connection.request('/%s' %
                                               (container_name_encoded),
                                               params=params)

            if response.status == httplib.NO_CONTENT:
                # Empty or non-existent container
                break
            elif response.status == httplib.OK:
                objects = self._to_object_list(json.loads(response.body),
                                               container)

                if len(objects) == 0:
                    break

                for obj in objects:
                    yield obj
                # Resume the next page after the last yielded object.
                params['marker'] = obj.name

            else:
                raise LibcloudError('Unexpected status code: %s' %
                                    (response.status))

    def _put_object(self, container, object_name, extra=None,
                    file_path=None, stream=None, verify_hash=True,
                    headers=None):
        """
        Shared PUT implementation behind upload_object and
        upload_object_via_stream; uploads from either ``file_path`` or
        ``stream`` and optionally verifies the returned etag.
        """
        extra = extra or {}
        container_name_encoded = self._encode_container_name(container.name)
        object_name_encoded = self._encode_object_name(object_name)
        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)
        content_disposition = extra.get('content_disposition', None)

        headers = headers or {}

        if meta_data:
            # Custom metadata is passed as X-Object-Meta-* headers.
            for key, value in list(meta_data.items()):
                key = 'X-Object-Meta-%s' % (key)
                headers[key] = value

        if content_disposition is not None:
            headers['Content-Disposition'] = content_disposition

        request_path = '/%s/%s' % (container_name_encoded,
                                   object_name_encoded)

        result_dict = self._upload_object(
            object_name=object_name, content_type=content_type,
            request_path=request_path, request_method='PUT',
            headers=headers, file_path=file_path, stream=stream)

        response = result_dict['response']
        bytes_transferred = result_dict['bytes_transferred']
        server_hash = result_dict['response'].headers.get('etag', None)

        if response.status == httplib.EXPECTATION_FAILED:
            raise LibcloudError(value='Missing content-type header',
                                driver=self)
        elif verify_hash and not server_hash:
            raise LibcloudError(value='Server didn\'t return etag',
                                driver=self)
        elif (verify_hash and result_dict['data_hash'] != server_hash):
            raise ObjectHashMismatchError(
                value=('MD5 hash checksum does not match (expected=%s, ' +
                       'actual=%s)') % (result_dict['data_hash'],
                                        server_hash),
                object_name=object_name, driver=self)
        elif response.status == httplib.CREATED:
            obj = Object(
                name=object_name, size=bytes_transferred, hash=server_hash,
                extra=None, meta_data=meta_data, container=container,
                driver=self)

            return obj
        else:
            # @TODO: Add test case for this condition (probably 411)
            raise LibcloudError('status_code=%s' % (response.status),
                                driver=self)

    def _encode_container_name(self, name):
        """
        Encode container name so it can be used as part of the HTTP request.
        """
        if name.startswith('/'):
            name = name[1:]
        name = urlquote(name)

        if name.find('/') != -1:
            raise InvalidContainerNameError(value='Container name cannot'
                                                  ' contain slashes',
                                            container_name=name, driver=self)

        if len(name) > 256:
            raise InvalidContainerNameError(
                value='Container name cannot be longer than 256 bytes',
                container_name=name, driver=self)

        return name

    def _encode_object_name(self, name):
        # Object names only need URL quoting; slashes are allowed.
        name = urlquote(name)
        return name

    def _to_container_list(self, response):
        # @TODO: Handle more than 10k containers - use "lazy list"?
        for container in response:
            extra = {'object_count': int(container['count']),
                     'size': int(container['bytes'])}
            yield Container(name=container['name'], extra=extra, driver=self)

    def _to_object_list(self, response, container):
        # Convert a parsed JSON listing into Object instances.
        objects = []

        for obj in response:
            name = obj['name']
            size = int(obj['bytes'])
            hash = obj['hash']
            extra = {'content_type': obj['content_type'],
                     'last_modified': obj['last_modified']}
            objects.append(Object(
                name=name, size=size, hash=hash, extra=extra,
                meta_data=None, container=container, driver=self))

        return objects

    def _headers_to_container(self, name, headers):
        # Container usage counters come back as response headers on HEAD.
        size = int(headers.get('x-container-bytes-used', 0))
        object_count = int(headers.get('x-container-object-count', 0))

        extra = {'object_count': object_count,
                 'size': size}
        container = Container(name=name, extra=extra, driver=self)
        return container

    def _headers_to_object(self, name, container, headers):
        # Build an Object from a HEAD response; any x-object-meta-* header
        # becomes an entry in meta_data (prefix stripped).
        size = int(headers.pop('content-length', 0))
        last_modified = headers.pop('last-modified', None)
        etag = headers.pop('etag', None)
        content_type = headers.pop('content-type', None)

        meta_data = {}
        for key, value in list(headers.items()):
            if key.find('x-object-meta-') != -1:
                key = key.replace('x-object-meta-', '')
                meta_data[key] = value

        extra = {'content_type': content_type,
                 'last_modified': last_modified}

        obj = Object(name=name, size=size, hash=etag, extra=extra,
                     meta_data=meta_data, container=container, driver=self)
        return obj

    def _ex_connection_class_kwargs(self):
        # Forward the region and ServiceNet preference to the connection.
        kwargs = self.openstack_connection_kwargs()
        kwargs['ex_force_service_region'] = self.region
        kwargs['use_internal_url'] = self.use_internal_url
        return kwargs


class OpenStackSwiftStorageDriver(CloudFilesStorageDriver):
    """
    Storage driver for the OpenStack Swift.
""" type = Provider.CLOUDFILES_SWIFT name = 'OpenStack Swift' connectionCls = OpenStackSwiftConnection # TODO: Reverse the relationship - Swift -> CloudFiles def __init__(self, key, secret=None, secure=True, host=None, port=None, region=None, **kwargs): super(OpenStackSwiftStorageDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, region=region, **kwargs) class FileChunkReader(object): def __init__(self, file_path, chunk_size): self.file_path = file_path self.total = os.path.getsize(file_path) self.chunk_size = chunk_size self.bytes_read = 0 self.stop_iteration = False def __iter__(self): return self def next(self): if self.stop_iteration: raise StopIteration start_block = self.bytes_read end_block = start_block + self.chunk_size if end_block >= self.total: end_block = self.total self.stop_iteration = True self.bytes_read += end_block - start_block return ChunkStreamReader(file_path=self.file_path, start_block=start_block, end_block=end_block, chunk_size=8192) def __next__(self): return self.next() class ChunkStreamReader(object): def __init__(self, file_path, start_block, end_block, chunk_size): self.fd = open(file_path, 'rb') self.fd.seek(start_block) self.start_block = start_block self.end_block = end_block self.chunk_size = chunk_size self.bytes_read = 0 self.stop_iteration = False def __iter__(self): return self def next(self): if self.stop_iteration: self.fd.close() raise StopIteration block_size = self.chunk_size if self.bytes_read + block_size > \ self.end_block - self.start_block: block_size = self.end_block - self.start_block - self.bytes_read self.stop_iteration = True block = self.fd.read(block_size) self.bytes_read += block_size return block def __next__(self): return self.next() apache-libcloud-2.2.1/libcloud/storage/drivers/rgw.py0000664000175000017500000001234513153541406022515 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.common.types import LibcloudError
from libcloud.common.aws import SignedAWSConnection, DEFAULT_SIGNATURE_VERSION
from libcloud.storage.drivers.s3 import BaseS3Connection, S3Connection
from libcloud.storage.drivers.s3 import S3StorageDriver, API_VERSION

__all__ = [
    'S3RGWStorageDriver',
    'S3RGWOutscaleStorageDriver'
]

S3_RGW_DEFAULT_REGION = 'default'

# Outscale OSU (Object Storage Unit) endpoints, keyed by region.
S3_RGW_OUTSCALE_HOSTS_BY_REGION =\
    {'eu-west-1': 'osu.eu-west-1.outscale.com',
     'eu-west-2': 'osu.eu-west-2.outscale.com',
     'us-west-1': 'osu.us-west-1.outscale.com',
     'us-east-2': 'osu.us-east-2.outscale.com',
     'cn-southeast-1': 'osu.cn-southeast-1.outscale.hk'}

S3_RGW_OUTSCALE_DEFAULT_REGION = 'eu-west-2'


class S3RGWConnectionAWS4(SignedAWSConnection, BaseS3Connection):
    """
    Ceph RGW connection using AWS signature version 4.
    """
    service_name = 's3'
    version = API_VERSION

    def __init__(self, user_id, key, secure=True,
                 host=None, port=None, url=None, timeout=None,
                 proxy_url=None, token=None, retry_delay=None,
                 backoff=None, **kwargs):

        super(S3RGWConnectionAWS4, self).__init__(user_id, key,
                                                  secure, host,
                                                  port, url,
                                                  timeout,
                                                  proxy_url, token,
                                                  retry_delay,
                                                  backoff,
                                                  4)  # force aws4


class S3RGWConnectionAWS2(S3Connection):
    """
    Ceph RGW connection using AWS signature version 2.
    """

    def __init__(self, user_id, key, secure=True,
                 host=None, port=None, url=None, timeout=None,
                 proxy_url=None, token=None, retry_delay=None,
                 backoff=None, **kwargs):

        super(S3RGWConnectionAWS2, self).__init__(user_id, key,
                                                  secure, host,
                                                  port, url,
                                                  timeout,
                                                  proxy_url, token,
                                                  retry_delay,
                                                  backoff)


class S3RGWStorageDriver(S3StorageDriver):
    """
    S3-compatible storage driver for Ceph RGW; the connection class is
    chosen at construction time based on the requested signature version.
    """
    name = 'Ceph RGW'
    website = 'http://ceph.com/'

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=S3_RGW_DEFAULT_REGION,
                 **kwargs):
        # Unlike public S3, an RGW deployment has no well-known host.
        if host is None:
            raise LibcloudError('host required', driver=self)

        self.name = kwargs.pop('name', None)
        if self.name is None:
            self.name = 'Ceph RGW S3 (%s)' % (region)

        self.ex_location_name = region
        self.region_name = region
        self.signature_version = str(kwargs.pop('signature_version',
                                                DEFAULT_SIGNATURE_VERSION))

        if self.signature_version not in ['2', '4']:
            raise ValueError('Invalid signature_version: %s' %
                             (self.signature_version))

        # Pick the connection class matching the signature version.
        if self.signature_version == '2':
            self.connectionCls = S3RGWConnectionAWS2
        elif self.signature_version == '4':
            self.connectionCls = S3RGWConnectionAWS4
        self.connectionCls.host = host

        super(S3RGWStorageDriver, self).__init__(key, secret,
                                                 secure, host, port,
                                                 api_version, region,
                                                 **kwargs)

    def _ex_connection_class_kwargs(self):
        kwargs = {}
        kwargs['signature_version'] = self.signature_version
        return kwargs


class S3RGWOutscaleStorageDriver(S3RGWStorageDriver):
    """
    RGW driver preconfigured for Outscale's hosted OSU endpoints.
    """
    name = 'RGW Outscale'
    website = 'https://en.outscale.com/'

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=S3_RGW_OUTSCALE_DEFAULT_REGION,
                 **kwargs):
        if region not in S3_RGW_OUTSCALE_HOSTS_BY_REGION:
            raise LibcloudError('Unknown region (%s)' % (region), driver=self)

        # The host is derived from the region, ignoring any passed-in host.
        host = S3_RGW_OUTSCALE_HOSTS_BY_REGION[region]
        kwargs['name'] = 'OUTSCALE Ceph RGW S3 (%s)' % region

        super(S3RGWOutscaleStorageDriver, self).__init__(key, secret,
                                                         secure, host, port,
                                                         api_version, region,
                                                         **kwargs)
apache-libcloud-2.2.1/libcloud/storage/drivers/__init__.py0000664000175000017500000000156012701023453023445 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.
See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Drivers for working with different providers
"""

__all__ = [
    'dummy',
    'cloudfiles'
]
apache-libcloud-2.2.1/libcloud/storage/drivers/ninefold.py0000664000175000017500000000206312701023453023503 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.storage.providers import Provider

from libcloud.storage.drivers.atmos import AtmosDriver


class NinefoldStorageDriver(AtmosDriver):
    """
    Ninefold storage driver: the Atmos driver pointed at Ninefold's
    endpoint; all behavior is inherited from AtmosDriver.
    """
    host = 'api.ninefold.com'
    path = '/storage/v1.0'

    type = Provider.NINEFOLD
    name = 'Ninefold'
    website = 'http://ninefold.com/'
apache-libcloud-2.2.1/libcloud/storage/drivers/azure_blobs.py0000664000175000017500000010540213160264462024224 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement import base64 import os import binascii from libcloud.utils.py3 import ET from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlquote from libcloud.utils.py3 import tostring from libcloud.utils.py3 import b from libcloud.utils.xml import fixxpath from libcloud.utils.files import read_in_chunks from libcloud.common.types import LibcloudError from libcloud.common.azure import AzureConnection from libcloud.storage.base import Object, Container, StorageDriver from libcloud.storage.types import ContainerIsNotEmptyError from libcloud.storage.types import ContainerAlreadyExistsError from libcloud.storage.types import InvalidContainerNameError from libcloud.storage.types import ContainerDoesNotExistError from libcloud.storage.types import ObjectDoesNotExistError from libcloud.storage.types import ObjectHashMismatchError # Desired number of items in each response inside a paginated request RESPONSES_PER_REQUEST = 100 # As per the Azure documentation, if the upload file size is less than # 64MB, we can upload it in a single request. However, in real life azure # servers seem to disconnect randomly after around 5 MB or 200s of upload. # So, it is better that for file sizes greater than 4MB, we upload it in # chunks. # Also, with large sizes, if we use a lease, the lease will timeout after # 60 seconds, but the upload might still be in progress. This can be # handled in code, but if we use chunked uploads, the lease renewal will # happen automatically. AZURE_BLOCK_MAX_SIZE = 4 * 1024 * 1024 # Azure block blocks must be maximum 4MB # Azure page blobs must be aligned in 512 byte boundaries (4MB fits that) AZURE_CHUNK_SIZE = 4 * 1024 * 1024 # Azure page blob must be aligned in 512 byte boundaries AZURE_PAGE_CHUNK_SIZE = 512 # The time period (in seconds) for which a lease must be obtained. # If set as -1, we get an infinite lease, but that is a bad idea. 
# If after getting an infinite lease, there was an issue in releasing the
# lease, the object will remain 'locked' forever, unless the lease is
# released using the lease_id (which is not exposed to the user)
AZURE_LEASE_PERIOD = 60

AZURE_STORAGE_HOST_SUFFIX = 'blob.core.windows.net'


class AzureBlobLease(object):
    """
    Context manager which (optionally) acquires, renews and releases an
    Azure blob lease around a blob operation.
    """

    def __init__(self, driver, object_path, use_lease):
        """
        :param driver: The Azure storage driver that is being used
        :type driver: :class:`AzureStorageDriver`

        :param object_path: The path of the object we need to lease
        :type object_path: ``str``

        :param use_lease: Indicates if we must take a lease or not
        :type use_lease: ``bool``
        """
        self.object_path = object_path
        self.driver = driver
        self.use_lease = use_lease
        self.lease_id = None
        self.params = {'comp': 'lease'}

    def renew(self):
        """
        Renew the lease. A no-op when no lease is currently held.
        """
        if self.lease_id is None:
            return

        hdrs = {
            'x-ms-lease-action': 'renew',
            'x-ms-lease-id': self.lease_id,
            'x-ms-lease-duration': '60',
        }
        rsp = self.driver.connection.request(self.object_path,
                                             headers=hdrs,
                                             params=self.params,
                                             method='PUT')
        if rsp.status != httplib.OK:
            raise LibcloudError('Unable to obtain lease', driver=self)

    def update_headers(self, headers):
        """
        Inject the lease id into ``headers`` when a lease is held.
        """
        if self.lease_id:
            headers['x-ms-lease-id'] = self.lease_id

    def __enter__(self):
        # Leasing disabled: behave as a transparent context manager.
        if not self.use_lease:
            return self

        hdrs = {
            'x-ms-lease-action': 'acquire',
            'x-ms-lease-duration': '60',
        }
        rsp = self.driver.connection.request(self.object_path,
                                             headers=hdrs,
                                             params=self.params,
                                             method='PUT')

        # A missing blob simply means there is nothing to lease yet.
        if rsp.status == httplib.NOT_FOUND:
            return self
        if rsp.status != httplib.CREATED:
            raise LibcloudError('Unable to obtain lease', driver=self)

        self.lease_id = rsp.headers['x-ms-lease-id']
        return self

    def __exit__(self, type, value, traceback):
        # Nothing acquired, nothing to release.
        if self.lease_id is None:
            return

        hdrs = {
            'x-ms-lease-action': 'release',
            'x-ms-lease-id': self.lease_id,
        }
        rsp = self.driver.connection.request(self.object_path,
                                             headers=hdrs,
                                             params=self.params,
                                             method='PUT')
        if rsp.status != httplib.OK:
            raise LibcloudError('Unable to release lease', driver=self)
Container(name=name, extra=extra, driver=self) def _response_to_container(self, container_name, response): """ Converts a HTTP response to a container instance :param container_name: Name of the container :type container_name: ``str`` :param response: HTTP Response :type node: L{} :return: A container instance :rtype: :class:`Container` """ headers = response.headers extra = { 'url': 'http://%s%s' % (response.connection.host, response.connection.action), 'etag': headers['etag'], 'last_modified': headers['last-modified'], 'lease': { 'status': headers.get('x-ms-lease-status', None), 'state': headers.get('x-ms-lease-state', None), 'duration': headers.get('x-ms-lease-duration', None), }, 'meta_data': {} } for key, value in response.headers.items(): if key.startswith('x-ms-meta-'): key = key.split('x-ms-meta-')[1] extra['meta_data'][key] = value return Container(name=container_name, extra=extra, driver=self) def _xml_to_object(self, container, blob): """ Converts a BLOB XML node to an object instance :param container: Instance of the container holding the blob :type: :class:`Container` :param blob: XML info of the blob :type blob: L{} :return: An object instance :rtype: :class:`Object` """ name = blob.findtext(fixxpath(xpath='Name')) props = blob.find(fixxpath(xpath='Properties')) metadata = blob.find(fixxpath(xpath='Metadata')) etag = props.findtext(fixxpath(xpath='Etag')) size = int(props.findtext(fixxpath(xpath='Content-Length'))) extra = { 'content_type': props.findtext(fixxpath(xpath='Content-Type')), 'etag': etag, 'md5_hash': props.findtext(fixxpath(xpath='Content-MD5')), 'last_modified': props.findtext(fixxpath(xpath='Last-Modified')), 'url': blob.findtext(fixxpath(xpath='Url')), 'hash': props.findtext(fixxpath(xpath='Etag')), 'lease': { 'status': props.findtext(fixxpath(xpath='LeaseStatus')), 'state': props.findtext(fixxpath(xpath='LeaseState')), 'duration': props.findtext(fixxpath(xpath='LeaseDuration')), }, 'content_encoding': props.findtext(fixxpath( 
xpath='Content-Encoding')), 'content_language': props.findtext(fixxpath( xpath='Content-Language')), 'blob_type': props.findtext(fixxpath(xpath='BlobType')) } if extra['md5_hash']: value = binascii.hexlify(base64.b64decode(b(extra['md5_hash']))) value = value.decode('ascii') extra['md5_hash'] = value meta_data = {} for meta in metadata.getchildren(): meta_data[meta.tag] = meta.text return Object(name=name, size=size, hash=etag, meta_data=meta_data, extra=extra, container=container, driver=self) def _response_to_object(self, object_name, container, response): """ Converts a HTTP response to an object (from headers) :param object_name: Name of the object :type object_name: ``str`` :param container: Instance of the container holding the blob :type: :class:`Container` :param response: HTTP Response :type node: L{} :return: An object instance :rtype: :class:`Object` """ headers = response.headers size = int(headers['content-length']) etag = headers['etag'] extra = { 'url': 'http://%s%s' % (response.connection.host, response.connection.action), 'etag': etag, 'md5_hash': headers.get('content-md5', None), 'content_type': headers.get('content-type', None), 'content_language': headers.get('content-language', None), 'content_encoding': headers.get('content-encoding', None), 'last_modified': headers['last-modified'], 'lease': { 'status': headers.get('x-ms-lease-status', None), 'state': headers.get('x-ms-lease-state', None), 'duration': headers.get('x-ms-lease-duration', None), }, 'blob_type': headers['x-ms-blob-type'] } if extra['md5_hash']: value = binascii.hexlify(base64.b64decode(b(extra['md5_hash']))) value = value.decode('ascii') extra['md5_hash'] = value meta_data = {} for key, value in response.headers.items(): if key.startswith('x-ms-meta-'): key = key.split('x-ms-meta-')[1] meta_data[key] = value return Object(name=object_name, size=size, hash=etag, extra=extra, meta_data=meta_data, container=container, driver=self) def iterate_containers(self): """ @inherits: 
:class:`StorageDriver.iterate_containers` """ params = {'comp': 'list', 'maxresults': RESPONSES_PER_REQUEST, 'include': 'metadata'} while True: response = self.connection.request('/', params) if response.status != httplib.OK: raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) body = response.parse_body() containers = body.find(fixxpath(xpath='Containers')) containers = containers.findall(fixxpath(xpath='Container')) for container in containers: yield self._xml_to_container(container) params['marker'] = body.findtext('NextMarker') if not params['marker']: break def iterate_container_objects(self, container): """ @inherits: :class:`StorageDriver.iterate_container_objects` """ params = {'restype': 'container', 'comp': 'list', 'maxresults': RESPONSES_PER_REQUEST, 'include': 'metadata'} container_path = self._get_container_path(container) while True: response = self.connection.request(container_path, params=params) if response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value=None, driver=self, container_name=container.name) elif response.status != httplib.OK: raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) body = response.parse_body() blobs = body.find(fixxpath(xpath='Blobs')) blobs = blobs.findall(fixxpath(xpath='Blob')) for blob in blobs: yield self._xml_to_object(container, blob) params['marker'] = body.findtext('NextMarker') if not params['marker']: break def get_container(self, container_name): """ @inherits: :class:`StorageDriver.get_container` """ params = {'restype': 'container'} container_path = '/%s' % (container_name) response = self.connection.request(container_path, params=params, method='HEAD') if response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError('Container %s does not exist' % (container_name), driver=self, container_name=container_name) elif response.status != httplib.OK: raise LibcloudError('Unexpected status code: %s' % (response.status), 
driver=self) return self._response_to_container(container_name, response) def get_object(self, container_name, object_name): """ @inherits: :class:`StorageDriver.get_object` """ container = self.get_container(container_name=container_name) object_path = self._get_object_path(container, object_name) response = self.connection.request(object_path, method='HEAD') if response.status == httplib.OK: obj = self._response_to_object(object_name, container, response) return obj raise ObjectDoesNotExistError(value=None, driver=self, object_name=object_name) def _get_container_path(self, container): """ Return a container path :param container: Container instance :type container: :class:`Container` :return: A path for this container. :rtype: ``str`` """ return '/%s' % (container.name) def _get_object_path(self, container, object_name): """ Return an object's CDN path. :param container: Container instance :type container: :class:`Container` :param object_name: Object name :type object_name: :class:`str` :return: A path for this object. :rtype: ``str`` """ container_url = self._get_container_path(container) object_name_cleaned = urlquote(object_name) object_path = '%s/%s' % (container_url, object_name_cleaned) return object_path def create_container(self, container_name): """ @inherits: :class:`StorageDriver.create_container` """ params = {'restype': 'container'} container_path = '/%s' % (container_name) response = self.connection.request(container_path, params=params, method='PUT') if response.status == httplib.CREATED: return self._response_to_container(container_name, response) elif response.status == httplib.CONFLICT: raise ContainerAlreadyExistsError( value='Container with this name already exists. 
The name must ' 'be unique among all the containers in the system', container_name=container_name, driver=self) elif response.status == httplib.BAD_REQUEST: raise InvalidContainerNameError(value='Container name contains ' + 'invalid characters.', container_name=container_name, driver=self) raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) def delete_container(self, container): """ @inherits: :class:`StorageDriver.delete_container` """ # Azure does not check if the container is empty. So, we will do # a check to ensure that the behaviour is similar to other drivers for obj in container.iterate_objects(): raise ContainerIsNotEmptyError( value='Container must be empty before it can be deleted.', container_name=container.name, driver=self) params = {'restype': 'container'} container_path = self._get_container_path(container) # Note: All the objects in the container must be deleted first response = self.connection.request(container_path, params=params, method='DELETE') if response.status == httplib.ACCEPTED: return True elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value=None, driver=self, container_name=container.name) return False def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): """ @inherits: :class:`StorageDriver.download_object` """ obj_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(obj_path, raw=True, data=None) return self._get_object(obj=obj, callback=self._save_object, response=response, callback_kwargs={ 'obj': obj, 'response': response.response, 'destination_path': destination_path, 'overwrite_existing': overwrite_existing, 'delete_on_failure': delete_on_failure}, success_status_code=httplib.OK) def download_object_as_stream(self, obj, chunk_size=None): """ @inherits: :class:`StorageDriver.download_object_as_stream` """ obj_path = self._get_object_path(obj.container, obj.name) response = 
    def _upload_in_chunks(self, response, data, iterator, object_path,
                          blob_type, lease, calculate_hash=True):
        """
        Uploads data from an iterator in fixed sized chunks to Azure Storage.

        Block blobs are uploaded as uncommitted blocks which are committed
        at the end via :meth:`_commit_blocks`; page blobs are written with
        ranged page updates.

        :param response: Response object from the initial POST request
        :type response: :class:`RawResponse`

        :param data: Any data from the initial POST request
        :type data: ``str``

        :param iterator: The generator for fetching the upload data
        :type iterator: ``generator``

        :param object_path: The path of the object to which we are uploading
        :type object_name: ``str``

        :param blob_type: The blob type being uploaded
        :type blob_type: ``str``

        :param lease: The lease object to be used for renewal
        :type lease: :class:`AzureBlobLease`

        :keyword calculate_hash: Indicates if we must calculate the data hash
        :type calculate_hash: ``bool``

        :return: A tuple of (status, checksum, bytes transferred)
        :rtype: ``tuple``
        """

        # Get the upload id from the response xml
        if response.status != httplib.CREATED:
            raise LibcloudError('Error initializing upload. Code: %d' %
                                (response.status), driver=self)

        data_hash = None
        if calculate_hash:
            data_hash = self._get_hash_function()

        bytes_transferred = 0
        count = 1
        chunks = []
        headers = {}

        lease.update_headers(headers)

        if blob_type == 'BlockBlob':
            params = {'comp': 'block'}
        else:
            params = {'comp': 'page'}

        # Read the input data in chunk sizes suitable for Azure
        for data in read_in_chunks(iterator, AZURE_CHUNK_SIZE):
            data = b(data)
            content_length = len(data)
            offset = bytes_transferred
            bytes_transferred += content_length

            if calculate_hash:
                data_hash.update(data)

            # Each chunk gets its own Content-MD5 so the server can verify
            # the chunk in transit.
            chunk_hash = self._get_hash_function()
            chunk_hash.update(data)
            chunk_hash = base64.b64encode(b(chunk_hash.digest()))

            headers['Content-MD5'] = chunk_hash.decode('utf-8')
            headers['Content-Length'] = str(content_length)

            if blob_type == 'BlockBlob':
                # Block id can be any unique string that is base64 encoded
                # A 10 digit number can hold the max value of 50000 blocks
                # that are allowed for azure
                block_id = base64.b64encode(b('%10d' % (count)))
                block_id = block_id.decode('utf-8')
                params['blockid'] = block_id

                # Keep this data for a later commit
                chunks.append(block_id)
            else:
                # Page blobs are written in place at the byte range the
                # chunk occupies.
                headers['x-ms-page-write'] = 'update'
                headers['x-ms-range'] = 'bytes=%d-%d' % \
                    (offset, (bytes_transferred - 1))

            # Renew lease before updating
            lease.renew()

            resp = self.connection.request(object_path, method='PUT',
                                           data=data, headers=headers,
                                           params=params)

            if resp.status != httplib.CREATED:
                resp.parse_error()
                raise LibcloudError('Error uploading chunk %d. Code: %d' %
                                    (count, resp.status), driver=self)

            count += 1

        if calculate_hash:
            data_hash = data_hash.hexdigest()

        if blob_type == 'BlockBlob':
            self._commit_blocks(object_path, chunks, lease)

        # The Azure service does not return a hash immediately for
        # chunked uploads. It takes some time for the data to get synced
        response.headers['content-md5'] = None

        return (True, data_hash, bytes_transferred)
:param object_path: Server side object path. :type object_path: ``str`` :param upload_id: A list of (chunk_number, chunk_hash) tuples. :type upload_id: ``list`` """ root = ET.Element('BlockList') for block_id in chunks: part = ET.SubElement(root, 'Uncommitted') part.text = str(block_id) data = tostring(root) params = {'comp': 'blocklist'} headers = {} lease.update_headers(headers) lease.renew() response = self.connection.request(object_path, data=data, params=params, headers=headers, method='PUT') if response.status != httplib.CREATED: raise LibcloudError('Error in blocklist commit', driver=self) def _check_values(self, blob_type, object_size): """ Checks if extension arguments are valid :param blob_type: The blob type that is being uploaded :type blob_type: ``str`` :param object_size: The (max) size of the object being uploaded :type object_size: ``int`` """ if blob_type not in ['BlockBlob', 'PageBlob']: raise LibcloudError('Invalid blob type', driver=self) if blob_type == 'PageBlob': if not object_size: raise LibcloudError('Max blob size is mandatory for page blob', driver=self) if object_size % AZURE_PAGE_CHUNK_SIZE: raise LibcloudError('Max blob size is not aligned to ' 'page boundary', driver=self) def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, ex_blob_type=None, ex_use_lease=False): """ Upload an object currently located on a disk. 
@inherits: :class:`StorageDriver.upload_object` :param ex_blob_type: Storage class :type ex_blob_type: ``str`` :param ex_use_lease: Indicates if we must take a lease before upload :type ex_use_lease: ``bool`` """ if ex_blob_type is None: ex_blob_type = self.ex_blob_type # Get the size of the file file_size = os.stat(file_path).st_size # The presumed size of the object object_size = file_size self._check_values(ex_blob_type, file_size) # If size is greater than 64MB or type is Page, upload in chunks if ex_blob_type == 'PageBlob' or file_size > AZURE_BLOCK_MAX_SIZE: # For chunked upload of block blobs, the initial size must # be 0. if ex_blob_type == 'BlockBlob': object_size = None return self._put_object(container=container, object_name=object_name, object_size=object_size, file_path=file_path, extra=extra, verify_hash=verify_hash, blob_type=ex_blob_type, use_lease=ex_use_lease) def upload_object_via_stream(self, iterator, container, object_name, verify_hash=False, extra=None, ex_use_lease=False, ex_blob_type=None, ex_page_blob_size=None): """ @inherits: :class:`StorageDriver.upload_object_via_stream` :param ex_blob_type: Storage class :type ex_blob_type: ``str`` :param ex_page_blob_size: The maximum size to which the page blob can grow to :type ex_page_blob_size: ``int`` :param ex_use_lease: Indicates if we must take a lease before upload :type ex_use_lease: ``bool`` """ if ex_blob_type is None: ex_blob_type = self.ex_blob_type self._check_values(ex_blob_type, ex_page_blob_size) return self._put_object(container=container, object_name=object_name, object_size=ex_page_blob_size, extra=extra, verify_hash=verify_hash, blob_type=ex_blob_type, use_lease=ex_use_lease, stream=iterator) def delete_object(self, obj): """ @inherits: :class:`StorageDriver.delete_object` """ object_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(object_path, method='DELETE') if response.status == httplib.ACCEPTED: return True elif response.status == 
httplib.NOT_FOUND: raise ObjectDoesNotExistError(value=None, driver=self, object_name=obj.name) return False def _update_metadata(self, headers, meta_data): """ Update the given metadata in the headers :param headers: The headers dictionary to be updated :type headers: ``dict`` :param meta_data: Metadata key value pairs :type meta_data: ``dict`` """ for key, value in list(meta_data.items()): key = 'x-ms-meta-%s' % (key) headers[key] = value def _prepare_upload_headers(self, object_name, object_size, extra, meta_data, blob_type): """ Prepare headers for uploading an object :param object_name: The full name of the object being updated :type object_name: ``str`` :param object_size: The size of the object. In case of PageBlobs, this indicates the maximum size the blob can grow to :type object_size: ``int`` :param extra: Extra control data for the upload :type extra: ``dict`` :param meta_data: Metadata key value pairs :type meta_data: ``dict`` :param blob_type: Page or Block blob type :type blob_type: ``str`` """ headers = {} if blob_type is None: blob_type = self.ex_blob_type headers['x-ms-blob-type'] = blob_type self._update_metadata(headers, meta_data) if object_size is not None: headers['Content-Length'] = str(object_size) if blob_type == 'PageBlob': headers['Content-Length'] = str('0') headers['x-ms-blob-content-length'] = object_size return headers def _put_object(self, container, object_name, object_size, file_path=None, extra=None, verify_hash=True, blob_type=None, use_lease=False, stream=None): """ Control function that does the real job of uploading data to a blob """ extra = extra or {} meta_data = extra.get('meta_data', {}) content_type = extra.get('content_type', None) headers = self._prepare_upload_headers(object_name, object_size, extra, meta_data, blob_type) object_path = self._get_object_path(container, object_name) # Get a lease if required and do the operations with AzureBlobLease(self, object_path, use_lease) as lease: lease.update_headers(headers) 
result_dict = self._upload_object(object_name, content_type, object_path, headers=headers, file_path=file_path, stream=stream) response = result_dict['response'] bytes_transferred = result_dict['bytes_transferred'] data_hash = result_dict['data_hash'] headers = response.headers if response.status != httplib.CREATED: raise LibcloudError( 'Unexpected status code, status_code=%s' % (response.status), driver=self) server_hash = headers['content-md5'] if server_hash: server_hash = binascii.hexlify(base64.b64decode(b(server_hash))) server_hash = server_hash.decode('utf-8') else: # TODO: HACK - We could poll the object for a while and get # the hash pass if (verify_hash and server_hash and data_hash != server_hash): raise ObjectHashMismatchError( value='MD5 hash checksum does not match', object_name=object_name, driver=self) return Object(name=object_name, size=bytes_transferred, hash=headers['etag'], extra=None, meta_data=meta_data, container=container, driver=self) def ex_set_object_metadata(self, obj, meta_data): """ Set metadata for an object :param obj: The blob object :type obj: :class:`Object` :param meta_data: Metadata key value pairs :type meta_data: ``dict`` """ object_path = self._get_object_path(obj.container, obj.name) params = {'comp': 'metadata'} headers = {} self._update_metadata(headers, meta_data) response = self.connection.request(object_path, method='PUT', params=params, headers=headers) if response.status != httplib.OK: response.parse_error('Setting metadata') apache-libcloud-2.2.1/libcloud/storage/drivers/backblaze_b2.py0000664000175000017500000004663712705513024024227 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
AUTH_API_HOST = 'api.backblaze.com'
API_PATH = '/b2api/v1/'


class BackblazeB2Response(JsonResponse):
    """
    JSON response wrapper for the Backblaze B2 API.
    """

    def success(self):
        # B2 uses the standard 2xx codes for successful calls.
        return self.status in (httplib.OK, httplib.CREATED,
                               httplib.ACCEPTED)

    def parse_error(self):
        status = int(self.status)
        body = self.parse_body()

        # Surface authentication failures as the common libcloud type.
        if status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(body['message'])

        return self.body
BackblazeB2Response def __init__(self, *args, **kwargs): super(BackblazeB2AuthConnection, self).__init__(*args, **kwargs) # Those attributes are populated after authentication self.account_id = None self.api_url = None self.api_host = None self.download_url = None self.download_host = None self.auth_token = None def authenticate(self, force=False): """ :param force: Force authentication if if we have already obtained the token. :type force: ``bool`` """ if not self._is_authentication_needed(force=force): return self headers = {} action = 'b2_authorize_account' auth_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) headers['Authorization'] = 'Basic %s' % (auth_b64.decode('utf-8')) action = API_PATH + 'b2_authorize_account' resp = self.request(action=action, headers=headers, method='GET') if resp.status == httplib.OK: self._parse_and_set_auth_info(data=resp.object) else: raise Exception('Failed to authenticate: %s' % (str(resp.object))) return self def _parse_and_set_auth_info(self, data): result = {} self.account_id = data['accountId'] self.api_url = data['apiUrl'] self.download_url = data['downloadUrl'] self.auth_token = data['authorizationToken'] parsed_api_url = urlparse.urlparse(self.api_url) self.api_host = parsed_api_url.netloc parsed_download_url = urlparse.urlparse(self.download_url) self.download_host = parsed_download_url.netloc return result def _is_authentication_needed(self, force=False): if not self.auth_token or force: return True return False class BackblazeB2Connection(ConnectionUserAndKey): host = None # Note: host is set after authentication secure = True responseCls = BackblazeB2Response authCls = BackblazeB2AuthConnection def __init__(self, *args, **kwargs): super(BackblazeB2Connection, self).__init__(*args, **kwargs) # Stores info retrieved after authentication (auth token, api url, # dowload url). 
    def download_request(self, action, params=None):
        """
        Perform a raw GET against the (dynamically assigned) download host.

        :param action: File path (``<bucket>/<file name>``) to download.
        :type action: ``str``
        """
        # Lazily perform authentication
        auth_conn = self._auth_conn.authenticate()

        # Set host to the download server
        self.host = auth_conn.download_host

        action = '/file/' + action
        method = 'GET'
        raw = True

        response = self._request(auth_conn=auth_conn, action=action,
                                 params=params, method=method,
                                 raw=raw)
        return response

    def upload_request(self, action, headers, upload_host, auth_token, data):
        """
        POST object data to an upload host previously obtained from
        ``b2_get_upload_url``; that host supplies its own auth token.
        """
        # Lazily perform authentication
        auth_conn = self._auth_conn.authenticate()

        # Upload host is dynamically retrieved for each upload request
        self.host = upload_host

        method = 'POST'
        raw = False

        response = self._request(auth_conn=auth_conn, action=action,
                                 params=None, data=data,
                                 headers=headers, method=method,
                                 raw=raw, auth_token=auth_token)
        return response

    def request(self, action, params=None, data=None, headers=None,
                method='GET', raw=False, include_account_id=False):
        """
        Perform an authenticated API request against the regular API host.

        :param include_account_id: When True, inject the authenticated
            account id into the query params (GET) or JSON body (POST).
        :type include_account_id: ``bool``
        """
        params = params or {}
        headers = headers or {}

        # Lazily perform authentication
        auth_conn = self._auth_conn.authenticate()

        # Set host
        self.host = auth_conn.api_host

        # Include Content-Type
        if not raw and data:
            headers['Content-Type'] = 'application/json'

        # Include account id
        if include_account_id:
            if method == 'GET':
                # Parameters are included in the URL
                params['accountId'] = auth_conn.account_id
            elif method == 'POST':
                # Parameters are included in the body
                data = data or {}
                data['accountId'] = auth_conn.account_id

        action = API_PATH + action

        if data:
            data = json.dumps(data)

        # NOTE(review): passes self._auth_conn rather than the
        # authenticate() return value; harmless since authenticate()
        # returns self, but inconsistent with the other two methods.
        response = self._request(auth_conn=self._auth_conn,
                                 action=action,
                                 params=params, data=data,
                                 method=method, headers=headers,
                                 raw=raw)
        return response
class BackblazeB2StorageDriver(StorageDriver):
    """
    Storage driver for the Backblaze B2 object storage service.
    """

    connectionCls = BackblazeB2Connection
    name = 'Backblaze B2'
    website = 'https://www.backblaze.com/b2/'
    type = Provider.BACKBLAZE_B2
    hash_type = 'sha1'
    # B2 requires Content-Length and a SHA1 checksum up front, so chunked
    # transfer encoding cannot be used.
    supports_chunked_encoding = False

    def iterate_containers(self):
        """
        Return all the buckets (containers) for this account.
        """
        # pylint: disable=unexpected-keyword-arg
        resp = self.connection.request(action='b2_list_buckets',
                                       method='GET',
                                       include_account_id=True)
        containers = self._to_containers(data=resp.object)
        return containers

    def iterate_container_objects(self, container):
        """
        Return objects (files) stored in the provided container.
        """
        # TODO: Support pagination
        params = {'bucketId': container.extra['id']}
        resp = self.connection.request(action='b2_list_file_names',
                                       method='GET',
                                       params=params)
        objects = self._to_objects(data=resp.object, container=container)
        return objects

    def get_container(self, container_name):
        """
        Return a single container by name.

        :raises: :class:`ContainerDoesNotExistError` if no match is found.
        """
        containers = self.iterate_containers()
        container = next((c for c in containers if c.name == container_name),
                         None)
        if container:
            return container
        else:
            raise ContainerDoesNotExistError(value=None, driver=self,
                                             container_name=container_name)

    def get_object(self, container_name, object_name):
        """
        Return a single object by container and object name.

        :raises: :class:`ObjectDoesNotExistError` if no match is found.
        """
        container = self.get_container(container_name=container_name)
        objects = self.iterate_container_objects(container=container)

        obj = next((obj for obj in objects if obj.name == object_name), None)

        if obj is not None:
            return obj
        else:
            raise ObjectDoesNotExistError(value=None, driver=self,
                                          object_name=object_name)

    def create_container(self, container_name, ex_type='allPrivate'):
        """
        Create a new bucket.

        :param ex_type: Bucket visibility ('allPrivate' or 'allPublic').
        :type ex_type: ``str``
        """
        data = {}
        data['bucketName'] = container_name
        data['bucketType'] = ex_type
        # pylint: disable=unexpected-keyword-arg
        resp = self.connection.request(action='b2_create_bucket',
                                       data=data, method='POST',
                                       include_account_id=True)
        container = self._to_container(item=resp.object)
        return container

    def delete_container(self, container):
        """
        Delete a bucket. Returns ``True`` on success.
        """
        data = {}
        data['bucketId'] = container.extra['id']
        # pylint: disable=unexpected-keyword-arg
        resp = self.connection.request(action='b2_delete_bucket',
                                       data=data, method='POST',
                                       include_account_id=True)
        return resp.status == httplib.OK

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        """
        Download object data to the provided local path.
        """
        action = self._get_object_download_path(container=obj.container,
                                                obj=obj)
        # pylint: disable=no-member
        response = self.connection.download_request(action=action)

        # TODO: Include metadata from response headers
        return self._get_object(obj=obj, callback=self._save_object,
                                response=response,
                                callback_kwargs={
                                    'obj': obj,
                                    'response': response.response,
                                    'destination_path': destination_path,
                                    'overwrite_existing': overwrite_existing,
                                    'delete_on_failure': delete_on_failure
                                },
                                success_status_code=httplib.OK)

    def download_object_as_stream(self, obj, chunk_size=None):
        """
        Return a generator which yields object data in chunks.
        """
        action = self._get_object_download_path(container=obj.container,
                                                obj=obj)
        # pylint: disable=no-member
        response = self.connection.download_request(action=action)

        return self._get_object(obj=obj, callback=read_in_chunks,
                                response=response,
                                callback_kwargs={
                                    'iterator': response.response,
                                    'chunk_size': chunk_size
                                },
                                success_status_code=httplib.OK)

    def upload_object(self, file_path, container, object_name, extra=None,
                      verify_hash=True, headers=None):
        """
        Upload an object.

        Note: This will override file with a same name if it already exists.
        """
        # Note: We don't use any of the base driver functions since Backblaze
        # API requires you to provide SHA1 hash upfront and the base methods
        # don't support that
        with open(file_path, 'rb') as fp:
            iterator = iter(fp)
            iterator = read_in_chunks(iterator=iterator)
            data = exhaust_iterator(iterator=iterator)

        obj = self._perform_upload(data=data, container=container,
                                   object_name=object_name,
                                   extra=extra,
                                   verify_hash=verify_hash,
                                   headers=headers)
        return obj

    def upload_object_via_stream(self, iterator, container, object_name,
                                 extra=None, headers=None):
        """
        Upload an object.

        Note: Backblaze does not yet support uploading via stream,
        so this calls upload_object internally requiring the object data to
        be loaded into memory at once
        """
        iterator = read_in_chunks(iterator=iterator)
        data = exhaust_iterator(iterator=iterator)

        obj = self._perform_upload(data=data, container=container,
                                   object_name=object_name,
                                   extra=extra,
                                   headers=headers)
        return obj

    def delete_object(self, obj):
        """
        Delete the most recent version of the provided object.
        """
        data = {}
        data['fileName'] = obj.name
        data['fileId'] = obj.extra['fileId']
        resp = self.connection.request(action='b2_delete_file_version',
                                       data=data, method='POST')
        return resp.status == httplib.OK

    def ex_get_object(self, object_id):
        """
        Retrieve info for a single file by its B2 file id.
        """
        params = {}
        params['fileId'] = object_id
        resp = self.connection.request(action='b2_get_file_info',
                                       method='GET',
                                       params=params)
        obj = self._to_object(item=resp.object, container=None)
        return obj

    def ex_hide_object(self, container_id, object_name):
        """
        Hide a file so it does not show up in file name listings.
        """
        data = {}
        data['bucketId'] = container_id
        data['fileName'] = object_name
        resp = self.connection.request(action='b2_hide_file',
                                       data=data, method='POST')
        obj = self._to_object(item=resp.object, container=None)
        return obj

    def ex_list_object_versions(self, container_id, ex_start_file_name=None,
                                ex_start_file_id=None, ex_max_file_count=None):
        """
        List all versions of the files stored in the provided bucket.
        """
        params = {}
        params['bucketId'] = container_id

        if ex_start_file_name:
            params['startFileName'] = ex_start_file_name

        if ex_start_file_id:
            params['startFileId'] = ex_start_file_id

        if ex_max_file_count:
            params['maxFileCount'] = ex_max_file_count

        resp = self.connection.request(action='b2_list_file_versions',
                                       params=params, method='GET')
        objects = self._to_objects(data=resp.object, container=None)
        return objects

    def ex_get_upload_data(self, container_id):
        """
        Retrieve information used for uploading files (upload url, auth token,
        etc).

        :rtype: ``dict``
        """
        # TODO: This is static (AFAIK) so it could be cached
        params = {}
        params['bucketId'] = container_id

        response = self.connection.request(action='b2_get_upload_url',
                                           method='GET',
                                           params=params)
        return response.object

    def ex_get_upload_url(self, container_id):
        """
        Retrieve URL used for file uploads.

        :rtype: ``str``
        """
        result = self.ex_get_upload_data(container_id=container_id)
        upload_url = result['uploadUrl']
        return upload_url

    def _to_containers(self, data):
        # Convert a b2_list_buckets response body into Container instances.
        result = []
        for item in data['buckets']:
            container = self._to_container(item=item)
            result.append(container)

        return result

    def _to_container(self, item):
        extra = {}
        extra['id'] = item['bucketId']
        extra['bucketType'] = item['bucketType']
        container = Container(name=item['bucketName'], extra=extra,
                              driver=self)
        return container

    def _to_objects(self, data, container):
        # Convert a file-listing response body into Object instances.
        result = []
        for item in data['files']:
            obj = self._to_object(item=item, container=container)
            result.append(obj)

        return result

    def _to_object(self, item, container=None):
        extra = {}
        extra['fileId'] = item['fileId']
        extra['uploadTimestamp'] = item.get('uploadTimestamp', None)
        # Listing responses use 'size', file-info responses 'contentLength'.
        size = item.get('size', item.get('contentLength', None))
        hash = item.get('contentSha1', None)
        meta_data = item.get('fileInfo', {})
        obj = Object(name=item['fileName'], size=size, hash=hash,
                     extra=extra, meta_data=meta_data, container=container,
                     driver=self)
        return obj

    def _get_object_download_path(self, container, obj):
        """
        Return a path used in the download requests.

        :rtype: ``str``
        """
        path = container.name + '/' + obj.name
        return path

    def _perform_upload(self, data, container, object_name, extra=None,
                        verify_hash=True, headers=None):
        if isinstance(data, str):
            # Bug fix: on Python 3, bytearray(str) without an encoding raises
            # TypeError. Encode via b() (utf-8), consistent with the hashing
            # below.
            data = bytearray(b(data))

        object_name = sanitize_object_name(object_name)

        extra = extra or {}
        content_type = extra.get('content_type', 'b2/x-auto')
        meta_data = extra.get('meta_data', {})

        # Note: Backblaze API doesn't support chunked encoding and we need to
        # provide Content-Length up front (this is done inside
        # _upload_object) :/
        headers = headers or {}
        headers['X-Bz-File-Name'] = object_name
        headers['Content-Type'] = content_type

        sha1 = hashlib.sha1()
        sha1.update(b(data))
        headers['X-Bz-Content-Sha1'] = sha1.hexdigest()

        # Include optional meta-data (up to 10 items)
        # Bug fix: iterate over (key, value) pairs; iterating the dict
        # directly yields only keys and raises ValueError on unpacking.
        for key, value in meta_data.items():
            # TODO: Encode / escape key
            headers['X-Bz-Info-%s' % (key)] = value

        upload_data = self.ex_get_upload_data(
            container_id=container.extra['id'])
        upload_token = upload_data['authorizationToken']
        parsed_url = urlparse.urlparse(upload_data['uploadUrl'])

        upload_host = parsed_url.netloc
        request_path = parsed_url.path

        # pylint: disable=no-member
        response = self.connection.upload_request(action=request_path,
                                                  headers=headers,
                                                  upload_host=upload_host,
                                                  auth_token=upload_token,
                                                  data=data)

        if response.status == httplib.OK:
            obj = self._to_object(item=response.object, container=container)
            return obj
        else:
            body = response.response.read()
            raise LibcloudError('Upload failed. status_code=%s, body=%s' %
                                (response.status, body), driver=self)
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unexpected-keyword-arg

import base64
import codecs
import hmac
import time
import sys
from hashlib import sha1

from libcloud.utils.py3 import ET
try:
    from lxml.etree import Element, SubElement
except ImportError:
    from xml.etree.ElementTree import Element, SubElement

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlquote
from libcloud.utils.py3 import urlencode
from libcloud.utils.py3 import b
from libcloud.utils.py3 import tostring
from libcloud.utils.py3 import PY3
from libcloud.utils.xml import fixxpath, findtext
from libcloud.utils.files import read_in_chunks
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.base import ConnectionUserAndKey, RawResponse, \
    XmlResponse
from libcloud.common.types import MalformedResponseError
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError

__all__ = [
    'OSSStorageDriver',
    'OSSMultipartUpload',

    'EXPIRATION_SECONDS',
    'CHUNK_SIZE',
    'MAX_UPLOADS_PER_RESPONSE'
]

GMT_TIME_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
EXPIRATION_SECONDS = 15 * 60

# OSS multi-part chunks must be greater than 100KB except the last one
CHUNK_SIZE = 100 * 1024

# Desired number of items in each response inside a paginated request in
# ex_iterate_multipart_uploads.
MAX_UPLOADS_PER_RESPONSE = 1000


class OSSResponse(XmlResponse):
    namespace = None
    # These error statuses still carry a parseable XML body, so they are
    # treated as "successful" at the transport level.
    valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,
                            httplib.BAD_REQUEST]

    def success(self):
        i = int(self.status)
        return i >= 200 and i <= 299 or i in self.valid_response_codes

    def parse_body(self):
        """
        OSSResponse body is in utf-8 encoding.
        """
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body

        try:
            if PY3:
                parser = ET.XMLParser(encoding='utf-8')
                body = ET.XML(self.body.encode('utf-8'), parser=parser)
            else:
                body = ET.XML(self.body)
        # Bug fix: a bare ``except:`` would also intercept SystemExit and
        # KeyboardInterrupt; only parsing failures should be wrapped.
        except Exception:
            raise MalformedResponseError('Failed to parse XML',
                                         body=self.body,
                                         driver=self.connection.driver)
        return body

    def parse_error(self):
        if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
            raise InvalidCredsError(self.body)
        elif self.status == httplib.MOVED_PERMANENTLY:
            raise LibcloudError('This bucket is located in a different ' +
                                'region. Please use the correct driver.',
                                driver=OSSStorageDriver)
        elif self.status == httplib.METHOD_NOT_ALLOWED:
            raise LibcloudError('The method is not allowed. Status code: %d, '
                                'headers: %s' % (self.status, self.headers))
        raise LibcloudError('Unknown error. Status code: %d, body: %s' %
                            (self.status, self.body),
                            driver=OSSStorageDriver)


class OSSRawResponse(OSSResponse, RawResponse):
    pass


class OSSConnection(ConnectionUserAndKey):
    """
    Represents a single connection to the Aliyun OSS Endpoint
    """

    _domain = 'aliyuncs.com'
    _default_location = 'oss'
    responseCls = OSSResponse
    rawResponseCls = OSSRawResponse

    @staticmethod
    def _get_auth_signature(method, headers, params, expires, secret_key,
                            path, vendor_prefix):
        """
        Signature = base64(hmac-sha1(AccessKeySecret,
                    VERB + "\n"
                    + CONTENT-MD5 + "\n"
                    + CONTENT-TYPE + "\n"
                    + EXPIRES + "\n"
                    + CanonicalizedOSSHeaders
                    + CanonicalizedResource))
        """
        special_headers = {'content-md5': '', 'content-type': '',
                           'expires': ''}
        vendor_headers = {}

        # Split the headers into the fixed signature fields and the
        # x-oss-* (vendor) headers, which are canonicalized separately.
        for key, value in list(headers.items()):
            key_lower = key.lower()
            if key_lower in special_headers:
                special_headers[key_lower] = value.strip()
            elif key_lower.startswith(vendor_prefix):
                vendor_headers[key_lower] = value.strip()

        if expires:
            special_headers['expires'] = str(expires)

        buf = [method]
        for _, value in sorted(special_headers.items()):
            buf.append(value)
        string_to_sign = '\n'.join(buf)

        buf = []
        for key, value in sorted(vendor_headers.items()):
            buf.append('%s:%s' % (key, value))
        header_string = '\n'.join(buf)

        values_to_sign = []
        for value in [string_to_sign, header_string, path]:
            if value:
                values_to_sign.append(value)

        string_to_sign = '\n'.join(values_to_sign)
        b64_hmac = base64.b64encode(
            hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest()
        )
        return b64_hmac

    @staticmethod
    def _get_expires(params):
        """
        Get expires timeout seconds from parameters.
        """
        expires = None
        if 'expires' in params:
            expires = params['expires']
        elif 'Expires' in params:
            expires = params['Expires']
        if expires:
            try:
                return int(expires)
            except Exception:
                pass
        # Fall back to a fixed window from "now" when no (valid) expiry was
        # supplied by the caller.
        return int(time.time()) + EXPIRATION_SECONDS

    def add_default_params(self, params):
        expires_at = self._get_expires(params)
        expires = str(expires_at)
        params['OSSAccessKeyId'] = self.user_id
        params['Expires'] = expires
        return params

    def add_default_headers(self, headers):
        headers['Date'] = time.strftime(GMT_TIME_FORMAT, time.gmtime())
        return headers

    def pre_connect_hook(self, params, headers):
        if self._container:
            path = '/%s%s' % (self._container.name, self.action)
        else:
            path = self.action
        params['Signature'] = self._get_auth_signature(
            method=self.method, headers=headers, params=params,
            expires=params['Expires'], secret_key=self.key, path=path,
            vendor_prefix=self.driver.http_vendor_prefix)
        return params, headers

    def request(self, action, params=None, data=None, headers=None,
                method='GET', raw=False, container=None):
        # OSS puts the bucket (and optionally its location) into the
        # hostname rather than into the request path.
        self.host = '%s.%s' % (self._default_location, self._domain)
        self._container = container
        if container and container.name:
            if 'location' in container.extra:
                self.host = '%s.%s.%s' % (container.name,
                                          container.extra['location'],
                                          self._domain)
            else:
                self.host = '%s.%s' % (container.name, self.host)
        return super(OSSConnection, self).request(action=action,
                                                  params=params,
                                                  data=data,
                                                  headers=headers,
                                                  method=method,
                                                  raw=raw)


class OSSMultipartUpload(object):
    """
    Class representing an Aliyun OSS multipart upload
    """

    def __init__(self, key, id, initiated):
        """
        Class representing an Aliyun OSS multipart upload

        :param key: The object/key that was being uploaded
        :type key: ``str``

        :param id: The upload id assigned by Aliyun
        :type id: ``str``

        :param initiated: The date/time at which the upload was started
        :type initiated: ``str``
        """
        self.key = key
        self.id = id
        self.initiated = initiated

    def __repr__(self):
        # Bug fix: the extracted source showed an empty format string here
        # (``'' % (self.key)``), which raises TypeError at runtime; restored
        # the conventional repr format — NOTE(review): confirm against
        # upstream libcloud.
        return ('<OSSMultipartUpload: key=%s>' % (self.key))
class OSSStorageDriver(StorageDriver):
    """
    Storage driver for the Aliyun Object Storage Service (OSS).
    """

    name = 'Aliyun OSS'
    website = 'http://www.aliyun.com/product/oss'
    connectionCls = OSSConnection
    hash_type = 'md5'
    supports_chunked_encoding = False
    supports_multipart_upload = True
    namespace = None
    http_vendor_prefix = 'x-oss-'

    def iterate_containers(self):
        # GET on the service root lists all buckets for the account.
        response = self.connection.request('/')
        if response.status == httplib.OK:
            containers = self._to_containers(obj=response.object,
                                             xpath='Buckets/Bucket')
            return containers

        raise LibcloudError('Unexpected status code: %s' % (response.status),
                            driver=self)

    def list_container_objects(self, container, ex_prefix=None):
        """
        Return a list of objects for the given container.

        :param container: Container instance.
        :type container: :class:`Container`

        :keyword ex_prefix: Only return objects starting with ex_prefix
        :type ex_prefix: ``str``

        :return: A list of Object instances.
        :rtype: ``list`` of :class:`Object`
        """
        return list(self.iterate_container_objects(container,
                                                   ex_prefix=ex_prefix))

    def iterate_container_objects(self, container, ex_prefix=None):
        """
        Return a generator of objects for the given container.

        :param container: Container instance
        :type container: :class:`Container`

        :keyword ex_prefix: Only return objects starting with ex_prefix
        :type ex_prefix: ``str``

        :return: A generator of Object instances.
        :rtype: ``generator`` of :class:`Object`
        """
        params = {}
        if ex_prefix:
            params['prefix'] = ex_prefix

        last_key = None
        exhausted = False

        # Paginate by passing the last seen key as the 'marker' parameter
        # until the service reports IsTruncated == false.
        while not exhausted:
            if last_key:
                params['marker'] = last_key

            response = self.connection.request('/', params=params,
                                               container=container)

            if response.status != httplib.OK:
                raise LibcloudError('Unexpected status code: %s' %
                                    (response.status), driver=self)

            objects = self._to_objs(obj=response.object, xpath='Contents',
                                    container=container)
            is_truncated = response.object.findtext(fixxpath(
                xpath='IsTruncated', namespace=self.namespace)).lower()
            exhausted = (is_truncated == 'false')

            last_key = None
            for obj in objects:
                last_key = obj.name
                yield obj

    def get_container(self, container_name):
        # Linear scan over the bucket listing; raises if no name matches.
        for container in self.iterate_containers():
            if container.name == container_name:
                return container
        raise ContainerDoesNotExistError(value=None,
                                         driver=self,
                                         container_name=container_name)

    def get_object(self, container_name, object_name):
        # HEAD request: object metadata comes back entirely in the headers.
        container = self.get_container(container_name=container_name)
        object_path = self._get_object_path(container, object_name)
        response = self.connection.request(object_path,
                                           method='HEAD',
                                           container=container)
        if response.status == httplib.OK:
            obj = self._headers_to_object(object_name=object_name,
                                          container=container,
                                          headers=response.headers)
            return obj

        raise ObjectDoesNotExistError(value=None, driver=self,
                                      object_name=object_name)

    def create_container(self, container_name, ex_location=None):
        """
        @inherits :class:`StorageDriver.create_container`

        :keyword ex_location: The desired location where to create container
        :type keyword: ``str``
        """
        extra = None
        if ex_location:
            root = Element('CreateBucketConfiguration')
            child = SubElement(root, 'LocationConstraint')
            child.text = ex_location

            data = tostring(root)
            extra = {'location': ex_location}
        else:
            data = ''

        container = Container(name=container_name, extra=extra, driver=self)
        response = self.connection.request('/', data=data, method='PUT',
                                           container=container)
        if response.status == httplib.OK:
            return container
        elif response.status == httplib.CONFLICT:
            raise InvalidContainerNameError(
                value='Container with this name already exists. The name must '
                      'be unique among all the containers in the system',
                container_name=container_name, driver=self)
        elif response.status == httplib.BAD_REQUEST:
            raise ContainerError(
                value='Bad request when creating container: %s' %
                      response.body,
                container_name=container_name, driver=self)

        raise LibcloudError('Unexpected status code: %s' % (response.status),
                            driver=self)

    def delete_container(self, container):
        # Note: All the objects in the container must be deleted first
        response = self.connection.request('/', method='DELETE',
                                           container=container)
        if response.status == httplib.NO_CONTENT:
            return True
        elif response.status == httplib.CONFLICT:
            raise ContainerIsNotEmptyError(
                value='Container must be empty before it can be deleted.',
                container_name=container.name, driver=self)
        elif response.status == httplib.NOT_FOUND:
            raise ContainerDoesNotExistError(value=None,
                                             driver=self,
                                             container_name=container.name)

        return False

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        obj_path = self._get_object_path(obj.container, obj.name)
        response = self.connection.request(obj_path, method='GET', raw=True,
                                           container=obj.container)

        return self._get_object(obj=obj, callback=self._save_object,
                                response=response,
                                callback_kwargs={
                                    'obj': obj,
                                    'response': response.response,
                                    'destination_path': destination_path,
                                    'overwrite_existing': overwrite_existing,
                                    'delete_on_failure': delete_on_failure},
                                success_status_code=httplib.OK)

    def download_object_as_stream(self, obj, chunk_size=None):
        obj_path = self._get_object_path(obj.container, obj.name)
        response = self.connection.request(obj_path, method='GET', raw=True,
                                           container=obj.container)

        return self._get_object(obj=obj, callback=read_in_chunks,
                                response=response,
                                callback_kwargs={'iterator': response.response,
                                                 'chunk_size': chunk_size},
                                success_status_code=httplib.OK)

    def upload_object(self, file_path, container, object_name, extra=None,
                      verify_hash=True, headers=None):
        # NOTE(review): the ``headers`` argument is accepted but not
        # forwarded to _put_object — confirm whether this is intentional.
        return self._put_object(container=container, object_name=object_name,
                                extra=extra, file_path=file_path,
                                verify_hash=verify_hash)

    def upload_object_via_stream(self, iterator, container,
                                 object_name, extra=None, headers=None):
        method = 'PUT'
        params = None

        if self.supports_multipart_upload:
            # @TODO: This needs implementing again from scratch.
            pass

        return self._put_object(container=container, object_name=object_name,
                                extra=extra, method=method, query_args=params,
                                stream=iterator, verify_hash=False)

    def delete_object(self, obj):
        object_path = self._get_object_path(obj.container, obj.name)
        response = self.connection.request(object_path, method='DELETE',
                                           container=obj.container)
        if response.status == httplib.NO_CONTENT:
            return True
        elif response.status == httplib.NOT_FOUND:
            raise ObjectDoesNotExistError(value=None, driver=self,
                                          object_name=obj.name)

        return False

    def ex_iterate_multipart_uploads(self, container, prefix=None,
                                     delimiter=None,
                                     max_uploads=MAX_UPLOADS_PER_RESPONSE):
        """
        Extension method for listing all in-progress OSS multipart uploads.

        Each multipart upload which has not been committed or aborted is
        considered in-progress.

        :param container: The container holding the uploads
        :type container: :class:`Container`

        :keyword prefix: Print only uploads of objects with this prefix
        :type prefix: ``str``

        :keyword delimiter: The object/key names are grouped based on
            being split by this delimiter
        :type delimiter: ``str``

        :keyword max_uploads: The max uplod items returned for one request
        :type max_uploads: ``int``

        :return: A generator of OSSMultipartUpload instances.
        :rtype: ``generator`` of :class:`OSSMultipartUpload`
        """
        if not self.supports_multipart_upload:
            raise LibcloudError('Feature not supported', driver=self)

        request_path = '/?uploads'
        params = {'max-uploads': max_uploads}

        if prefix:
            params['prefix'] = prefix

        if delimiter:
            params['delimiter'] = delimiter

        def finder(node, text):
            return node.findtext(fixxpath(xpath=text,
                                          namespace=self.namespace))

        while True:
            response = self.connection.request(request_path, params=params,
                                               container=container)

            if response.status != httplib.OK:
                raise LibcloudError('Error fetching multipart uploads. '
                                    'Got code: %s' % response.status,
                                    driver=self)

            body = response.parse_body()
            # pylint: disable=maybe-no-member
            for node in body.findall(fixxpath(xpath='Upload',
                                              namespace=self.namespace)):

                key = finder(node, 'Key')
                upload_id = finder(node, 'UploadId')
                initiated = finder(node, 'Initiated')

                yield OSSMultipartUpload(key, upload_id, initiated)

            # Check if this is the last entry in the listing
            # pylint: disable=maybe-no-member
            is_truncated = body.findtext(fixxpath(xpath='IsTruncated',
                                                  namespace=self.namespace))

            if is_truncated.lower() == 'false':
                break

            # Provide params for the next request
            upload_marker = body.findtext(fixxpath(xpath='NextUploadIdMarker',
                                                   namespace=self.namespace))
            key_marker = body.findtext(fixxpath(xpath='NextKeyMarker',
                                                namespace=self.namespace))

            params['key-marker'] = key_marker
            params['upload-id-marker'] = upload_marker

    def ex_abort_all_multipart_uploads(self, container, prefix=None):
        """
        Extension method for removing all partially completed OSS multipart
        uploads.

        :param container: The container holding the uploads
        :type container: :class:`Container`

        :keyword prefix: Delete only uploads of objects with this prefix
        :type prefix: ``str``
        """
        # Iterate through the container and delete the upload ids
        for upload in self.ex_iterate_multipart_uploads(container, prefix,
                                                        delimiter=None):
            object_path = self._get_object_path(container, upload.key)
            self._abort_multipart(object_path, upload.id, container=container)

    def _clean_object_name(self, name):
        # Object names are URL-quoted before being used in request paths.
        name = urlquote(name)
        return name

    def _put_object(self, container, object_name, method='PUT',
                    query_args=None, extra=None, file_path=None,
                    stream=None, verify_hash=False):
        """
        Create an object and upload data using the given function.
        """
        headers = {}
        extra = extra or {}

        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)
        acl = extra.get('acl', None)

        if meta_data:
            # User meta-data is sent as x-oss-meta-* headers.
            for key, value in list(meta_data.items()):
                key = self.http_vendor_prefix + 'meta-%s' % (key)
                headers[key] = value

        if acl:
            if acl not in ['public-read', 'private', 'public-read-write']:
                raise AttributeError('invalid acl value: %s' % acl)
            headers[self.http_vendor_prefix + 'object-acl'] = acl

        request_path = self._get_object_path(container, object_name)

        if query_args:
            request_path = '?'.join((request_path, query_args))

        result_dict = self._upload_object(
            object_name=object_name, content_type=content_type,
            request_path=request_path, request_method=method,
            headers=headers, file_path=file_path, stream=stream)

        response = result_dict['response']
        bytes_transferred = result_dict['bytes_transferred']
        headers = response.headers
        server_hash = headers['etag'].replace('"', '')

        if (verify_hash and result_dict['data_hash'] != server_hash):
            raise ObjectHashMismatchError(
                value='MD5 hash {0} checksum does not match {1}'.format(
                    server_hash, result_dict['data_hash']),
                object_name=object_name, driver=self)
        elif response.status == httplib.OK:
            obj = Object(
                name=object_name, size=bytes_transferred, hash=server_hash,
                extra={'acl': acl}, meta_data=meta_data, container=container,
                driver=self)

            return obj
        else:
            raise LibcloudError(
                'Unexpected status code, status_code=%s' % (response.status),
                driver=self)

    def _upload_multipart(self, response, data, iterator, container,
                          object_name, calculate_hash=True):
        """
        Callback invoked for uploading data to OSS using Aliyun's
        multipart upload mechanism

        :param response: Response object from the initial POST request
        :type response: :class:`OSSRawResponse`

        :param data: Any data from the initial POST request
        :type data: ``str``

        :param iterator: The generator for fetching the upload data
        :type iterator: ``generator``

        :param container: The container owning the object to which data is
            being uploaded
        :type container: :class:`Container`

        :param object_name: The name of the object to which we are uploading
        :type object_name: ``str``

        :keyword calculate_hash: Indicates if we must calculate the data hash
        :type calculate_hash: ``bool``

        :return: A tuple of (status, checksum, bytes transferred)
        :rtype: ``tuple``
        """
        object_path = self._get_object_path(container, object_name)

        # Get the upload id from the response xml
        response.body = response.response.read()
        body = response.parse_body()
        upload_id = body.find(fixxpath(xpath='UploadId',
                                       namespace=self.namespace)).text

        try:
            # Upload the data through the iterator
            result = self._upload_from_iterator(iterator, object_path,
                                                upload_id, calculate_hash,
                                                container=container)
            (chunks, data_hash, bytes_transferred) = result

            # Commit the chunk info and complete the upload
            etag = self._commit_multipart(object_path, upload_id, chunks,
                                          container=container)
        except Exception:
            exc = sys.exc_info()[1]
            # Amazon provides a mechanism for aborting an upload.
            self._abort_multipart(object_path, upload_id,
                                  container=container)
            raise exc

        # Modify the response header of the first request. This is used
        # by other functions once the callback is done
        response.headers['etag'] = etag

        return (True, data_hash, bytes_transferred)

    def _upload_from_iterator(self, iterator, object_path, upload_id,
                              calculate_hash=True, container=None):
        """
        Uploads data from an interator in fixed sized chunks to OSS

        :param iterator: The generator for fetching the upload data
        :type iterator: ``generator``

        :param object_path: The path of the object to which we are uploading
        :type object_name: ``str``

        :param upload_id: The upload id allocated for this multipart upload
        :type upload_id: ``str``

        :keyword calculate_hash: Indicates if we must calculate the data hash
        :type calculate_hash: ``bool``

        :keyword container: the container object to upload object to
        :type container: :class:`Container`

        :return: A tuple of (chunk info, checksum, bytes transferred)
        :rtype: ``tuple``
        """
        data_hash = None
        if calculate_hash:
            data_hash = self._get_hash_function()

        bytes_transferred = 0
        count = 1
        chunks = []
        params = {'uploadId': upload_id}

        # Read the input data in chunk sizes suitable for AWS
        for data in read_in_chunks(iterator, chunk_size=CHUNK_SIZE,
                                   fill_size=True, yield_empty=True):
            bytes_transferred += len(data)

            if calculate_hash:
                data_hash.update(data)

            chunk_hash = self._get_hash_function()
            chunk_hash.update(data)
            chunk_hash = base64.b64encode(chunk_hash.digest()).decode('utf-8')

            # OSS will calculate hash of the uploaded data and
            # check this header.
            headers = {'Content-MD5': chunk_hash}
            params['partNumber'] = count

            request_path = '?'.join((object_path, urlencode(params)))

            resp = self.connection.request(request_path, method='PUT',
                                           data=data, headers=headers,
                                           container=container)

            if resp.status != httplib.OK:
                raise LibcloudError('Error uploading chunk', driver=self)

            server_hash = resp.headers['etag']

            # Keep this data for a later commit
            chunks.append((count, server_hash))
            count += 1

        if calculate_hash:
            data_hash = data_hash.hexdigest()

        return (chunks, data_hash, bytes_transferred)

    def _commit_multipart(self, object_path, upload_id, chunks,
                          container=None):
        """
        Makes a final commit of the data.

        :param object_path: Server side object path.
        :type object_path: ``str``

        :param upload_id: ID of the multipart upload.
        :type upload_id: ``str``

        :param upload_id: A list of (chunk_number, chunk_hash) tuples.
        :type upload_id: ``list``

        :keyword container: The container owning the object to which data is
            being uploaded
        :type container: :class:`Container`
        """
        root = Element('CompleteMultipartUpload')

        for (count, etag) in chunks:
            part = SubElement(root, 'Part')
            part_no = SubElement(part, 'PartNumber')
            part_no.text = str(count)

            etag_id = SubElement(part, 'ETag')
            etag_id.text = str(etag)

        data = tostring(root)

        params = {'uploadId': upload_id}
        request_path = '?'.join((object_path, urlencode(params)))
        response = self.connection.request(request_path, data=data,
                                           method='POST',
                                           container=container)

        if response.status != httplib.OK:
            element = response.object
            # pylint: disable=maybe-no-member
            code, message = response._parse_error_details(element=element)
            msg = 'Error in multipart commit: %s (%s)' % (message, code)
            raise LibcloudError(msg, driver=self)

        # Get the server's etag to be passed back to the caller
        body = response.parse_body()
        server_hash = body.find(fixxpath(xpath='ETag',
                                         namespace=self.namespace)).text
        return server_hash

    def _abort_multipart(self, object_path, upload_id, container=None):
        """
        Aborts an already initiated multipart upload

        :param object_path: Server side object path.
        :type object_path: ``str``

        :param upload_id: ID of the multipart upload.
        :type upload_id: ``str``

        :keyword container: The container owning the object to which data is
            being uploaded
        :type container: :class:`Container`
        """
        params = {'uploadId': upload_id}
        request_path = '?'.join((object_path, urlencode(params)))

        resp = self.connection.request(request_path, method='DELETE',
                                       container=container)

        if resp.status != httplib.NO_CONTENT:
            raise LibcloudError('Error in multipart abort. status_code=%d' %
                                (resp.status), driver=self)

    def _to_containers(self, obj, xpath):
        # Lazily convert each <Bucket> element into a Container.
        for element in obj.findall(fixxpath(xpath=xpath,
                                            namespace=self.namespace)):
            yield self._to_container(element)

    def _to_container(self, element):
        extra = {
            'creation_date': findtext(element=element,
                                      xpath='CreationDate',
                                      namespace=self.namespace),
            'location': findtext(element=element,
                                 xpath='Location',
                                 namespace=self.namespace)
        }

        container = Container(name=findtext(element=element,
                                            xpath='Name',
                                            namespace=self.namespace),
                              extra=extra,
                              driver=self
                              )

        return container

    def _to_objs(self, obj, xpath, container):
        return [self._to_obj(element, container) for element in
                obj.findall(fixxpath(xpath=xpath,
                                     namespace=self.namespace))]

    def _to_obj(self, element, container):
        owner_id = findtext(element=element, xpath='Owner/ID',
                            namespace=self.namespace)
        owner_display_name = findtext(element=element,
                                      xpath='Owner/DisplayName',
                                      namespace=self.namespace)
        meta_data = {'owner': {'id': owner_id,
                               'display_name': self._safe_decode(
                                   owner_display_name)}}
        last_modified = findtext(element=element,
                                 xpath='LastModified',
                                 namespace=self.namespace)
        extra = {'last_modified': last_modified}

        name = self._safe_decode(findtext(element=element, xpath='Key',
                                          namespace=self.namespace))
        obj = Object(name=name,
                     size=int(findtext(element=element, xpath='Size',
                                       namespace=self.namespace)),
                     hash=findtext(element=element, xpath='ETag',
                                   namespace=self.namespace).replace('"', ''),
                     extra=extra,
                     meta_data=meta_data,
                     container=container,
                     driver=self
                     )

        return obj

    def _safe_decode(self, encoded):
        """
        Decode it as an escaped string and then treat the content as
        UTF-8 encoded.
        """
        try:
            if encoded:
                unescaped, _ign = codecs.escape_decode(encoded)
                return unescaped.decode('utf-8')
            return encoded
        except Exception:
            return encoded

    def _get_container_path(self, container):
        """
        Return a container path

        :param container: Container instance
        :type  container: :class:`Container`

        :return: A path for this container.
        :rtype: ``str``
        """
        return '/%s' % (container.name)

    def _get_object_path(self, container, object_name):
        """
        Return an object's path.
        Aliyun OSS api puts the container name in the host,
        so ignore container here.

        :param container: Container instance
        :type  container: :class:`Container`

        :param object_name: Object name
        :type  object_name: :class:`str`

        :return: A path for this object.
        :rtype: ``str``
        """
        object_name_cleaned = self._clean_object_name(object_name)
        object_path = '/%s' % object_name_cleaned
        return object_path

    def _headers_to_object(self, object_name, container, headers):
        # Build an Object from the headers of a HEAD response.
        hash = headers['etag'].replace('"', '')
        extra = {'content_type': headers['content-type'],
                 'etag': headers['etag']}
        meta_data = {}

        if 'last-modified' in headers:
            extra['last_modified'] = headers['last-modified']

        # x-oss-meta-* headers carry user-supplied metadata.
        for key, value in headers.items():
            if not key.lower().startswith(self.http_vendor_prefix + 'meta-'):
                continue

            key = key.replace(self.http_vendor_prefix + 'meta-', '')
            meta_data[key] = value

        obj = Object(name=object_name, size=int(headers['content-length']),
                     hash=hash, extra=extra, meta_data=meta_data,
                     container=container, driver=self)
        return obj
See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.common.types import LibcloudError
from libcloud.storage.providers import Provider
from libcloud.storage.drivers.s3 import BaseS3StorageDriver, BaseS3Connection

__all__ = [
    'AuroraObjectsStorageDriver'
]

AURORA_OBJECTS_EU_HOST = 'o.auroraobjects.eu'

NO_CDN_SUPPORT_ERROR = 'CDN is not supported by AuroraObjects'


class BaseAuroraObjectsConnection(BaseS3Connection):
    # AuroraObjects speaks the S3 protocol on a fixed EU endpoint.
    host = AURORA_OBJECTS_EU_HOST


class BaseAuroraObjectsStorageDriver(BaseS3StorageDriver):
    # Reuses the S3 driver wholesale; only provider identity differs.
    type = Provider.AURORAOBJECTS
    name = 'PCextreme AuroraObjects'
    website = 'https://www.pcextreme.com/aurora/objects'


class AuroraObjectsStorageDriver(BaseAuroraObjectsStorageDriver):
    connectionCls = BaseAuroraObjectsConnection

    # AuroraObjects has no CDN product, so every CDN-related hook inherited
    # from the S3 driver fails fast with an explanatory error.
    def enable_container_cdn(self, *argv):
        raise LibcloudError(NO_CDN_SUPPORT_ERROR, driver=self)

    def enable_object_cdn(self, *argv):
        raise LibcloudError(NO_CDN_SUPPORT_ERROR, driver=self)

    def get_container_cdn_url(self, *argv):
        raise LibcloudError(NO_CDN_SUPPORT_ERROR, driver=self)

    def get_object_cdn_url(self, *argv):
        raise LibcloudError(NO_CDN_SUPPORT_ERROR, driver=self)
apache-libcloud-2.2.1/libcloud/storage/drivers/google_storage.py0000664000175000017500000003534113153541406024717 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.
See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import json
import email.utils

from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.google import GoogleAuthType
from libcloud.common.google import GoogleOAuth2Credential
from libcloud.common.google import GoogleResponse
from libcloud.common.types import ProviderError
from libcloud.storage.drivers.s3 import BaseS3Connection
from libcloud.storage.drivers.s3 import BaseS3StorageDriver
from libcloud.storage.drivers.s3 import S3RawResponse
from libcloud.storage.drivers.s3 import S3Response
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlquote

# Docs are a lie. Actual namespace returned is different than the one listed
# in the docs.
SIGNATURE_IDENTIFIER = 'GOOG1'
API_VERSION = '2006-03-01'
NAMESPACE = 'http://doc.s3.amazonaws.com/%s' % (API_VERSION)


def _clean_object_name(name):
    """
    Return the URL encoded name. name=None returns None. Useful for input
    checking without having to check for None first.

    :param name: The object name
    :type name: ``str`` or ``None``

    :return: The url-encoded object name or None if name=None.
    :rtype ``str`` or ``None``
    """
    return urlquote(name, safe='') if name else None


class ContainerPermissions(object):
    # Ordered so that each level implies all the levels below it; the
    # integer values index into ``values``.
    values = ['NONE', 'READER', 'WRITER', 'OWNER']
    NONE = 0
    READER = 1
    WRITER = 2
    OWNER = 3


class ObjectPermissions(object):
    # Same scheme as ContainerPermissions, but objects have no WRITER level.
    values = ['NONE', 'READER', 'OWNER']
    NONE = 0
    READER = 1
    OWNER = 2


class GoogleStorageConnection(ConnectionUserAndKey):
    """
    Represents a single connection to the Google storage API endpoint.

    This can either authenticate via the Google OAuth2 methods or via
    the S3 HMAC interoperability method.
    """

    host = 'storage.googleapis.com'
    responseCls = S3Response
    rawResponseCls = S3RawResponse
    PROJECT_ID_HEADER = 'x-goog-project-id'

    def __init__(self, user_id, key, secure=True, auth_type=None,
                 credential_file=None, **kwargs):
        # Infer the auth scheme from the shape of user_id unless the caller
        # pinned one explicitly.
        self.auth_type = auth_type or GoogleAuthType.guess_type(user_id)
        if GoogleAuthType.is_oauth2(self.auth_type):
            self.oauth2_credential = GoogleOAuth2Credential(
                user_id, key, self.auth_type, credential_file, **kwargs)
        else:
            self.oauth2_credential = None
        super(GoogleStorageConnection, self).__init__(user_id, key, secure,
                                                      **kwargs)

    def add_default_headers(self, headers):
        date = email.utils.formatdate(usegmt=True)
        headers['Date'] = date
        project = self.get_project()
        if project:
            headers[self.PROJECT_ID_HEADER] = project
        return headers

    def get_project(self):
        # The project is owned by the driver, not the connection.
        return getattr(self.driver, 'project', None)

    def pre_connect_hook(self, params, headers):
        # Either sign the request S3-style (HMAC interop) or attach the
        # OAuth2 bearer token.
        if self.auth_type == GoogleAuthType.GCS_S3:
            signature = self._get_s3_auth_signature(params, headers)
            headers['Authorization'] = '%s %s:%s' % (SIGNATURE_IDENTIFIER,
                                                     self.user_id, signature)
        else:
            headers['Authorization'] = ('Bearer ' +
                                        self.oauth2_credential.access_token)
        return params, headers

    def _get_s3_auth_signature(self, params, headers):
        """Hacky wrapper to work with S3's get_auth_signature."""
        headers_copy = {}
        params_copy = copy.deepcopy(params)

        # Lowercase all headers except 'date' and Google header values
        for k, v in headers.items():
            k_lower = k.lower()
            if (k_lower == 'date' or
                    k_lower.startswith(
                        GoogleStorageDriver.http_vendor_prefix) or
                    not isinstance(v, str)):
                headers_copy[k_lower] = v
            else:
                headers_copy[k_lower] = v.lower()

        return BaseS3Connection.get_auth_signature(
            method=self.method,
            headers=headers_copy,
            params=params_copy,
            expires=None,
            secret_key=self.key,
            path=self.action,
            vendor_prefix=GoogleStorageDriver.http_vendor_prefix)


class GCSResponse(GoogleResponse):
    pass


class GoogleStorageJSONConnection(GoogleStorageConnection):
    """
    Represents a single connection to the Google storage JSON API endpoint.

    This can either authenticate via the Google OAuth2 methods or via
    the S3 HMAC interoperability method.
    """

    host = 'www.googleapis.com'
    responseCls = GCSResponse
    rawResponseCls = None

    def add_default_headers(self, headers):
        headers = super(GoogleStorageJSONConnection, self).add_default_headers(
            headers)
        headers['Content-Type'] = 'application/json'
        return headers


class GoogleStorageDriver(BaseS3StorageDriver):
    """
    Driver for Google Cloud Storage.

    Can authenticate via standard Google Cloud methods (Service Accounts,
    Installed App credentials, and GCE instance service accounts)

    Examples:

    Service Accounts::

        driver = GoogleStorageDriver(key=client_email, secret=private_key,
                                     ...)

    Installed Application::

        driver = GoogleStorageDriver(key=client_id, secret=client_secret,
                                     ...)

    From GCE instance::

        driver = GoogleStorageDriver(key=foo, secret=bar, ...)

    Can also authenticate via Google Cloud Storage's S3 HMAC
    interoperability API. S3 user keys are 20 alphanumeric characters,
    starting with GOOG.

    Example::

        driver = GoogleStorageDriver(key='GOOG0123456789ABCXYZ',
                                     secret=key_secret)
    """
    name = 'Google Cloud Storage'
    website = 'http://cloud.google.com/storage'
    connectionCls = GoogleStorageConnection
    jsonConnectionCls = GoogleStorageJSONConnection
    hash_type = 'md5'
    namespace = NAMESPACE
    supports_chunked_encoding = False
    supports_s3_multipart_upload = False
    http_vendor_prefix = 'x-goog'

    def __init__(self, key, secret=None, project=None, **kwargs):
        super(GoogleStorageDriver, self).__init__(key, secret, **kwargs)
        self.project = project

        # Separate connection for the JSON (ACL) API alongside the XML one.
        self.json_connection = GoogleStorageJSONConnection(
            key, secret, **kwargs)

    def _get_container_permissions(self, container_name):
        """
        Return the container permissions for the current authenticated user.

        :param container_name: The container name.
        :type container_name: ``str``

        :return: The permissions on the container.
        :rtype: ``int`` from ContainerPermissions
        """
        # Try OWNER permissions first: try listing the bucket ACL.
        # FORBIDDEN -> exists, but not an OWNER.
        # NOT_FOUND -> bucket DNE, return NONE.
        try:
            self.json_connection.request(
                '/storage/v1/b/%s/acl' % container_name)
            return ContainerPermissions.OWNER
        except ProviderError as e:
            if e.http_code == httplib.FORBIDDEN:
                pass
            elif e.http_code == httplib.NOT_FOUND:
                return ContainerPermissions.NONE
            else:
                raise

        # Try WRITER permissions with a noop request: try delete with an
        # impossible precondition. Authorization is checked before file
        # existence or preconditions. So, if we get a NOT_FOUND or a
        # PRECONDITION_FAILED, then we must be authorized.
        try:
            self.json_connection.request(
                '/storage/v1/b/%s/o/writecheck' % container_name,
                headers={'x-goog-if-generation-match': '0'}, method='DELETE')
        except ProviderError as e:
            if e.http_code in [httplib.NOT_FOUND,
                               httplib.PRECONDITION_FAILED]:
                return ContainerPermissions.WRITER
            elif e.http_code != httplib.FORBIDDEN:
                raise

        # Last, try READER permissions: try getting container metadata.
        try:
            self.json_connection.request('/storage/v1/b/%s' % container_name)
            return ContainerPermissions.READER
        except ProviderError as e:
            if e.http_code not in [httplib.FORBIDDEN, httplib.NOT_FOUND]:
                raise

        return ContainerPermissions.NONE

    def _get_user(self):
        """Gets this drivers' authenticated user, if any."""
        oauth2_creds = getattr(self.connection, 'oauth2_credential')
        if oauth2_creds:
            return oauth2_creds.user_id
        else:
            return None

    def _get_object_permissions(self, container_name, object_name):
        """
        Return the object permissions for the current authenticated user.
        If the object does not exist, or no object_name is given, return the
        default object permissions.

        :param container_name: The container name.
        :type container_name: ``str``

        :param object_name: The object name.
        :type object_name: ``str``

        :return: The permissions on the object or default object permissions.
        :rtype: ``int`` from ObjectPermissions
        """
        # Try OWNER permissions first: try listing the object ACL.
        try:
            self.json_connection.request(
                '/storage/v1/b/%s/o/%s/acl' % (container_name, object_name))
            return ObjectPermissions.OWNER
        except ProviderError as e:
            if e.http_code not in [httplib.FORBIDDEN, httplib.NOT_FOUND]:
                raise

        # Try READER permissions: try getting the object.
        try:
            self.json_connection.request(
                '/storage/v1/b/%s/o/%s' % (container_name, object_name))
            return ObjectPermissions.READER
        except ProviderError as e:
            if e.http_code not in [httplib.FORBIDDEN, httplib.NOT_FOUND]:
                raise

        return ObjectPermissions.NONE

    def ex_delete_permissions(self, container_name, object_name=None,
                              entity=None):
        """
        Delete permissions for an ACL entity on a container or object.

        :param container_name: The container name.
        :type container_name: ``str``

        :param object_name: The object name. Optional. Not providing an
            object will delete a container permission.
        :type object_name: ``str``

        :param entity: The entity whose permission will be deleted.
            Optional. If not provided, the role will be applied to the
            authenticated user, if using an OAuth2 authentication scheme.
        :type entity: ``str`` or ``None``
        """
        object_name = _clean_object_name(object_name)
        if not entity:
            user_id = self._get_user()
            if not user_id:
                raise ValueError(
                    'Must provide an entity. Driver is not using an '
                    'authenticated user.')
            else:
                entity = 'user-%s' % user_id

        if object_name:
            url = ('/storage/v1/b/%s/o/%s/acl/%s' %
                   (container_name, object_name, entity))
        else:
            url = '/storage/v1/b/%s/acl/%s' % (container_name, entity)

        self.json_connection.request(url, method='DELETE')

    def ex_get_permissions(self, container_name, object_name=None):
        """
        Return the permissions for the currently authenticated user.

        :param container_name: The container name.
        :type container_name: ``str``

        :param object_name: The object name. Optional. Not providing an
            object will return only container permissions.
        :type object_name: ``str`` or ``None``

        :return: A tuple of container and object permissions.
        :rtype: ``tuple`` of (``int``, ``int`` or ``None``) from
            ContainerPermissions and ObjectPermissions, respectively.
        """
        object_name = _clean_object_name(object_name)
        obj_perms = self._get_object_permissions(
            container_name, object_name) if object_name else None
        return self._get_container_permissions(container_name), obj_perms

    def ex_set_permissions(self, container_name, object_name=None,
                           entity=None, role=None):
        """
        Set the permissions for an ACL entity on a container or an object.

        :param container_name: The container name.
        :type container_name: ``str``

        :param object_name: The object name. Optional. Not providing an
            object will apply the acl to the container.
        :type object_name: ``str``

        :param entity: The entity to which apply the role. Optional. If not
            provided, the role will be applied to the authenticated user, if
            using an OAuth2 authentication scheme.
        :type entity: ``str``

        :param role: The permission/role to set on the entity.
        :type role: ``int`` from ContainerPermissions or ObjectPermissions
            or ``str``.

        :raises ValueError: If no entity was given, but was required. Or if
            the role isn't valid for the bucket or object.
        """
        object_name = _clean_object_name(object_name)
        if isinstance(role, int):
            # Translate the numeric level into the API's role string.
            perms = ObjectPermissions if object_name else ContainerPermissions
            try:
                role = perms.values[role]
            except IndexError:
                raise ValueError(
                    '%s is not a valid role level for container=%s object=%s'
                    % (role, container_name, object_name))
        elif not isinstance(role, str):
            raise ValueError('%s is not a valid permission.' % role)

        if not entity:
            user_id = self._get_user()
            if not user_id:
                raise ValueError(
                    'Must provide an entity. Driver is not using an '
                    'authenticated user.')
            else:
                entity = 'user-%s' % user_id

        if object_name:
            url = '/storage/v1/b/%s/o/%s/acl' % (container_name, object_name)
        else:
            url = '/storage/v1/b/%s/acl' % container_name

        self.json_connection.request(
            url, method='POST',
            data=json.dumps({'role': role, 'entity': entity}))
apache-libcloud-2.2.1/libcloud/storage/drivers/ktucloud.py0000664000175000017500000000363212701023453023542 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.types import LibcloudError from libcloud.storage.providers import Provider from libcloud.storage.drivers.cloudfiles import CloudFilesConnection from libcloud.storage.drivers.cloudfiles import CloudFilesStorageDriver KTUCLOUDSTORAGE_AUTH_URL = "https://ssproxy.ucloudbiz.olleh.com/auth/v1.0" KTUCLOUDSTORAGE_API_VERSION = "1.0" class KTUCloudStorageConnection(CloudFilesConnection): """ Connection class for the KT UCloud Storage endpoint. """ auth_url = KTUCLOUDSTORAGE_AUTH_URL _auth_version = KTUCLOUDSTORAGE_API_VERSION def get_endpoint(self): eps = self.service_catalog.get_endpoints(name='cloudFiles') if len(eps) == 0: raise LibcloudError('Could not find specified endpoint') ep = eps[0] public_url = ep.url if not public_url: raise LibcloudError('Could not find specified endpoint') return public_url class KTUCloudStorageDriver(CloudFilesStorageDriver): """ Cloudfiles storage driver for the UK endpoint. """ type = Provider.KTUCLOUD name = 'KTUCloud Storage' connectionCls = KTUCloudStorageConnection apache-libcloud-2.2.1/libcloud/storage/drivers/s3.py0000664000175000017500000012301113153541406022234 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import base64
import hmac
import time

from hashlib import sha1

import libcloud.utils.py3

try:
    if libcloud.utils.py3.DEFAULT_LXML:
        from lxml.etree import Element, SubElement
    else:
        from xml.etree.ElementTree import Element, SubElement
except ImportError:
    from xml.etree.ElementTree import Element, SubElement

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlquote
from libcloud.utils.py3 import b
from libcloud.utils.py3 import tostring

from libcloud.utils.xml import fixxpath, findtext
from libcloud.utils.files import read_in_chunks
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.base import ConnectionUserAndKey, RawResponse
from libcloud.common.aws import AWSBaseResponse, AWSDriver, \
    AWSTokenConnection, SignedAWSConnection
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError

# How long before the token expires
EXPIRATION_SECONDS = 15 * 60

S3_US_STANDARD_HOST = 's3.amazonaws.com'
S3_US_EAST2_HOST = 's3-us-east-2.amazonaws.com'
S3_US_WEST_HOST = 's3-us-west-1.amazonaws.com'
S3_US_WEST_OREGON_HOST = 's3-us-west-2.amazonaws.com'
S3_US_GOV_WEST_HOST = 's3-us-gov-west-1.amazonaws.com'
S3_CN_NORTH_HOST = 's3.cn-north-1.amazonaws.com.cn'
S3_EU_WEST_HOST = 's3-eu-west-1.amazonaws.com'
S3_EU_WEST2_HOST = 's3-eu-west-2.amazonaws.com'
S3_EU_CENTRAL_HOST = 's3-eu-central-1.amazonaws.com'
S3_AP_SOUTH_HOST = 's3-ap-south-1.amazonaws.com'
S3_AP_SOUTHEAST_HOST = 's3-ap-southeast-1.amazonaws.com'
S3_AP_SOUTHEAST2_HOST = 's3-ap-southeast-2.amazonaws.com'
S3_AP_NORTHEAST1_HOST = 's3-ap-northeast-1.amazonaws.com'
S3_AP_NORTHEAST2_HOST = 's3-ap-northeast-2.amazonaws.com'
S3_AP_NORTHEAST_HOST = S3_AP_NORTHEAST1_HOST
S3_SA_EAST_HOST = 's3-sa-east-1.amazonaws.com'
S3_SA_SOUTHEAST2_HOST = 's3-sa-east-2.amazonaws.com'
S3_CA_CENTRAL_HOST = 's3-ca-central-1.amazonaws.com'

API_VERSION = '2006-03-01'
NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION)

# AWS multi-part chunks must be minimum 5MB
CHUNK_SIZE = 5 * 1024 * 1024

# Desired number of items in each response inside a paginated request in
# ex_iterate_multipart_uploads.
RESPONSES_PER_REQUEST = 100


class S3Response(AWSBaseResponse):
    namespace = None
    # These error statuses carry parseable bodies that callers inspect, so
    # they are treated as "successful" responses rather than raising here.
    valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,
                            httplib.BAD_REQUEST]

    def success(self):
        # Any 2xx status, plus the whitelisted error codes above.
        i = int(self.status)
        return i >= 200 and i <= 299 or i in self.valid_response_codes

    def parse_error(self):
        if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
            raise InvalidCredsError(self.body)
        elif self.status == httplib.MOVED_PERMANENTLY:
            # S3 redirects when the bucket lives in another region; the
            # region-specific driver must be used instead.
            raise LibcloudError('This bucket is located in a different ' +
                                'region. Please use the correct driver.',
                                driver=S3StorageDriver)
        raise LibcloudError('Unknown error. Status code: %d' % (self.status),
                            driver=S3StorageDriver)


class S3RawResponse(S3Response, RawResponse):
    pass


class BaseS3Connection(ConnectionUserAndKey):
    """
    Represents a single connection to the S3 Endpoint
    """

    host = 's3.amazonaws.com'
    responseCls = S3Response
    rawResponseCls = S3RawResponse

    @staticmethod
    def get_auth_signature(method, headers, params, expires, secret_key, path,
                           vendor_prefix):
        """
        Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID,
                                    UTF-8-Encoding-Of( StringToSign ) ) ) );

        StringToSign = HTTP-VERB + "\n" +
            Content-MD5 + "\n" +
            Content-Type + "\n" +
            Expires + "\n" +
            CanonicalizedVendorHeaders +
            CanonicalizedResource;
        """
        special_headers = {'content-md5': '', 'content-type': '', 'date': ''}
        vendor_headers = {}

        # Split headers into the fixed trio that always participates in the
        # signature and the vendor-prefixed (e.g. x-amz-*) headers.
        for key, value in list(headers.items()):
            key_lower = key.lower()
            if key_lower in special_headers:
                special_headers[key_lower] = value.strip()
            elif key_lower.startswith(vendor_prefix):
                vendor_headers[key_lower] = value.strip()

        # For pre-signed URLs the Expires timestamp replaces the Date value.
        if expires:
            special_headers['date'] = str(expires)

        buf = [method]
        for _, value in sorted(special_headers.items()):
            buf.append(value)
        string_to_sign = '\n'.join(buf)

        # Canonicalize vendor headers: lexicographic order, "name:value".
        buf = []
        for key, value in sorted(vendor_headers.items()):
            buf.append('%s:%s' % (key, value))
        header_string = '\n'.join(buf)

        values_to_sign = []
        for value in [string_to_sign, header_string, path]:
            if value:
                values_to_sign.append(value)

        string_to_sign = '\n'.join(values_to_sign)
        b64_hmac = base64.b64encode(
            hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest()
        )
        return b64_hmac.decode('utf-8')

    def add_default_params(self, params):
        # Query-string authentication: include the access key id and a
        # fixed-window expiry with every request.
        expires = str(int(time.time()) + EXPIRATION_SECONDS)
        params['AWSAccessKeyId'] = self.user_id
        params['Expires'] = expires
        return params

    def pre_connect_hook(self, params, headers):
        params['Signature'] = self.get_auth_signature(
            method=self.method, headers=headers, params=params,
            expires=params['Expires'], secret_key=self.key, path=self.action,
            vendor_prefix=self.driver.http_vendor_prefix)
        return params,
headers


class S3Connection(AWSTokenConnection, BaseS3Connection):
    """
    Represents a single connection to the S3 endpoint, with AWS-specific
    features.
    """
    pass


class S3SignatureV4Connection(SignedAWSConnection, BaseS3Connection):
    service_name = 's3'
    version = API_VERSION

    def __init__(self, user_id, key, secure=True, host=None, port=None,
                 url=None, timeout=None, proxy_url=None, token=None,
                 retry_delay=None, backoff=None):
        super(S3SignatureV4Connection, self).__init__(
            user_id, key, secure, host,
            port, url, timeout, proxy_url, token,
            retry_delay, backoff, 4)  # force version 4


class S3MultipartUpload(object):
    """
    Class representing an amazon s3 multipart upload
    """

    def __init__(self, key, id, created_at, initiator, owner):
        """
        Class representing an amazon s3 multipart upload

        :param key: The object/key that was being uploaded
        :type key: ``str``

        :param id: The upload id assigned by amazon
        :type id: ``str``

        :param created_at: The date/time at which the upload was started
        :type created_at: ``str``

        :param initiator: The AWS owner/IAM user who initiated this
        :type initiator: ``str``

        :param owner: The AWS owner/IAM who will own this object
        :type owner: ``str``
        """
        self.key = key
        self.id = id
        self.created_at = created_at
        self.initiator = initiator
        self.owner = owner

    def __repr__(self):
        # NOTE(review): the format string looks truncated (likely lost
        # angle-bracket markup in extraction) — verify against upstream.
        return ('' % (self.key))


class BaseS3StorageDriver(StorageDriver):
    name = 'Amazon S3 (standard)'
    website = 'http://aws.amazon.com/s3/'
    connectionCls = BaseS3Connection
    hash_type = 'md5'
    supports_chunked_encoding = False
    supports_s3_multipart_upload = True
    ex_location_name = ''
    namespace = NAMESPACE
    http_vendor_prefix = 'x-amz'

    def iterate_containers(self):
        # GET on the service root lists all buckets owned by the caller.
        response = self.connection.request('/')
        if response.status == httplib.OK:
            containers = self._to_containers(obj=response.object,
                                             xpath='Buckets/Bucket')
            return containers

        raise LibcloudError('Unexpected status code: %s' % (response.status),
                            driver=self)

    def list_container_objects(self, container, ex_prefix=None):
        """
        Return a list of objects for
the given container. :param container: Container instance. :type container: :class:`Container` :param ex_prefix: Only return objects starting with ex_prefix :type ex_prefix: ``str`` :return: A list of Object instances. :rtype: ``list`` of :class:`Object` """ return list(self.iterate_container_objects(container, ex_prefix=ex_prefix)) def iterate_container_objects(self, container, ex_prefix=None): """ Return a generator of objects for the given container. :param container: Container instance :type container: :class:`Container` :param ex_prefix: Only return objects starting with ex_prefix :type ex_prefix: ``str`` :return: A generator of Object instances. :rtype: ``generator`` of :class:`Object` """ params = {} if ex_prefix: params['prefix'] = ex_prefix last_key = None exhausted = False container_path = self._get_container_path(container) while not exhausted: if last_key: params['marker'] = last_key response = self.connection.request(container_path, params=params) if response.status != httplib.OK: raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) objects = self._to_objs(obj=response.object, xpath='Contents', container=container) is_truncated = response.object.findtext(fixxpath( xpath='IsTruncated', namespace=self.namespace)).lower() exhausted = (is_truncated == 'false') last_key = None for obj in objects: last_key = obj.name yield obj def get_container(self, container_name): try: response = self.connection.request('/%s' % container_name, method='HEAD') if response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value=None, driver=self, container_name=container_name) except InvalidCredsError: # This just means the user doesn't have IAM permissions to do a # HEAD request but other requests might work. 
pass return Container(name=container_name, extra=None, driver=self) def get_object(self, container_name, object_name): container = self.get_container(container_name=container_name) object_path = self._get_object_path(container, object_name) response = self.connection.request(object_path, method='HEAD') if response.status == httplib.OK: obj = self._headers_to_object(object_name=object_name, container=container, headers=response.headers) return obj raise ObjectDoesNotExistError(value=None, driver=self, object_name=object_name) def _get_container_path(self, container): """ Return a container path :param container: Container instance :type container: :class:`Container` :return: A path for this container. :rtype: ``str`` """ return '/%s' % (container.name) def _get_object_path(self, container, object_name): """ Return an object's CDN path. :param container: Container instance :type container: :class:`Container` :param object_name: Object name :type object_name: :class:`str` :return: A path for this object. :rtype: ``str`` """ container_url = self._get_container_path(container) object_name_cleaned = self._clean_object_name(object_name) object_path = '%s/%s' % (container_url, object_name_cleaned) return object_path def create_container(self, container_name): if self.ex_location_name: root = Element('CreateBucketConfiguration') child = SubElement(root, 'LocationConstraint') child.text = self.ex_location_name data = tostring(root) else: data = '' response = self.connection.request('/%s' % (container_name), data=data, method='PUT') if response.status == httplib.OK: container = Container(name=container_name, extra=None, driver=self) return container elif response.status == httplib.CONFLICT: raise InvalidContainerNameError( value='Container with this name already exists. 
The name must ' 'be unique among all the containers in the system', container_name=container_name, driver=self) elif response.status == httplib.BAD_REQUEST: raise ContainerError( value='Bad request when creating container: %s' % response.body, container_name=container_name, driver=self) raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) def delete_container(self, container): # Note: All the objects in the container must be deleted first response = self.connection.request('/%s' % (container.name), method='DELETE') if response.status == httplib.NO_CONTENT: return True elif response.status == httplib.CONFLICT: raise ContainerIsNotEmptyError( value='Container must be empty before it can be deleted.', container_name=container.name, driver=self) elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value=None, driver=self, container_name=container.name) return False def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): obj_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(obj_path, method='GET', raw=True) return self._get_object(obj=obj, callback=self._save_object, response=response, callback_kwargs={ 'obj': obj, 'response': response.response, 'destination_path': destination_path, 'overwrite_existing': overwrite_existing, 'delete_on_failure': delete_on_failure}, success_status_code=httplib.OK) def download_object_as_stream(self, obj, chunk_size=None): obj_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(obj_path, method='GET', stream=True, raw=True) return self._get_object( obj=obj, callback=read_in_chunks, response=response, callback_kwargs={'iterator': response.iter_content(CHUNK_SIZE), 'chunk_size': chunk_size}, success_status_code=httplib.OK) def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, ex_storage_class=None): """ @inherits: 
:class:`StorageDriver.upload_object` :param ex_storage_class: Storage class :type ex_storage_class: ``str`` """ return self._put_object(container=container, object_name=object_name, extra=extra, file_path=file_path, verify_hash=verify_hash, storage_class=ex_storage_class) def _initiate_multipart(self, container, object_name, headers=None): """ Initiates a multipart upload to S3 :param container: The destination container :type container: :class:`Container` :param object_name: The name of the object which we are uploading :type object_name: ``str`` :keyword headers: Additional headers to send with the request :type headers: ``dict`` :return: The id of the newly created multipart upload :rtype: ``str`` """ headers = headers or {} request_path = self._get_object_path(container, object_name) params = {'uploads': ''} response = self.connection.request(request_path, method='POST', headers=headers, params=params) if response.status != httplib.OK: raise LibcloudError('Error initiating multipart upload', driver=self) return findtext(element=response.object, xpath='UploadId', namespace=self.namespace) def _upload_multipart_chunks(self, container, object_name, upload_id, stream, calculate_hash=True): """ Uploads data from an iterator in fixed sized chunks to S3 :param container: The destination container :type container: :class:`Container` :param object_name: The name of the object which we are uploading :type object_name: ``str`` :param upload_id: The upload id allocated for this multipart upload :type upload_id: ``str`` :param stream: The generator for fetching the upload data :type stream: ``generator`` :keyword calculate_hash: Indicates if we must calculate the data hash :type calculate_hash: ``bool`` :return: A tuple of (chunk info, checksum, bytes transferred) :rtype: ``tuple`` """ data_hash = None if calculate_hash: data_hash = self._get_hash_function() bytes_transferred = 0 count = 1 chunks = [] params = {'uploadId': upload_id} request_path = 
self._get_object_path(container, object_name) # Read the input data in chunk sizes suitable for AWS for data in read_in_chunks(stream, chunk_size=CHUNK_SIZE, fill_size=True, yield_empty=True): bytes_transferred += len(data) if calculate_hash: data_hash.update(data) chunk_hash = self._get_hash_function() chunk_hash.update(data) chunk_hash = base64.b64encode(chunk_hash.digest()).decode('utf-8') # The Content-MD5 header provides an extra level of data check and # is recommended by amazon headers = { 'Content-Length': len(data), 'Content-MD5': chunk_hash, } params['partNumber'] = count resp = self.connection.request(request_path, method='PUT', data=data, headers=headers, params=params) if resp.status != httplib.OK: raise LibcloudError('Error uploading chunk', driver=self) server_hash = resp.headers['etag'].replace('"', '') # Keep this data for a later commit chunks.append((count, server_hash)) count += 1 if calculate_hash: data_hash = data_hash.hexdigest() return (chunks, data_hash, bytes_transferred) def _commit_multipart(self, container, object_name, upload_id, chunks): """ Makes a final commit of the data. :param container: The destination container :type container: :class:`Container` :param object_name: The name of the object which we are uploading :type object_name: ``str`` :param upload_id: The upload id allocated for this multipart upload :type upload_id: ``str`` :param chunks: A list of (chunk_number, chunk_hash) tuples. 
    def _commit_multipart(self, container, object_name, upload_id, chunks):
        """
        Makes a final commit of the data.

        :param container: The destination container
        :type container: :class:`Container`

        :param object_name: The name of the object which we are uploading
        :type object_name: ``str``

        :param upload_id: The upload id allocated for this multipart upload
        :type upload_id: ``str``

        :param chunks: A list of (chunk_number, chunk_hash) tuples.
        :type chunks: ``list``

        :return: The server side hash of the uploaded data
        :rtype: ``str``
        """
        # Build the CompleteMultipartUpload XML document listing every part
        # number together with the etag S3 returned when it was uploaded.
        root = Element('CompleteMultipartUpload')

        for (count, etag) in chunks:
            part = SubElement(root, 'Part')
            part_no = SubElement(part, 'PartNumber')
            part_no.text = str(count)

            etag_id = SubElement(part, 'ETag')
            etag_id.text = str(etag)

        data = tostring(root)

        headers = {'Content-Length': len(data)}
        params = {'uploadId': upload_id}
        request_path = self._get_object_path(container, object_name)
        response = self.connection.request(request_path, headers=headers,
                                           params=params, data=data,
                                           method='POST')

        if response.status != httplib.OK:
            element = response.object
            # pylint: disable=maybe-no-member
            code, message = response._parse_error_details(element=element)
            msg = 'Error in multipart commit: %s (%s)' % (message, code)
            raise LibcloudError(msg, driver=self)

        # Get the server's etag to be passed back to the caller
        body = response.parse_body()
        server_hash = body.find(fixxpath(xpath='ETag',
                                         namespace=self.namespace)).text
        return server_hash

    def _abort_multipart(self, container, object_name, upload_id):
        """
        Aborts an already initiated multipart upload

        :param container: The destination container
        :type container: :class:`Container`

        :param object_name: The name of the object which we are uploading
        :type object_name: ``str``

        :param upload_id: The upload id allocated for this multipart upload
        :type upload_id: ``str``
        """
        params = {'uploadId': upload_id}
        request_path = self._get_object_path(container, object_name)

        resp = self.connection.request(request_path, method='DELETE',
                                       params=params)

        if resp.status != httplib.NO_CONTENT:
            raise LibcloudError('Error in multipart abort. '
                                'status_code=%d' % (resp.status),
                                driver=self)
status_code=%d' % (resp.status), driver=self) def upload_object_via_stream(self, iterator, container, object_name, extra=None, ex_storage_class=None): """ @inherits: :class:`StorageDriver.upload_object_via_stream` :param ex_storage_class: Storage class :type ex_storage_class: ``str`` """ method = 'PUT' params = None # This driver is used by other S3 API compatible drivers also. # Amazon provides a different (complex?) mechanism to do multipart # uploads if self.supports_s3_multipart_upload: return self._put_object_multipart(container=container, object_name=object_name, extra=extra, stream=iterator, verify_hash=False, storage_class=ex_storage_class) return self._put_object(container=container, object_name=object_name, extra=extra, method=method, query_args=params, stream=iterator, verify_hash=False, storage_class=ex_storage_class) def delete_object(self, obj): object_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(object_path, method='DELETE') if response.status == httplib.NO_CONTENT: return True elif response.status == httplib.NOT_FOUND: raise ObjectDoesNotExistError(value=None, driver=self, object_name=obj.name) return False def ex_iterate_multipart_uploads(self, container, prefix=None, delimiter=None): """ Extension method for listing all in-progress S3 multipart uploads. Each multipart upload which has not been committed or aborted is considered in-progress. :param container: The container holding the uploads :type container: :class:`Container` :keyword prefix: Print only uploads of objects with this prefix :type prefix: ``str`` :keyword delimiter: The object/key names are grouped based on being split by this delimiter :type delimiter: ``str`` :return: A generator of S3MultipartUpload instances. 
    def ex_iterate_multipart_uploads(self, container, prefix=None,
                                     delimiter=None):
        """
        Extension method for listing all in-progress S3 multipart uploads.

        Each multipart upload which has not been committed or aborted is
        considered in-progress.

        :param container: The container holding the uploads
        :type container: :class:`Container`

        :keyword prefix: Print only uploads of objects with this prefix
        :type prefix: ``str``

        :keyword delimiter: The object/key names are grouped based on
            being split by this delimiter
        :type delimiter: ``str``

        :return: A generator of S3MultipartUpload instances.
        :rtype: ``generator`` of :class:`S3MultipartUpload`
        """
        if not self.supports_s3_multipart_upload:
            raise LibcloudError('Feature not supported', driver=self)

        # Get the data for a specific container
        request_path = self._get_container_path(container)

        params = {'max-uploads': RESPONSES_PER_REQUEST, 'uploads': ''}

        if prefix:
            params['prefix'] = prefix

        if delimiter:
            params['delimiter'] = delimiter

        def finder(node, text):
            # Namespace-aware helper for pulling a child element's text.
            return node.findtext(fixxpath(xpath=text,
                                          namespace=self.namespace))

        # Paginated listing: loop until the server reports the listing is
        # no longer truncated, feeding the next-page markers back into
        # ``params`` on every round.
        while True:
            response = self.connection.request(request_path, params=params)

            if response.status != httplib.OK:
                raise LibcloudError('Error fetching multipart uploads. '
                                    'Got code: %s' % response.status,
                                    driver=self)

            body = response.parse_body()
            # pylint: disable=maybe-no-member
            for node in body.findall(fixxpath(xpath='Upload',
                                              namespace=self.namespace)):

                initiator = node.find(fixxpath(xpath='Initiator',
                                               namespace=self.namespace))
                owner = node.find(fixxpath(xpath='Owner',
                                           namespace=self.namespace))

                key = finder(node, 'Key')
                upload_id = finder(node, 'UploadId')
                created_at = finder(node, 'Initiated')
                initiator = finder(initiator, 'DisplayName')
                owner = finder(owner, 'DisplayName')

                yield S3MultipartUpload(key, upload_id, created_at,
                                        initiator, owner)

            # Check if this is the last entry in the listing
            # pylint: disable=maybe-no-member
            is_truncated = body.findtext(fixxpath(xpath='IsTruncated',
                                                  namespace=self.namespace))

            if is_truncated.lower() == 'false':
                break

            # Provide params for the next request
            upload_marker = body.findtext(fixxpath(xpath='NextUploadIdMarker',
                                                   namespace=self.namespace))
            key_marker = body.findtext(fixxpath(xpath='NextKeyMarker',
                                                namespace=self.namespace))

            params['key-marker'] = key_marker
            params['upload-id-marker'] = upload_marker
:param container: The container holding the uploads :type container: :class:`Container` :keyword prefix: Delete only uploads of objects with this prefix :type prefix: ``str`` """ # Iterate through the container and delete the upload ids for upload in self.ex_iterate_multipart_uploads(container, prefix, delimiter=None): self._abort_multipart(container, upload.key, upload.id) def _clean_object_name(self, name): name = urlquote(name) return name def _put_object(self, container, object_name, method='PUT', query_args=None, extra=None, file_path=None, stream=None, verify_hash=True, storage_class=None): headers = {} extra = extra or {} headers.update(self._to_storage_class_headers(storage_class)) content_type = extra.get('content_type', None) meta_data = extra.get('meta_data', None) acl = extra.get('acl', None) if meta_data: for key, value in list(meta_data.items()): key = self.http_vendor_prefix + '-meta-%s' % (key) headers[key] = value if acl: headers[self.http_vendor_prefix + '-acl'] = acl request_path = self._get_object_path(container, object_name) if query_args: request_path = '?'.join((request_path, query_args)) result_dict = self._upload_object( object_name=object_name, content_type=content_type, request_path=request_path, request_method=method, headers=headers, file_path=file_path, stream=stream) response = result_dict['response'] bytes_transferred = result_dict['bytes_transferred'] headers = response.headers response = response server_hash = headers.get('etag', '').replace('"', '') if (verify_hash and result_dict['data_hash'] != server_hash): raise ObjectHashMismatchError( value='MD5 hash {0} checksum does not match {1}'.format( server_hash, result_dict['data_hash']), object_name=object_name, driver=self) elif response.status == httplib.OK: obj = Object( name=object_name, size=bytes_transferred, hash=server_hash, extra={'acl': acl}, meta_data=meta_data, container=container, driver=self) return obj else: raise LibcloudError( 'Unexpected status code, 
    def _put_object_multipart(self, container, object_name, stream,
                              extra=None, verify_hash=False,
                              storage_class=None):
        """
        Uploads an object using the S3 multipart algorithm.

        :param container: The destination container
        :type container: :class:`Container`

        :param object_name: The name of the object which we are uploading
        :type object_name: ``str``

        :param stream: The generator for fetching the upload data
        :type stream: ``generator``

        :keyword verify_hash: Indicates if we must calculate the data hash
        :type verify_hash: ``bool``

        :keyword extra: Additional options
        :type extra: ``dict``

        :keyword storage_class: The name of the S3 object's storage class
        :type storage_class: ``str``

        :return: The uploaded object
        :rtype: :class:`Object`
        """
        headers = {}
        extra = extra or {}

        headers.update(self._to_storage_class_headers(storage_class))

        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)
        acl = extra.get('acl', None)

        if content_type:
            headers['Content-Type'] = content_type

        if meta_data:
            for key, value in list(meta_data.items()):
                key = self.http_vendor_prefix + '-meta-%s' % (key)
                headers[key] = value

        if acl:
            headers[self.http_vendor_prefix + '-acl'] = acl

        # Three-phase multipart upload: initiate, upload parts, commit.
        upload_id = self._initiate_multipart(container, object_name,
                                             headers=headers)

        try:
            result = self._upload_multipart_chunks(container, object_name,
                                                   upload_id, stream,
                                                   calculate_hash=verify_hash)
            chunks, data_hash, bytes_transferred = result

            # Commit the chunk info and complete the upload
            etag = self._commit_multipart(container, object_name, upload_id,
                                          chunks)
        except Exception:
            # Amazon provides a mechanism for aborting an upload.
            # Abort so the partially uploaded parts do not keep accruing
            # storage charges, then re-raise the original error.
            self._abort_multipart(container, object_name, upload_id)
            raise

        return Object(
            name=object_name, size=bytes_transferred, hash=etag,
            extra={'acl': acl}, meta_data=meta_data, container=container,
            driver=self)
:keyword storage_class: The name of the S3 object's storage class :type extra: ``str`` :return: Headers to include in a request :rtype: :dict: """ headers = {} storage_class = storage_class or 'standard' if storage_class not in ['standard', 'reduced_redundancy']: raise ValueError( 'Invalid storage class value: %s' % (storage_class)) key = self.http_vendor_prefix + '-storage-class' headers[key] = storage_class.upper() return headers def _to_containers(self, obj, xpath): for element in obj.findall(fixxpath(xpath=xpath, namespace=self.namespace)): yield self._to_container(element) def _to_objs(self, obj, xpath, container): return [self._to_obj(element, container) for element in obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))] def _to_container(self, element): extra = { 'creation_date': findtext(element=element, xpath='CreationDate', namespace=self.namespace) } container = Container(name=findtext(element=element, xpath='Name', namespace=self.namespace), extra=extra, driver=self ) return container def _headers_to_object(self, object_name, container, headers): hash = headers['etag'].replace('"', '') extra = {'content_type': headers['content-type'], 'etag': headers['etag']} meta_data = {} if 'last-modified' in headers: extra['last_modified'] = headers['last-modified'] for key, value in headers.items(): if not key.lower().startswith(self.http_vendor_prefix + '-meta-'): continue key = key.replace(self.http_vendor_prefix + '-meta-', '') meta_data[key] = value obj = Object(name=object_name, size=headers['content-length'], hash=hash, extra=extra, meta_data=meta_data, container=container, driver=self) return obj def _to_obj(self, element, container): owner_id = findtext(element=element, xpath='Owner/ID', namespace=self.namespace) owner_display_name = findtext(element=element, xpath='Owner/DisplayName', namespace=self.namespace) meta_data = {'owner': {'id': owner_id, 'display_name': owner_display_name}} last_modified = findtext(element=element, xpath='LastModified', 
# Canonical Amazon S3 driver (us-east-1).  Each per-region driver below only
# overrides the connection class (which pins the region endpoint host) plus
# the region / location names; all behaviour comes from BaseS3StorageDriver.
class S3StorageDriver(AWSDriver, BaseS3StorageDriver):
    name = 'Amazon S3 (us-east-1)'
    connectionCls = S3SignatureV4Connection
    region_name = 'us-east-1'


class S3USEast2Connection(S3SignatureV4Connection):
    host = S3_US_EAST2_HOST


class S3USEast2StorageDriver(S3StorageDriver):
    name = 'Amazon S3 (us-east-2)'
    connectionCls = S3USEast2Connection
    ex_location_name = 'us-east-2'
    region_name = 'us-east-2'


class S3USWestConnection(S3SignatureV4Connection):
    host = S3_US_WEST_HOST


class S3USWestStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (us-west-1)'
    connectionCls = S3USWestConnection
    ex_location_name = 'us-west-1'
    region_name = 'us-west-1'


class S3USWestOregonConnection(S3SignatureV4Connection):
    host = S3_US_WEST_OREGON_HOST


class S3USWestOregonStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (us-west-2)'
    connectionCls = S3USWestOregonConnection
    ex_location_name = 'us-west-2'
    region_name = 'us-west-2'


class S3USGovWestConnection(S3SignatureV4Connection):
    host = S3_US_GOV_WEST_HOST


class S3USGovWestStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (us-gov-west-1)'
    connectionCls = S3USGovWestConnection
    ex_location_name = 'us-gov-west-1'
    region_name = 'us-gov-west-1'


class S3CNNorthConnection(S3SignatureV4Connection):
    host = S3_CN_NORTH_HOST


class S3CNNorthStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (cn-north-1)'
    connectionCls = S3CNNorthConnection
    ex_location_name = 'cn-north-1'
    region_name = 'cn-north-1'


class S3EUWestConnection(S3SignatureV4Connection):
    host = S3_EU_WEST_HOST


class S3EUWestStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (eu-west-1)'
    connectionCls = S3EUWestConnection
    # NOTE: 'EU' (not 'eu-west-1') is the legacy S3 LocationConstraint value
    # for this region.
    ex_location_name = 'EU'
    region_name = 'eu-west-1'
# Per-region connection / driver pairs, continued.  Each connection pins the
# region endpoint host; each driver pins the matching region names.
class S3EUWest2Connection(S3SignatureV4Connection):
    host = S3_EU_WEST2_HOST


class S3EUWest2StorageDriver(S3StorageDriver):
    name = 'Amazon S3 (eu-west-2)'
    connectionCls = S3EUWest2Connection
    ex_location_name = 'eu-west-2'
    region_name = 'eu-west-2'


class S3EUCentralConnection(S3SignatureV4Connection):
    host = S3_EU_CENTRAL_HOST


class S3EUCentralStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (eu-central-1)'
    connectionCls = S3EUCentralConnection
    ex_location_name = 'eu-central-1'
    region_name = 'eu-central-1'


class S3APSEConnection(S3SignatureV4Connection):
    host = S3_AP_SOUTHEAST_HOST


class S3APSEStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-southeast-1)'
    connectionCls = S3APSEConnection
    ex_location_name = 'ap-southeast-1'
    region_name = 'ap-southeast-1'


class S3APSE2Connection(S3SignatureV4Connection):
    host = S3_AP_SOUTHEAST2_HOST


class S3APSE2StorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-southeast-2)'
    connectionCls = S3APSE2Connection
    ex_location_name = 'ap-southeast-2'
    region_name = 'ap-southeast-2'


class S3APNE1Connection(S3SignatureV4Connection):
    host = S3_AP_NORTHEAST1_HOST


# Backwards-compatible alias kept for callers which predate the split of
# ap-northeast into numbered regions.
S3APNEConnection = S3APNE1Connection


class S3APNE1StorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-northeast-1)'
    connectionCls = S3APNEConnection
    ex_location_name = 'ap-northeast-1'
    region_name = 'ap-northeast-1'


# Backwards-compatible alias (see S3APNEConnection above).
S3APNEStorageDriver = S3APNE1StorageDriver


class S3APNE2Connection(S3SignatureV4Connection):
    host = S3_AP_NORTHEAST2_HOST


class S3APNE2StorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-northeast-2)'
    connectionCls = S3APNE2Connection
    ex_location_name = 'ap-northeast-2'
    region_name = 'ap-northeast-2'


class S3APSouthConnection(S3SignatureV4Connection):
    host = S3_AP_SOUTH_HOST


class S3APSouthStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-south-1)'
    connectionCls = S3APSouthConnection
    ex_location_name = 'ap-south-1'
    region_name = 'ap-south-1'
# Final two region pairs of the S3 driver family (see S3StorageDriver above
# for the pattern).
class S3SAEastConnection(S3SignatureV4Connection):
    host = S3_SA_EAST_HOST


class S3SAEastStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (sa-east-1)'
    connectionCls = S3SAEastConnection
    ex_location_name = 'sa-east-1'
    region_name = 'sa-east-1'


class S3CACentralConnection(S3SignatureV4Connection):
    host = S3_CA_CENTRAL_HOST


class S3CACentralStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ca-central-1)'
    connectionCls = S3CACentralConnection
    ex_location_name = 'ca-central-1'
    region_name = 'ca-central-1'
class DummyFileObject(file):
    """
    In-memory stand-in for a file, used by the dummy storage driver tests.

    It produces ``yield_count`` chunks of ``chunk_len`` random characters
    and reports a total length of ``yield_count * chunk_len``.
    """

    def __init__(self, yield_count=5, chunk_len=10):
        # Deliberately does not call file/FileIO __init__ - no real file is
        # ever opened; only read() and __len__() are used by the driver.
        self._yield_count = yield_count
        self._chunk_len = chunk_len

    def read(self, size):
        """
        Generator yielding ``yield_count`` random chunks.

        :param size: Accepted for file API compatibility, but ignored.
        """
        i = 0
        while i < self._yield_count:
            yield self._get_chunk(self._chunk_len)
            i += 1
        # FIX: the original raised StopIteration here, which inside a
        # generator is a RuntimeError since PEP 479 (Python 3.7+); a plain
        # return ends the generator cleanly.
        return

    def _get_chunk(self, chunk_len):
        # FIX: the original iterated over ``random.randint(97, 120)`` (an
        # int), raising TypeError whenever called.  Build a string of
        # chunk_len random characters in the 97..120 code point range.
        chunk = ''.join(chr(random.randint(97, 120))
                        for _ in range(chunk_len))
        return chunk

    def __len__(self):
        return self._yield_count * self._chunk_len


class DummyIterator(object):
    """
    Iterator over a list of values which also maintains an MD5 digest of
    everything it has yielded so far.
    """

    def __init__(self, data=None):
        self.hash = hashlib.md5()
        self._data = data or []
        self._current_item = 0

    def get_md5_hash(self):
        """Return the hex MD5 digest of all values yielded so far."""
        return self.hash.hexdigest()

    def next(self):
        if self._current_item == len(self._data):
            raise StopIteration

        value = self._data[self._current_item]
        # Feed the value into the running hash (encoding str to bytes, as
        # required by hashlib on Python 3).
        if not isinstance(value, bytes):
            value = value.encode('utf-8')
        self.hash.update(value)
        self._current_item += 1
        return self._data[self._current_item - 1]

    def __next__(self):
        # Python 3 iterator protocol delegates to the legacy next().
        return self.next()

    def __iter__(self):
        # FIX: the class previously implemented __next__ but not __iter__,
        # so it could not be used directly in for-loops or list().
        return self

    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        pass
    name = 'Dummy Storage Provider'
    website = 'http://example.com'

    def __init__(self, api_key, api_secret):
        """
        :param api_key: API key or username to used (required)
        :type api_key: ``str``

        :param api_secret: Secret password to be used (required)
        :type api_secret: ``str``

        :rtype: ``None``
        """
        # Credentials are accepted for interface compatibility with real
        # drivers but are never used; all state lives in this in-memory map
        # of container name -> {'container', 'objects', 'cdn_url'}.
        self._containers = {}

    def get_meta_data(self):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_meta_data()['object_count']
        0
        >>> driver.get_meta_data()['container_count']
        0
        >>> driver.get_meta_data()['bytes_used']
        0
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container_name = 'test container 2'
        >>> container = driver.create_container(container_name=container_name)
        >>> obj = container.upload_object_via_stream(
        ...  object_name='test object', iterator=DummyFileObject(5, 10),
        ...  extra={})
        >>> driver.get_meta_data()['object_count']
        1
        >>> driver.get_meta_data()['container_count']
        2
        >>> driver.get_meta_data()['bytes_used']
        50

        :rtype: ``dict``
        """
        container_count = len(self._containers)
        object_count = sum([len(self._containers[container]['objects']) for
                            container in self._containers])

        # Total size across every object in every container.
        bytes_used = 0
        for container in self._containers:
            objects = self._containers[container]['objects']
            for _, obj in objects.items():
                bytes_used += obj.size

        return {'container_count': int(container_count),
                'object_count': int(object_count),
                'bytes_used': int(bytes_used)}
#doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerAlreadyExistsError: >>> container_list=list(driver.iterate_containers()) >>> sorted([c.name for c in container_list]) ['test container 1', 'test container 2'] @inherits: :class:`StorageDriver.iterate_containers` """ for container in list(self._containers.values()): yield container['container'] def list_container_objects(self, container): container = self.get_container(container.name) return container.objects def get_container(self, container_name): """ >>> driver = DummyStorageDriver('key', 'secret') >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerDoesNotExistError: >>> container_name = 'test container 1' >>> container = driver.create_container(container_name=container_name) >>> container >>> container.name 'test container 1' >>> driver.get_container('test container 1') @inherits: :class:`StorageDriver.get_container` """ if container_name not in self._containers: raise ContainerDoesNotExistError(driver=self, value=None, container_name=container_name) return self._containers[container_name]['container'] def get_container_cdn_url(self, container): """ >>> driver = DummyStorageDriver('key', 'secret') >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerDoesNotExistError: >>> container_name = 'test container 1' >>> container = driver.create_container(container_name=container_name) >>> container >>> container.name 'test container 1' >>> container.get_cdn_url() 'http://www.test.com/container/test_container_1' @inherits: :class:`StorageDriver.get_container_cdn_url` """ if container.name not in self._containers: raise ContainerDoesNotExistError(driver=self, value=None, container_name=container.name) return self._containers[container.name]['cdn_url'] def get_object(self, container_name, object_name): """ >>> driver = DummyStorageDriver('key', 'secret') >>> 
    def get_object(self, container_name, object_name):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_object('unknown', 'unknown')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container
        >>> driver.get_object(
        ...  'test container 1', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ObjectDoesNotExistError:
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...      iterator=DummyFileObject(5, 10), extra={})
        >>> obj.name
        'test object'
        >>> obj.size
        50

        @inherits: :class:`StorageDriver.get_object`
        """
        # Validates the container first so a missing container raises
        # ContainerDoesNotExistError rather than KeyError.
        self.get_container(container_name)
        container_objects = self._containers[container_name]['objects']
        if object_name not in container_objects:
            raise ObjectDoesNotExistError(object_name=object_name,
                                          value=None, driver=self)

        return container_objects[object_name]

    def get_object_cdn_url(self, obj):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container
        >>> obj = container.upload_object_via_stream(
        ...      object_name='test object 5',
        ...      iterator=DummyFileObject(5, 10), extra={})
        >>> obj.name
        'test object 5'
        >>> obj.get_cdn_url()
        'http://www.test.com/object/test_object_5'

        @inherits: :class:`StorageDriver.get_object_cdn_url`
        """
        container_name = obj.container.name
        container_objects = self._containers[container_name]['objects']
        if obj.name not in container_objects:
            raise ObjectDoesNotExistError(object_name=obj.name, value=None,
                                          driver=self)

        # The fake CDN URL is stored in the object's meta_data by
        # _add_object when the object is created.
        return container_objects[obj.name].meta_data['cdn_url']
#doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerAlreadyExistsError: @inherits: :class:`StorageDriver.create_container` """ if container_name in self._containers: raise ContainerAlreadyExistsError(container_name=container_name, value=None, driver=self) extra = {'object_count': 0} container = Container(name=container_name, extra=extra, driver=self) self._containers[container_name] = {'container': container, 'objects': {}, 'cdn_url': 'http://www.test.com/container/%s' % (container_name.replace(' ', '_')) } return container def delete_container(self, container): """ >>> driver = DummyStorageDriver('key', 'secret') >>> container = Container(name = 'test container', ... extra={'object_count': 0}, driver=driver) >>> driver.delete_container(container=container) ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerDoesNotExistError: >>> container = driver.create_container( ... container_name='test container 1') ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> len(driver._containers) 1 >>> driver.delete_container(container=container) True >>> len(driver._containers) 0 >>> container = driver.create_container( ... container_name='test container 1') ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> obj = container.upload_object_via_stream( ... object_name='test object', iterator=DummyFileObject(5, 10), ... extra={}) >>> driver.delete_container(container=container) ... 
#doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerIsNotEmptyError: @inherits: :class:`StorageDriver.delete_container` """ container_name = container.name if container_name not in self._containers: raise ContainerDoesNotExistError(container_name=container_name, value=None, driver=self) container = self._containers[container_name] if len(container['objects']) > 0: raise ContainerIsNotEmptyError(container_name=container_name, value=None, driver=self) del self._containers[container_name] return True def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): kwargs_dict = {'obj': obj, 'response': DummyFileObject(), 'destination_path': destination_path, 'overwrite_existing': overwrite_existing, 'delete_on_failure': delete_on_failure} return self._save_object(**kwargs_dict) def download_object_as_stream(self, obj, chunk_size=None): """ >>> driver = DummyStorageDriver('key', 'secret') >>> container = driver.create_container( ... container_name='test container 1') ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> obj = container.upload_object_via_stream(object_name='test object', ... iterator=DummyFileObject(5, 10), extra={}) >>> stream = container.download_object_as_stream(obj) >>> stream #doctest: +ELLIPSIS <...closed...> @inherits: :class:`StorageDriver.download_object_as_stream` """ return DummyFileObject() def upload_object(self, file_path, container, object_name, extra=None, file_hash=None): """ >>> driver = DummyStorageDriver('key', 'secret') >>> container_name = 'test container 1' >>> container = driver.create_container(container_name=container_name) >>> container.upload_object(file_path='/tmp/inexistent.file', ... object_name='test') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): LibcloudError: >>> file_path = path = os.path.abspath(__file__) >>> file_size = os.path.getsize(file_path) >>> obj = container.upload_object(file_path=file_path, ... 
    def upload_object(self, file_path, container, object_name, extra=None,
                      file_hash=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container.upload_object(file_path='/tmp/inexistent.file',
        ...     object_name='test') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        LibcloudError:
        >>> file_path = path = os.path.abspath(__file__)
        >>> file_size = os.path.getsize(file_path)
        >>> obj = container.upload_object(file_path=file_path,
        ...                               object_name='test')
        >>> obj #doctest: +ELLIPSIS
        >>> obj.size == file_size
        True

        @inherits: :class:`StorageDriver.upload_object`

        :param file_hash: File hash
        :type file_hash: ``str``
        """
        if not os.path.exists(file_path):
            raise LibcloudError(value='File %s does not exist' % (file_path),
                                driver=self)

        # Only the on-disk size is recorded; the file contents are never
        # read (this is a dummy driver).
        size = os.path.getsize(file_path)
        return self._add_object(container=container, object_name=object_name,
                                size=size, extra=extra)

    def upload_object_via_stream(self, iterator, container,
                                 object_name, extra=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...    container_name='test container 1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(
        ...   object_name='test object', iterator=DummyFileObject(5, 10),
        ...   extra={})
        >>> obj #doctest: +ELLIPSIS

        @inherits: :class:`StorageDriver.upload_object_via_stream`
        """
        # Relies on the iterator supporting len() (DummyFileObject does);
        # the streamed data itself is discarded.
        size = len(iterator)
        return self._add_object(container=container, object_name=object_name,
                                size=size, extra=extra)
    def delete_object(self, obj):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...   container_name='test container 1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...   iterator=DummyFileObject(5, 10), extra={})
        >>> obj #doctest: +ELLIPSIS
        >>> container.delete_object(obj=obj)
        True
        >>> obj = Object(name='test object 2',
        ...    size=1000, hash=None, extra=None,
        ...    meta_data=None, container=container,driver=None)
        >>> container.delete_object(obj=obj) #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ObjectDoesNotExistError:

        @inherits: :class:`StorageDriver.delete_object`
        """
        container_name = obj.container.name
        object_name = obj.name
        # get_object raises ObjectDoesNotExistError / ContainerDoesNotExist-
        # Error as appropriate before we touch the internal map.
        obj = self.get_object(container_name=container_name,
                              object_name=object_name)

        del self._containers[container_name]['objects'][object_name]
        return True

    def _add_object(self, container, object_name, size, extra=None):
        # Shared helper for both upload paths: registers a new Object (with
        # a fake CDN URL in its meta_data) in the container's object map.
        container = self.get_container(container.name)

        extra = extra or {}
        meta_data = extra.get('meta_data', {})
        meta_data.update({'cdn_url': 'http://www.test.com/object/%s' %
                          (object_name.replace(' ', '_'))})

        obj = Object(name=object_name, size=size, extra=extra, hash=None,
                     meta_data=meta_data, container=container, driver=self)

        self._containers[container.name]['objects'][object_name] = obj
        return obj


if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
import time import hashlib import hmac try: import simplejson as json except ImportError: import json # NOQA from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlencode from libcloud.common.base import ConnectionUserAndKey, JsonResponse from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.storage.base import Container, StorageDriver class NimbusResponse(JsonResponse): valid_response_codes = [httplib.OK, httplib.NOT_FOUND, httplib.CONFLICT, httplib.BAD_REQUEST] def success(self): return self.status in self.valid_response_codes def parse_error(self): if self.status in [httplib.UNAUTHORIZED]: raise InvalidCredsError(self.body) raise LibcloudError('Unknown error. Status code: %d' % (self.status), driver=self.connection.driver) class NimbusConnection(ConnectionUserAndKey): host = 'nimbus.io' responseCls = NimbusResponse def __init__(self, *args, **kwargs): self.id = kwargs.pop('id') super(NimbusConnection, self).__init__(*args, **kwargs) def pre_connect_hook(self, params, headers): timestamp = str(int(time.time())) signature = self._calculate_signature(user_id=self.user_id, method=self.method, params=params, path=self.action, timestamp=timestamp, key=self.key) headers['X-NIMBUS-IO-Timestamp'] = timestamp headers['Authorization'] = 'NIMBUS.IO %s:%s' % (self.id, signature) return params, headers def _calculate_signature(self, user_id, method, params, path, timestamp, key): if params: uri_path = path + '?' 
+ urlencode(params) else: uri_path = path string_to_sign = [user_id, method, str(timestamp), uri_path] string_to_sign = '\n'.join(string_to_sign) hmac_value = hmac.new(key, string_to_sign, hashlib.sha256) return hmac_value.hexdigest() class NimbusStorageDriver(StorageDriver): name = 'Nimbus.io' website = 'https://nimbus.io/' connectionCls = NimbusConnection def __init__(self, *args, **kwargs): self.user_id = kwargs['user_id'] super(NimbusStorageDriver, self).__init__(*args, **kwargs) def iterate_containers(self): response = self.connection.request('/customers/%s/collections' % (self.user_id)) return self._to_containers(response.object) def create_container(self, container_name): params = {'action': 'create', 'name': container_name} response = self.connection.request('/customers/%s/collections' % (self.user_id), params=params, method='POST') return self._to_container(response.object) def _to_containers(self, data): for item in data: yield self._to_container(item) def _to_container(self, data): name = data[0] extra = {'date_created': data[2]} return Container(name=name, extra=extra, driver=self) def _ex_connection_class_kwargs(self): result = {'id': self.user_id} return result apache-libcloud-2.2.1/libcloud/storage/drivers/local.py0000664000175000017500000004522412701023453023005 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Provides storage driver for working with local filesystem """ from __future__ import with_statement import errno import os import shutil import sys try: import lockfile from lockfile import LockTimeout, mkdirlockfile except ImportError: raise ImportError('Missing lockfile dependency, you can install it ' 'using pip: pip install lockfile') from libcloud.utils.files import read_in_chunks from libcloud.utils.py3 import relpath from libcloud.utils.py3 import u from libcloud.common.base import Connection from libcloud.storage.base import Object, Container, StorageDriver from libcloud.common.types import LibcloudError from libcloud.storage.types import ContainerAlreadyExistsError from libcloud.storage.types import ContainerDoesNotExistError from libcloud.storage.types import ContainerIsNotEmptyError from libcloud.storage.types import ObjectError from libcloud.storage.types import ObjectDoesNotExistError from libcloud.storage.types import InvalidContainerNameError IGNORE_FOLDERS = ['.lock', '.hash'] class LockLocalStorage(object): """ A class to help in locking a local path before being updated """ def __init__(self, path): self.path = path self.lock = mkdirlockfile.MkdirLockFile(self.path, threaded=True) def __enter__(self): try: self.lock.acquire(timeout=0.1) except LockTimeout: raise LibcloudError('Lock timeout') def __exit__(self, type, value, traceback): if self.lock.is_locked(): self.lock.release() if value is not None: raise value class LocalStorageDriver(StorageDriver): """ Implementation of local file-system based storage. 
This is helpful where the user would want to use the same code (using libcloud) and switch between cloud storage and local storage """ connectionCls = Connection name = 'Local Storage' website = 'http://example.com' hash_type = 'md5' def __init__(self, key, secret=None, secure=True, host=None, port=None, **kwargs): # Use the key as the path to the storage self.base_path = key if not os.path.isdir(self.base_path): raise LibcloudError('The base path is not a directory') super(LocalStorageDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, **kwargs) def _make_path(self, path, ignore_existing=True): """ Create a path by checking if it already exists """ try: os.makedirs(path) except OSError: exp = sys.exc_info()[1] if exp.errno == errno.EEXIST and not ignore_existing: raise exp def _check_container_name(self, container_name): """ Check if the container name is valid :param container_name: Container name :type container_name: ``str`` """ if '/' in container_name or '\\' in container_name: raise InvalidContainerNameError(value=None, driver=self, container_name=container_name) def _make_container(self, container_name): """ Create a container instance :param container_name: Container name. :type container_name: ``str`` :return: Container instance. :rtype: :class:`Container` """ self._check_container_name(container_name) full_path = os.path.join(self.base_path, container_name) try: stat = os.stat(full_path) if not os.path.isdir(full_path): raise OSError('Target path is not a directory') except OSError: raise ContainerDoesNotExistError(value=None, driver=self, container_name=container_name) extra = {} extra['creation_time'] = stat.st_ctime extra['access_time'] = stat.st_atime extra['modify_time'] = stat.st_mtime return Container(name=container_name, extra=extra, driver=self) def _make_object(self, container, object_name): """ Create an object instance :param container: Container. 
:type container: :class:`Container` :param object_name: Object name. :type object_name: ``str`` :return: Object instance. :rtype: :class:`Object` """ full_path = os.path.join(self.base_path, container.name, object_name) if os.path.isdir(full_path): raise ObjectError(value=None, driver=self, object_name=object_name) try: stat = os.stat(full_path) except Exception: raise ObjectDoesNotExistError(value=None, driver=self, object_name=object_name) # Make a hash for the file based on the metadata. We can safely # use only the mtime attribute here. If the file contents change, # the underlying file-system will change mtime data_hash = self._get_hash_function() data_hash.update(u(stat.st_mtime).encode('ascii')) data_hash = data_hash.hexdigest() extra = {} extra['creation_time'] = stat.st_ctime extra['access_time'] = stat.st_atime extra['modify_time'] = stat.st_mtime return Object(name=object_name, size=stat.st_size, extra=extra, driver=self, container=container, hash=data_hash, meta_data=None) def iterate_containers(self): """ Return a generator of containers. :return: A generator of Container instances. :rtype: ``generator`` of :class:`Container` """ for container_name in os.listdir(self.base_path): full_path = os.path.join(self.base_path, container_name) if not os.path.isdir(full_path): continue yield self._make_container(container_name) def _get_objects(self, container): """ Recursively iterate through the file-system and return the object names """ cpath = self.get_container_cdn_url(container, check=True) for folder, subfolders, files in os.walk(cpath, topdown=True): # Remove unwanted subfolders for subf in IGNORE_FOLDERS: if subf in subfolders: subfolders.remove(subf) for name in files: full_path = os.path.join(folder, name) object_name = relpath(full_path, start=cpath) yield self._make_object(container, object_name) def iterate_container_objects(self, container): """ Returns a generator of objects for the given container. 
:param container: Container instance :type container: :class:`Container` :return: A generator of Object instances. :rtype: ``generator`` of :class:`Object` """ return self._get_objects(container) def get_container(self, container_name): """ Return a container instance. :param container_name: Container name. :type container_name: ``str`` :return: :class:`Container` instance. :rtype: :class:`Container` """ return self._make_container(container_name) def get_container_cdn_url(self, container, check=False): """ Return a container CDN URL. :param container: Container instance :type container: :class:`Container` :param check: Indicates if the path's existence must be checked :type check: ``bool`` :return: A CDN URL for this container. :rtype: ``str`` """ path = os.path.join(self.base_path, container.name) if check and not os.path.isdir(path): raise ContainerDoesNotExistError(value=None, driver=self, container_name=container.name) return path def get_object(self, container_name, object_name): """ Return an object instance. :param container_name: Container name. :type container_name: ``str`` :param object_name: Object name. :type object_name: ``str`` :return: :class:`Object` instance. :rtype: :class:`Object` """ container = self._make_container(container_name) return self._make_object(container, object_name) def get_object_cdn_url(self, obj): """ Return an object CDN URL. :param obj: Object instance :type obj: :class:`Object` :return: A CDN URL for this object. :rtype: ``str`` """ return os.path.join(self.base_path, obj.container.name, obj.name) def enable_container_cdn(self, container): """ Enable container CDN. :param container: Container instance :type container: :class:`Container` :rtype: ``bool`` """ path = self.get_container_cdn_url(container) lockfile.MkdirFileLock(path, threaded=True) with LockLocalStorage(path): self._make_path(path) return True def enable_object_cdn(self, obj): """ Enable object CDN. 
:param obj: Object instance :type obj: :class:`Object` :rtype: ``bool`` """ path = self.get_object_cdn_url(obj) with LockLocalStorage(path): if os.path.exists(path): return False try: obj_file = open(path, 'w') obj_file.close() except: return False return True def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): """ Download an object to the specified destination path. :param obj: Object instance. :type obj: :class:`Object` :param destination_path: Full path to a file or a directory where the incoming file will be saved. :type destination_path: ``str`` :param overwrite_existing: True to overwrite an existing file, defaults to False. :type overwrite_existing: ``bool`` :param delete_on_failure: True to delete a partially downloaded file if the download was not successful (hash mismatch / file size). :type delete_on_failure: ``bool`` :return: True if an object has been successfully downloaded, False otherwise. :rtype: ``bool`` """ obj_path = self.get_object_cdn_url(obj) base_name = os.path.basename(destination_path) if not base_name and not os.path.exists(destination_path): raise LibcloudError( value='Path %s does not exist' % (destination_path), driver=self) if not base_name: file_path = os.path.join(destination_path, obj.name) else: file_path = destination_path if os.path.exists(file_path) and not overwrite_existing: raise LibcloudError( value='File %s already exists, but ' % (file_path) + 'overwrite_existing=False', driver=self) try: shutil.copy(obj_path, file_path) except IOError: if delete_on_failure: try: os.unlink(file_path) except Exception: pass return False return True def download_object_as_stream(self, obj, chunk_size=None): """ Return a generator which yields object data. :param obj: Object instance :type obj: :class:`Object` :param chunk_size: Optional chunk size (in bytes). :type chunk_size: ``int`` :return: A stream of binary chunks of data. 
:rtype: ``object`` """ path = self.get_object_cdn_url(obj) with open(path, 'rb') as obj_file: for data in read_in_chunks(obj_file, chunk_size=chunk_size): yield data def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True): """ Upload an object currently located on a disk. :param file_path: Path to the object on disk. :type file_path: ``str`` :param container: Destination container. :type container: :class:`Container` :param object_name: Object name. :type object_name: ``str`` :param verify_hash: Verify hast :type verify_hash: ``bool`` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: ``object`` """ path = self.get_container_cdn_url(container, check=True) obj_path = os.path.join(path, object_name) base_path = os.path.dirname(obj_path) self._make_path(base_path) with LockLocalStorage(obj_path): shutil.copy(file_path, obj_path) os.chmod(obj_path, int('664', 8)) return self._make_object(container, object_name) def upload_object_via_stream(self, iterator, container, object_name, extra=None): """ Upload an object using an iterator. If a provider supports it, chunked transfer encoding is used and you don't need to know in advance the amount of data to be uploaded. Otherwise if a provider doesn't support it, iterator will be exhausted so a total size for data to be uploaded can be determined. Note: Exhausting the iterator means that the whole data must be buffered in memory which might result in memory exhausting when uploading a very large object. If a file is located on a disk you are advised to use upload_object function which uses fs.stat function to determine the file size and it doesn't need to buffer whole object in the memory. :type iterator: ``object`` :param iterator: An object which implements the iterator interface and yields binary chunks of data. :type container: :class:`Container` :param container: Destination container. :type object_name: ``str`` :param object_name: Object name. 
:type extra: ``dict`` :param extra: (optional) Extra attributes (driver specific). Note: This dictionary must contain a 'content_type' key which represents a content type of the stored object. :rtype: ``object`` """ path = self.get_container_cdn_url(container, check=True) obj_path = os.path.join(path, object_name) base_path = os.path.dirname(obj_path) self._make_path(base_path) with LockLocalStorage(obj_path): with open(obj_path, 'wb') as obj_file: for data in iterator: obj_file.write(data) os.chmod(obj_path, int('664', 8)) return self._make_object(container, object_name) def delete_object(self, obj): """ Delete an object. :type obj: :class:`Object` :param obj: Object instance. :return: ``bool`` True on success. :rtype: ``bool`` """ path = self.get_object_cdn_url(obj) with LockLocalStorage(path): try: os.unlink(path) except Exception: return False # Check and delete all the empty parent folders path = os.path.dirname(path) container_url = obj.container.get_cdn_url() # Delete the empty parent folders till the container's level while path != container_url: try: os.rmdir(path) except OSError: exp = sys.exc_info()[1] if exp.errno == errno.ENOTEMPTY: break raise exp path = os.path.dirname(path) return True def create_container(self, container_name): """ Create a new container. :type container_name: ``str`` :param container_name: Container name. :return: :class:`Container` instance on success. :rtype: :class:`Container` """ self._check_container_name(container_name) path = os.path.join(self.base_path, container_name) try: self._make_path(path, ignore_existing=False) except OSError: exp = sys.exc_info()[1] if exp.errno == errno.EEXIST: raise ContainerAlreadyExistsError( value='Container with this name already exists. 
The name ' 'must be unique among all the containers in the ' 'system', container_name=container_name, driver=self) else: raise LibcloudError( 'Error creating container %s' % container_name, driver=self) except Exception: raise LibcloudError( 'Error creating container %s' % container_name, driver=self) return self._make_container(container_name) def delete_container(self, container): """ Delete a container. :type container: :class:`Container` :param container: Container instance :return: True on success, False otherwise. :rtype: ``bool`` """ # Check if there are any objects inside this for obj in self._get_objects(container): raise ContainerIsNotEmptyError(value='Container is not empty', container_name=container.name, driver=self) path = self.get_container_cdn_url(container, check=True) with LockLocalStorage(path): try: shutil.rmtree(path) except Exception: return False return True apache-libcloud-2.2.1/libcloud/storage/drivers/atmos.py0000664000175000017500000004115613153541406023043 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys import base64 import hashlib import hmac import time from libcloud.utils.py3 import PY3 from libcloud.utils.py3 import b from libcloud.utils.py3 import httplib from libcloud.utils.py3 import next from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import urlencode from libcloud.utils.py3 import urlquote from libcloud.utils.py3 import urlunquote if PY3: from io import FileIO as file from libcloud.utils.files import read_in_chunks, guess_file_mime_type from libcloud.common.base import ConnectionUserAndKey, XmlResponse from libcloud.common.types import LibcloudError from libcloud.storage.base import Object, Container, StorageDriver, CHUNK_SIZE from libcloud.storage.types import ContainerAlreadyExistsError, \ ContainerDoesNotExistError, ContainerIsNotEmptyError, \ ObjectDoesNotExistError def collapse(s): return ' '.join([x for x in s.split(' ') if x]) class AtmosError(LibcloudError): def __init__(self, code, message, driver=None): super(AtmosError, self).__init__(value=message, driver=driver) self.code = code class AtmosResponse(XmlResponse): def success(self): return self.status in (httplib.OK, httplib.CREATED, httplib.NO_CONTENT, httplib.PARTIAL_CONTENT) def parse_error(self): tree = self.parse_body() if tree is None: return None code = int(tree.find('Code').text) message = tree.find('Message').text raise AtmosError(code=code, message=message, driver=self.connection.driver) class AtmosConnection(ConnectionUserAndKey): responseCls = AtmosResponse def add_default_headers(self, headers): headers['x-emc-uid'] = self.user_id headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime()) headers['x-emc-date'] = headers['Date'] if 'Content-Type' not in headers: headers['Content-Type'] = 'application/octet-stream' if 'Accept' not in headers: headers['Accept'] = '*/*' return headers def pre_connect_hook(self, params, headers): headers['x-emc-signature'] = self._calculate_signature(params, headers) return params, headers def 
_calculate_signature(self, params, headers): pathstring = urlunquote(self.action) if pathstring.startswith(self.driver.path): pathstring = pathstring[len(self.driver.path):] if params: if type(params) is dict: params = list(params.items()) pathstring += '?' + urlencode(params) pathstring = pathstring.lower() xhdrs = [(k, v) for k, v in list(headers.items()) if k.startswith('x-emc-')] xhdrs.sort(key=lambda x: x[0]) signature = [ self.method, headers.get('Content-Type', ''), headers.get('Range', ''), headers.get('Date', ''), pathstring, ] signature.extend([k + ':' + collapse(v) for k, v in xhdrs]) signature = '\n'.join(signature) key = base64.b64decode(self.key) signature = hmac.new(b(key), b(signature), hashlib.sha1).digest() return base64.b64encode(b(signature)).decode('utf-8') class AtmosDriver(StorageDriver): connectionCls = AtmosConnection host = None path = None api_name = 'atmos' supports_chunked_encoding = True website = 'http://atmosonline.com/' name = 'atmos' DEFAULT_CDN_TTL = 60 * 60 * 24 * 7 # 1 week def __init__(self, key, secret=None, secure=True, host=None, port=None): host = host or self.host super(AtmosDriver, self).__init__(key, secret, secure, host, port) def iterate_containers(self): result = self.connection.request(self._namespace_path('')) entries = self._list_objects(result.object, object_type='directory') for entry in entries: extra = { 'object_id': entry['id'] } yield Container(entry['name'], extra, self) def get_container(self, container_name): path = self._namespace_path(container_name) + '/?metadata/system' try: result = self.connection.request(path) except AtmosError: e = sys.exc_info()[1] if e.code != 1003: raise raise ContainerDoesNotExistError(e, self, container_name) meta = self._emc_meta(result) extra = { 'object_id': meta['objectid'] } return Container(container_name, extra, self) def create_container(self, container_name): path = self._namespace_path(container_name) + '/' try: self.connection.request(path, method='POST') except 
AtmosError: e = sys.exc_info()[1] if e.code != 1016: raise raise ContainerAlreadyExistsError(e, self, container_name) return self.get_container(container_name) def delete_container(self, container): try: self.connection.request(self._namespace_path(container.name) + '/', method='DELETE') except AtmosError: e = sys.exc_info()[1] if e.code == 1003: raise ContainerDoesNotExistError(e, self, container.name) elif e.code == 1023: raise ContainerIsNotEmptyError(e, self, container.name) return True def get_object(self, container_name, object_name): container = self.get_container(container_name) object_name_cleaned = self._clean_object_name(object_name) path = self._namespace_path(container_name) + '/' + object_name_cleaned try: result = self.connection.request(path + '?metadata/system') system_meta = self._emc_meta(result) result = self.connection.request(path + '?metadata/user') user_meta = self._emc_meta(result) except AtmosError: e = sys.exc_info()[1] if e.code != 1003: raise raise ObjectDoesNotExistError(e, self, object_name) last_modified = time.strptime(system_meta['mtime'], '%Y-%m-%dT%H:%M:%SZ') last_modified = time.strftime('%a, %d %b %Y %H:%M:%S GMT', last_modified) extra = { 'object_id': system_meta['objectid'], 'last_modified': last_modified } data_hash = user_meta.pop('md5', '') return Object(object_name, int(system_meta['size']), data_hash, extra, user_meta, container, self) def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True): method = 'PUT' extra = extra or {} object_name_cleaned = self._clean_object_name(object_name) request_path = self._namespace_path(container.name) + '/' +\ object_name_cleaned content_type = extra.get('content_type', None) try: self.connection.request(request_path + '?metadata/system') except AtmosError: e = sys.exc_info()[1] if e.code != 1003: raise method = 'POST' result_dict = self._upload_object( object_name=object_name, content_type=content_type, request_path=request_path, request_method=method, 
headers={}, file_path=file_path) bytes_transferred = result_dict['bytes_transferred'] if extra is None: meta_data = {} else: meta_data = extra.get('meta_data', {}) meta_data['md5'] = result_dict['data_hash'] user_meta = ', '.join([k + '=' + str(v) for k, v in list(meta_data.items())]) self.connection.request(request_path + '?metadata/user', method='POST', headers={'x-emc-meta': user_meta}) result = self.connection.request(request_path + '?metadata/system') meta = self._emc_meta(result) del meta_data['md5'] extra = { 'object_id': meta['objectid'], 'meta_data': meta_data, } return Object(object_name, bytes_transferred, result_dict['data_hash'], extra, meta_data, container, self) def upload_object_via_stream(self, iterator, container, object_name, extra=None): if isinstance(iterator, file): iterator = iter(iterator) data_hash = hashlib.md5() generator = read_in_chunks(iterator, CHUNK_SIZE, True) bytes_transferred = 0 try: chunk = next(generator) except StopIteration: chunk = '' path = self._namespace_path(container.name + '/' + object_name) method = 'PUT' if extra is not None: content_type = extra.get('content_type', None) else: content_type = None if not content_type: content_type, _ = guess_file_mime_type(object_name) if not content_type: raise AttributeError( 'File content-type could not be guessed and' + ' no content_type value provided') try: self.connection.request(path + '?metadata/system') except AtmosError: e = sys.exc_info()[1] if e.code != 1003: raise method = 'POST' while True: end = bytes_transferred + len(chunk) - 1 data_hash.update(b(chunk)) headers = { 'x-emc-meta': 'md5=' + data_hash.hexdigest(), 'Content-Type': content_type, } if len(chunk) > 0 and bytes_transferred > 0: headers['Range'] = 'Bytes=%d-%d' % (bytes_transferred, end) method = 'PUT' result = self.connection.request(path, method=method, data=chunk, headers=headers) bytes_transferred += len(chunk) try: chunk = next(generator) except StopIteration: break if len(chunk) == 0: break data_hash = 
data_hash.hexdigest() if extra is None: meta_data = {} else: meta_data = extra.get('meta_data', {}) meta_data['md5'] = data_hash user_meta = ', '.join([k + '=' + str(v) for k, v in list(meta_data.items())]) self.connection.request(path + '?metadata/user', method='POST', headers={'x-emc-meta': user_meta}) result = self.connection.request(path + '?metadata/system') meta = self._emc_meta(result) extra = { 'object_id': meta['objectid'], 'meta_data': meta_data, } return Object(object_name, bytes_transferred, data_hash, extra, meta_data, container, self) def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): path = self._namespace_path(obj.container.name + '/' + obj.name) response = self.connection.request(path, method='GET', raw=True) return self._get_object(obj=obj, callback=self._save_object, response=response, callback_kwargs={ 'obj': obj, 'response': response.response, 'destination_path': destination_path, 'overwrite_existing': overwrite_existing, 'delete_on_failure': delete_on_failure }, success_status_code=httplib.OK) def download_object_as_stream(self, obj, chunk_size=None): path = self._namespace_path(obj.container.name + '/' + obj.name) response = self.connection.request(path, method='GET', raw=True) return self._get_object(obj=obj, callback=read_in_chunks, response=response, callback_kwargs={ 'iterator': response.response, 'chunk_size': chunk_size }, success_status_code=httplib.OK) def delete_object(self, obj): path = self._namespace_path(obj.container.name) + '/' +\ self._clean_object_name(obj.name) try: self.connection.request(path, method='DELETE') except AtmosError: e = sys.exc_info()[1] if e.code != 1003: raise raise ObjectDoesNotExistError(e, self, obj.name) return True def enable_object_cdn(self, obj): return True def get_object_cdn_url(self, obj, expiry=None, use_object=False): """ Return an object CDN URL. 
:param obj: Object instance :type obj: :class:`Object` :param expiry: Expiry :type expiry: ``str`` :param use_object: Use object :type use_object: ``bool`` :rtype: ``str`` """ if use_object: path = '/rest/objects' + obj.meta_data['object_id'] else: path = '/rest/namespace/' + obj.container.name + '/' + obj.name if self.secure: protocol = 'https' else: protocol = 'http' expiry = str(expiry or int(time.time()) + self.DEFAULT_CDN_TTL) params = [ ('uid', self.key), ('expires', expiry), ] params.append(('signature', self._cdn_signature(path, params, expiry))) params = urlencode(params) path = self.path + path return urlparse.urlunparse((protocol, self.host, path, '', params, '')) def _cdn_signature(self, path, params, expiry): key = base64.b64decode(self.secret) signature = '\n'.join(['GET', path.lower(), self.key, expiry]) signature = hmac.new(key, signature, hashlib.sha1).digest() return base64.b64encode(signature) def _list_objects(self, tree, object_type=None): listing = tree.find(self._emc_tag('DirectoryList')) entries = [] for entry in listing.findall(self._emc_tag('DirectoryEntry')): file_type = entry.find(self._emc_tag('FileType')).text if object_type is not None and object_type != file_type: continue entries.append({ 'id': entry.find(self._emc_tag('ObjectID')).text, 'type': file_type, 'name': entry.find(self._emc_tag('Filename')).text }) return entries def _clean_object_name(self, name): return urlquote(name.encode('ascii')) def _namespace_path(self, path): return self.path + '/rest/namespace/' + urlquote(path.encode('ascii')) def _object_path(self, object_id): return self.path + '/rest/objects/' + object_id.encode('ascii') @staticmethod def _emc_tag(tag): return '{http://www.emc.com/cos/}' + tag def _emc_meta(self, response): meta = response.headers.get('x-emc-meta', '') if len(meta) == 0: return {} meta = meta.split(', ') return dict([x.split('=', 1) for x in meta]) def iterate_container_objects(self, container): headers = {'x-emc-include-meta': '1'} path = 
self._namespace_path(container.name) + '/' result = self.connection.request(path, headers=headers) entries = self._list_objects(result.object, object_type='regular') for entry in entries: metadata = {'object_id': entry['id']} yield Object(entry['name'], 0, '', {}, metadata, container, self) apache-libcloud-2.2.1/libcloud/storage/base.py0000664000175000017500000005377013160235057021161 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Provides base classes for working with storage """ # Backward compatibility for Python 2.5 from __future__ import with_statement import os.path # pylint: disable-msg=W0404 import hashlib from os.path import join as pjoin from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b import libcloud.utils.files from libcloud.common.types import LibcloudError from libcloud.common.base import ConnectionUserAndKey, BaseDriver from libcloud.storage.types import ObjectDoesNotExistError __all__ = [ 'Object', 'Container', 'StorageDriver', 'CHUNK_SIZE', 'DEFAULT_CONTENT_TYPE' ] CHUNK_SIZE = 8096 # Default Content-Type which is sent when uploading an object if one is not # supplied and can't be detected when using non-strict mode. 
DEFAULT_CONTENT_TYPE = 'application/octet-stream' class Object(object): """ Represents an object (BLOB). """ def __init__(self, name, size, hash, extra, meta_data, container, driver): """ :param name: Object name (must be unique per container). :type name: ``str`` :param size: Object size in bytes. :type size: ``int`` :param hash: Object hash. :type hash: ``str`` :param container: Object container. :type container: :class:`Container` :param extra: Extra attributes. :type extra: ``dict`` :param meta_data: Optional object meta data. :type meta_data: ``dict`` :param driver: StorageDriver instance. :type driver: :class:`StorageDriver` """ self.name = name self.size = size self.hash = hash self.container = container self.extra = extra or {} self.meta_data = meta_data or {} self.driver = driver def get_cdn_url(self): return self.driver.get_object_cdn_url(obj=self) def enable_cdn(self, **kwargs): return self.driver.enable_object_cdn(obj=self, **kwargs) def download(self, destination_path, overwrite_existing=False, delete_on_failure=True): return self.driver.download_object(self, destination_path, overwrite_existing, delete_on_failure) def as_stream(self, chunk_size=None): return self.driver.download_object_as_stream(self, chunk_size) def delete(self): return self.driver.delete_object(self) def __repr__(self): return ('' % (self.name, self.size, self.hash, self.driver.name)) class Container(object): """ Represents a container (bucket) which can hold multiple objects. """ def __init__(self, name, extra, driver): """ :param name: Container name (must be unique). :type name: ``str`` :param extra: Extra attributes. :type extra: ``dict`` :param driver: StorageDriver instance. 
:type driver: :class:`StorageDriver` """ self.name = name self.extra = extra or {} self.driver = driver def iterate_objects(self): return self.driver.iterate_container_objects(container=self) def list_objects(self): return self.driver.list_container_objects(container=self) def get_cdn_url(self): return self.driver.get_container_cdn_url(container=self) def enable_cdn(self, **kwargs): return self.driver.enable_container_cdn(container=self, **kwargs) def get_object(self, object_name): return self.driver.get_object(container_name=self.name, object_name=object_name) def upload_object(self, file_path, object_name, extra=None, **kwargs): return self.driver.upload_object( file_path, self, object_name, extra=extra, **kwargs) def upload_object_via_stream(self, iterator, object_name, extra=None, **kwargs): return self.driver.upload_object_via_stream( iterator, self, object_name, extra=extra, **kwargs) def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): return self.driver.download_object( obj, destination_path, overwrite_existing=overwrite_existing, delete_on_failure=delete_on_failure) def download_object_as_stream(self, obj, chunk_size=None): return self.driver.download_object_as_stream(obj, chunk_size) def delete_object(self, obj): return self.driver.delete_object(obj) def delete(self): return self.driver.delete_container(self) def __repr__(self): return ('' % (self.name, self.driver.name)) class StorageDriver(BaseDriver): """ A base StorageDriver to derive from. """ connectionCls = ConnectionUserAndKey name = None hash_type = 'md5' supports_chunked_encoding = False # When strict mode is used, exception will be thrown if no content type is # provided and none can be detected when uploading an object strict_mode = False def iterate_containers(self): """ Return a generator of containers for the given account :return: A generator of Container instances. 
:rtype: ``generator`` of :class:`Container` """ raise NotImplementedError( 'iterate_containers not implemented for this driver') def list_containers(self): """ Return a list of containers. :return: A list of Container instances. :rtype: ``list`` of :class:`Container` """ return list(self.iterate_containers()) def iterate_container_objects(self, container): """ Return a generator of objects for the given container. :param container: Container instance :type container: :class:`Container` :return: A generator of Object instances. :rtype: ``generator`` of :class:`Object` """ raise NotImplementedError( 'iterate_container_objects not implemented for this driver') def list_container_objects(self, container): """ Return a list of objects for the given container. :param container: Container instance. :type container: :class:`Container` :return: A list of Object instances. :rtype: ``list`` of :class:`Object` """ return list(self.iterate_container_objects(container)) def get_container(self, container_name): """ Return a container instance. :param container_name: Container name. :type container_name: ``str`` :return: :class:`Container` instance. :rtype: :class:`Container` """ raise NotImplementedError( 'get_object not implemented for this driver') def get_container_cdn_url(self, container): """ Return a container CDN URL. :param container: Container instance :type container: :class:`Container` :return: A CDN URL for this container. :rtype: ``str`` """ raise NotImplementedError( 'get_container_cdn_url not implemented for this driver') def get_object(self, container_name, object_name): """ Return an object instance. :param container_name: Container name. :type container_name: ``str`` :param object_name: Object name. :type object_name: ``str`` :return: :class:`Object` instance. :rtype: :class:`Object` """ raise NotImplementedError( 'get_object not implemented for this driver') def get_object_cdn_url(self, obj): """ Return an object CDN URL. 
:param obj: Object instance :type obj: :class:`Object` :return: A CDN URL for this object. :rtype: ``str`` """ raise NotImplementedError( 'get_object_cdn_url not implemented for this driver') def enable_container_cdn(self, container): """ Enable container CDN. :param container: Container instance :type container: :class:`Container` :rtype: ``bool`` """ raise NotImplementedError( 'enable_container_cdn not implemented for this driver') def enable_object_cdn(self, obj): """ Enable object CDN. :param obj: Object instance :type obj: :class:`Object` :rtype: ``bool`` """ raise NotImplementedError( 'enable_object_cdn not implemented for this driver') def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): """ Download an object to the specified destination path. :param obj: Object instance. :type obj: :class:`Object` :param destination_path: Full path to a file or a directory where the incoming file will be saved. :type destination_path: ``str`` :param overwrite_existing: True to overwrite an existing file, defaults to False. :type overwrite_existing: ``bool`` :param delete_on_failure: True to delete a partially downloaded file if the download was not successful (hash mismatch / file size). :type delete_on_failure: ``bool`` :return: True if an object has been successfully downloaded, False otherwise. :rtype: ``bool`` """ raise NotImplementedError( 'download_object not implemented for this driver') def download_object_as_stream(self, obj, chunk_size=None): """ Return a generator which yields object data. :param obj: Object instance :type obj: :class:`Object` :param chunk_size: Optional chunk size (in bytes). :type chunk_size: ``int`` """ raise NotImplementedError( 'download_object_as_stream not implemented for this driver') def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, headers=None): """ Upload an object currently located on a disk. :param file_path: Path to the object on disk. 
:type file_path: ``str`` :param container: Destination container. :type container: :class:`Container` :param object_name: Object name. :type object_name: ``str`` :param verify_hash: Verify hash :type verify_hash: ``bool`` :param extra: Extra attributes (driver specific). (optional) :type extra: ``dict`` :param headers: (optional) Additional request headers, such as CORS headers. For example: headers = {'Access-Control-Allow-Origin': 'http://mozilla.com'} :type headers: ``dict`` :rtype: :class:`Object` """ raise NotImplementedError( 'upload_object not implemented for this driver') def upload_object_via_stream(self, iterator, container, object_name, extra=None, headers=None): """ Upload an object using an iterator. If a provider supports it, chunked transfer encoding is used and you don't need to know in advance the amount of data to be uploaded. Otherwise if a provider doesn't support it, iterator will be exhausted so a total size for data to be uploaded can be determined. Note: Exhausting the iterator means that the whole data must be buffered in memory which might result in memory exhausting when uploading a very large object. If a file is located on a disk you are advised to use upload_object function which uses fs.stat function to determine the file size and it doesn't need to buffer whole object in the memory. :param iterator: An object which implements the iterator interface. :type iterator: :class:`object` :param container: Destination container. :type container: :class:`Container` :param object_name: Object name. :type object_name: ``str`` :param extra: (optional) Extra attributes (driver specific). Note: This dictionary must contain a 'content_type' key which represents a content type of the stored object. :type extra: ``dict`` :param headers: (optional) Additional request headers, such as CORS headers. 
For example: headers = {'Access-Control-Allow-Origin': 'http://mozilla.com'} :type headers: ``dict`` :rtype: ``object`` """ raise NotImplementedError( 'upload_object_via_stream not implemented for this driver') def delete_object(self, obj): """ Delete an object. :param obj: Object instance. :type obj: :class:`Object` :return: ``bool`` True on success. :rtype: ``bool`` """ raise NotImplementedError( 'delete_object not implemented for this driver') def create_container(self, container_name): """ Create a new container. :param container_name: Container name. :type container_name: ``str`` :return: Container instance on success. :rtype: :class:`Container` """ raise NotImplementedError( 'create_container not implemented for this driver') def delete_container(self, container): """ Delete a container. :param container: Container instance :type container: :class:`Container` :return: ``True`` on success, ``False`` otherwise. :rtype: ``bool`` """ raise NotImplementedError( 'delete_container not implemented for this driver') def _get_object(self, obj, callback, callback_kwargs, response, success_status_code=None): """ Call passed callback and start transfer of the object' :param obj: Object instance. :type obj: :class:`Object` :param callback: Function which is called with the passed callback_kwargs :type callback: :class:`function` :param callback_kwargs: Keyword arguments which are passed to the callback. :type callback_kwargs: ``dict`` :param response: Response instance. :type response: :class:`Response` :param success_status_code: Status code which represents a successful transfer (defaults to httplib.OK) :type success_status_code: ``int`` :return: ``True`` on success, ``False`` otherwise. 
:rtype: ``bool`` """ success_status_code = success_status_code or httplib.OK if response.status == success_status_code: return callback(**callback_kwargs) elif response.status == httplib.NOT_FOUND: raise ObjectDoesNotExistError(object_name=obj.name, value='', driver=self) raise LibcloudError(value='Unexpected status code: %s' % (response.status), driver=self) def _save_object(self, response, obj, destination_path, overwrite_existing=False, delete_on_failure=True, chunk_size=None): """ Save object to the provided path. :param response: RawResponse instance. :type response: :class:`RawResponse` :param obj: Object instance. :type obj: :class:`Object` :param destination_path: Destination directory. :type destination_path: ``str`` :param delete_on_failure: True to delete partially downloaded object if the download fails. :type delete_on_failure: ``bool`` :param overwrite_existing: True to overwrite a local path if it already exists. :type overwrite_existing: ``bool`` :param chunk_size: Optional chunk size (defaults to ``libcloud.storage.base.CHUNK_SIZE``, 8kb) :type chunk_size: ``int`` :return: ``True`` on success, ``False`` otherwise. :rtype: ``bool`` """ chunk_size = chunk_size or CHUNK_SIZE base_name = os.path.basename(destination_path) if not base_name and not os.path.exists(destination_path): raise LibcloudError( value='Path %s does not exist' % (destination_path), driver=self) if not base_name: file_path = pjoin(destination_path, obj.name) else: file_path = destination_path if os.path.exists(file_path) and not overwrite_existing: raise LibcloudError( value='File %s already exists, but ' % (file_path) + 'overwrite_existing=False', driver=self) bytes_transferred = 0 with open(file_path, 'wb') as file_handle: for chunk in response._response.iter_content(chunk_size): file_handle.write(b(chunk)) bytes_transferred += len(chunk) if int(obj.size) != int(bytes_transferred): # Transfer failed, support retry? 
if delete_on_failure: try: os.unlink(file_path) except Exception: pass return False return True def _upload_object(self, object_name, content_type, request_path, request_method='PUT', headers=None, file_path=None, stream=None, upload_func=None, upload_func_kwargs=None, chunked=False, multipart=False): """ Helper function for setting common request headers and calling the passed in callback which uploads an object. """ headers = headers or {} if file_path and not os.path.exists(file_path): raise OSError('File %s does not exist' % (file_path)) if stream is not None and not hasattr(stream, 'next') and not \ hasattr(stream, '__next__'): raise AttributeError('iterator object must implement next() ' + 'method.') if not content_type: if file_path: name = file_path else: name = object_name content_type, _ = libcloud.utils.files.guess_file_mime_type(name) if not content_type: if self.strict_mode: raise AttributeError('File content-type could not be ' 'guessed and no content_type value ' 'is provided') else: # Fallback to a content-type content_type = DEFAULT_CONTENT_TYPE headers['Content-Type'] = content_type if stream: response = self.connection.request( request_path, method=request_method, data=stream, headers=headers, raw=True) stream_hash, stream_length = self._hash_buffered_stream( stream, self._get_hash_function()) else: with open(file_path, 'rb') as file_stream: response = self.connection.request( request_path, method=request_method, data=file_stream, headers=headers, raw=True) with open(file_path, 'rb') as file_stream: stream_hash, stream_length = self._hash_buffered_stream( file_stream, self._get_hash_function()) if not response.success(): response.parse_error() if upload_func: upload_func(**upload_func_kwargs) return {'response': response, 'bytes_transferred': stream_length, 'data_hash': stream_hash} def _hash_buffered_stream(self, stream, hasher, blocksize=65536): total_len = 0 if hasattr(stream, '__next__'): data = 
libcloud.utils.files.exhaust_iterator(iterator=stream) hasher.update(b(data)) total_len = len(data) return (hasher.hexdigest(), total_len) if not hasattr(stream, '__exit__'): for s in stream: hasher.update(s) total_len = total_len + len(s) return (hasher.hexdigest(), total_len) with stream: buf = stream.read(blocksize) while len(buf) > 0: total_len = total_len + len(buf) hasher.update(buf) buf = stream.read(blocksize) return (hasher.hexdigest(), total_len) def _get_hash_function(self): """ Return instantiated hash function for the hash type supported by the provider. """ try: func = getattr(hashlib, self.hash_type)() except AttributeError: raise RuntimeError('Invalid or unsupported hash type: %s' % (self.hash_type)) return func apache-libcloud-2.2.1/libcloud/compute/0000775000175000017500000000000013160535107017670 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/compute/providers.py0000664000175000017500000001512013153541406022257 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Provider related utilities """ from libcloud.compute.types import Provider from libcloud.common.providers import get_driver as _get_provider_driver from libcloud.common.providers import set_driver as _set_provider_driver from libcloud.compute.types import OLD_CONSTANT_TO_NEW_MAPPING from libcloud.compute.deprecated import DEPRECATED_DRIVERS __all__ = [ "Provider", "DRIVERS", "get_driver"] DRIVERS = { Provider.AZURE: ('libcloud.compute.drivers.azure', 'AzureNodeDriver'), Provider.AZURE_ARM: ('libcloud.compute.drivers.azure_arm', 'AzureNodeDriver'), Provider.DUMMY: ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'), Provider.EC2: ('libcloud.compute.drivers.ec2', 'EC2NodeDriver'), Provider.ECP: ('libcloud.compute.drivers.ecp', 'ECPNodeDriver'), Provider.ELASTICHOSTS: ('libcloud.compute.drivers.elastichosts', 'ElasticHostsNodeDriver'), Provider.SKALICLOUD: ('libcloud.compute.drivers.skalicloud', 'SkaliCloudNodeDriver'), Provider.SERVERLOVE: ('libcloud.compute.drivers.serverlove', 'ServerLoveNodeDriver'), Provider.CLOUDSIGMA: ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaNodeDriver'), Provider.GCE: ('libcloud.compute.drivers.gce', 'GCENodeDriver'), Provider.GOGRID: ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'), Provider.RACKSPACE: ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'), Provider.RACKSPACE_FIRST_GEN: ('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'), Provider.KILI: ('libcloud.compute.drivers.kili', 'KiliCloudNodeDriver'), Provider.VPSNET: ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'), Provider.LINODE: ('libcloud.compute.drivers.linode', 'LinodeNodeDriver'), Provider.RIMUHOSTING: ('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'), Provider.VOXEL: ('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'), Provider.SOFTLAYER: ('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'), Provider.EUCALYPTUS: ('libcloud.compute.drivers.ec2', 'EucNodeDriver'), Provider.OPENNEBULA: 
('libcloud.compute.drivers.opennebula', 'OpenNebulaNodeDriver'), Provider.BRIGHTBOX: ('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'), Provider.NIMBUS: ('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'), Provider.BLUEBOX: ('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'), Provider.GANDI: ('libcloud.compute.drivers.gandi', 'GandiNodeDriver'), Provider.DIMENSIONDATA: ('libcloud.compute.drivers.dimensiondata', 'DimensionDataNodeDriver'), Provider.OPENSTACK: ('libcloud.compute.drivers.openstack', 'OpenStackNodeDriver'), Provider.VCLOUD: ('libcloud.compute.drivers.vcloud', 'VCloudNodeDriver'), Provider.TERREMARK: ('libcloud.compute.drivers.vcloud', 'TerremarkDriver'), Provider.CLOUDSTACK: ('libcloud.compute.drivers.cloudstack', 'CloudStackNodeDriver'), Provider.LIBVIRT: ('libcloud.compute.drivers.libvirt_driver', 'LibvirtNodeDriver'), Provider.JOYENT: ('libcloud.compute.drivers.joyent', 'JoyentNodeDriver'), Provider.VCL: ('libcloud.compute.drivers.vcl', 'VCLNodeDriver'), Provider.KTUCLOUD: ('libcloud.compute.drivers.ktucloud', 'KTUCloudNodeDriver'), Provider.HOSTVIRTUAL: ('libcloud.compute.drivers.hostvirtual', 'HostVirtualNodeDriver'), Provider.ABIQUO: ('libcloud.compute.drivers.abiquo', 'AbiquoNodeDriver'), Provider.DIGITAL_OCEAN: ('libcloud.compute.drivers.digitalocean', 'DigitalOceanNodeDriver'), Provider.NEPHOSCALE: ('libcloud.compute.drivers.nephoscale', 'NephoscaleNodeDriver'), Provider.EXOSCALE: ('libcloud.compute.drivers.exoscale', 'ExoscaleNodeDriver'), Provider.IKOULA: ('libcloud.compute.drivers.ikoula', 'IkoulaNodeDriver'), Provider.OUTSCALE_SAS: ('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'), Provider.OUTSCALE_INC: ('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'), Provider.VSPHERE: ('libcloud.compute.drivers.vsphere', 'VSphereNodeDriver'), Provider.PROFIT_BRICKS: ('libcloud.compute.drivers.profitbricks', 'ProfitBricksNodeDriver'), Provider.VULTR: ('libcloud.compute.drivers.vultr', 'VultrNodeDriver'), 
Provider.AURORACOMPUTE: ('libcloud.compute.drivers.auroracompute', 'AuroraComputeNodeDriver'), Provider.CLOUDWATT: ('libcloud.compute.drivers.cloudwatt', 'CloudwattNodeDriver'), Provider.PACKET: ('libcloud.compute.drivers.packet', 'PacketNodeDriver'), Provider.ONAPP: ('libcloud.compute.drivers.onapp', 'OnAppNodeDriver'), Provider.OVH: ('libcloud.compute.drivers.ovh', 'OvhNodeDriver'), Provider.INTERNETSOLUTIONS: ('libcloud.compute.drivers.internetsolutions', 'InternetSolutionsNodeDriver'), Provider.INDOSAT: ('libcloud.compute.drivers.indosat', 'IndosatNodeDriver'), Provider.MEDONE: ('libcloud.compute.drivers.medone', 'MedOneNodeDriver'), Provider.BSNL: ('libcloud.compute.drivers.bsnl', 'BSNLNodeDriver'), Provider.NTTA: ('libcloud.compute.drivers.ntta', 'NTTAmericaNodeDriver'), Provider.ALIYUN_ECS: ('libcloud.compute.drivers.ecs', 'ECSDriver'), Provider.CLOUDSCALE: ('libcloud.compute.drivers.cloudscale', 'CloudscaleNodeDriver'), Provider.ONEANDONE: ('libcloud.compute.drivers.oneandone', 'OneAndOneNodeDriver'), } def get_driver(provider): deprecated_constants = OLD_CONSTANT_TO_NEW_MAPPING return _get_provider_driver(drivers=DRIVERS, provider=provider, deprecated_providers=DEPRECATED_DRIVERS, deprecated_constants=deprecated_constants) def set_driver(provider, module, klass): return _set_provider_driver(drivers=DRIVERS, provider=provider, module=module, klass=klass) apache-libcloud-2.2.1/libcloud/compute/__init__.py0000664000175000017500000000005612701023453021776 0ustar kamikami00000000000000""" Module for working with Cloud Servers """ apache-libcloud-2.2.1/libcloud/compute/deprecated.py0000664000175000017500000000447713153541406022357 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Database of deprecated drivers """ from libcloud.compute.types import Provider DEPRECATED_DRIVERS = { Provider.OPSOURCE: { 'reason': 'OpSource cloud is now part of Dimension Data, ' 'use the DIMENSIONDATA provider instead.', 'url': 'http://www.ntt.co.jp/news2011/1107e/110701a.html' }, Provider.NINEFOLD: { 'reason': 'We will shortly notify our customers that we ' 'will be sunsetting our Public Cloud Computing ' '(Server) platform, the last day of operation ' 'being January 30, 2016', 'url': 'https://ninefold.com/news/' }, Provider.IBM: { 'reason': 'IBM SmartCloud Enterprise has been deprecated ' 'in favour of IBM SoftLayer Public Cloud, please' ' use the SOFTLAYER provider.', 'url': 'http://www.ibm.com/midmarket/us/en/article_cloud6_1310.html' }, Provider.HPCLOUD: { 'reason': 'HP Helion Public Cloud was shut down in January 2016.', 'url': 'http://libcloud.apache.org/blog/' '2016/02/16/new-drivers-deprecated-drivers.html' }, Provider.CLOUDFRAMES: { 'reason': 'The CloudFrames Provider is no longer supported', 'url': 'http://libcloud.apache.org/blog/2016/02/16/new-drivers-' 'deprecated-drivers.html' }, Provider.RUNABOVE: { 'reason': 'The RunAbove compute is no longer supported. 
' 'Use the OVH one instead.', 'url': 'https://www.runabove.com/cloud-instance.xml' } } apache-libcloud-2.2.1/libcloud/compute/deployment.py0000664000175000017500000001770112701023453022424 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Provides generic deployment steps for machines post boot. """ from __future__ import with_statement import os import binascii from libcloud.utils.py3 import basestring, PY3 class Deployment(object): """ Base class for deployment tasks. """ def run(self, node, client): """ Runs this deployment task on node using the client provided. :type node: :class:`Node` :keyword node: Node to operate one :type client: :class:`BaseSSHClient` :keyword client: Connected SSH client to use. :return: :class:`Node` """ raise NotImplementedError( 'run not implemented for this deployment') def _get_string_value(self, argument_name, argument_value): if not isinstance(argument_value, basestring) and \ not hasattr(argument_value, 'read'): raise TypeError('%s argument must be a string or a file-like ' 'object' % (argument_name)) if hasattr(argument_value, 'read'): argument_value = argument_value.read() return argument_value class SSHKeyDeployment(Deployment): """ Installs a public SSH Key onto a server. 
""" def __init__(self, key): """ :type key: ``str`` or :class:`File` object :keyword key: Contents of the public key write or a file object which can be read. """ self.key = self._get_string_value(argument_name='key', argument_value=key) def run(self, node, client): """ Installs SSH key into ``.ssh/authorized_keys`` See also :class:`Deployment.run` """ client.put(".ssh/authorized_keys", contents=self.key, mode='a') return node class FileDeployment(Deployment): """ Installs a file on the server. """ def __init__(self, source, target): """ :type source: ``str`` :keyword source: Local path of file to be installed :type target: ``str`` :keyword target: Path to install file on node """ self.source = source self.target = target def run(self, node, client): """ Upload the file, retaining permissions. See also :class:`Deployment.run` """ perms = int(oct(os.stat(self.source).st_mode)[4:], 8) with open(self.source, 'rb') as fp: content = fp.read() client.put(path=self.target, chmod=perms, contents=content) return node class ScriptDeployment(Deployment): """ Runs an arbitrary shell script on the server. This step works by first writing the content of the shell script (script argument) in a \*.sh file on a remote server and then running that file. If you are running a non-shell script, make sure to put the appropriate shebang to the top of the script. You are also advised to do that even if you are running a plan shell script. """ def __init__(self, script, args=None, name=None, delete=False): """ :type script: ``str`` :keyword script: Contents of the script to run. :type args: ``list`` :keyword args: Optional command line arguments which get passed to the deployment script file. :type name: ``str`` :keyword name: Name of the script to upload it as, if not specified, a random name will be chosen. :type delete: ``bool`` :keyword delete: Whether to delete the script on completion. 
""" script = self._get_string_value(argument_name='script', argument_value=script) self.script = script self.args = args or [] self.stdout = None self.stderr = None self.exit_status = None self.delete = delete self.name = name if self.name is None: # File is put under user's home directory # (~/libcloud_deployment_.sh) random_string = binascii.hexlify(os.urandom(4)) random_string = random_string.decode('ascii') self.name = 'libcloud_deployment_%s.sh' % (random_string) def run(self, node, client): """ Uploads the shell script and then executes it. See also :class:`Deployment.run` """ file_path = client.put(path=self.name, chmod=int('755', 8), contents=self.script) # Pre-pend cwd if user specified a relative path if self.name[0] != '/': base_path = os.path.dirname(file_path) name = os.path.join(base_path, self.name) else: name = self.name cmd = name if self.args: # Append arguments to the command cmd = '%s %s' % (name, ' '.join(self.args)) else: cmd = name self.stdout, self.stderr, self.exit_status = client.run(cmd) if self.delete: client.delete(self.name) return node class ScriptFileDeployment(ScriptDeployment): """ Runs an arbitrary shell script from a local file on the server. Same as ScriptDeployment, except that you can pass in a path to the file instead of the script content. """ def __init__(self, script_file, args=None, name=None, delete=False): """ :type script_file: ``str`` :keyword script_file: Path to a file containing the script to run. :type args: ``list`` :keyword args: Optional command line arguments which get passed to the deployment script file. :type name: ``str`` :keyword name: Name of the script to upload it as, if not specified, a random name will be chosen. :type delete: ``bool`` :keyword delete: Whether to delete the script on completion. 
""" with open(script_file, 'rb') as fp: content = fp.read() if PY3: content = content.decode('utf-8') super(ScriptFileDeployment, self).__init__(script=content, args=args, name=name, delete=delete) class MultiStepDeployment(Deployment): """ Runs a chain of Deployment steps. """ def __init__(self, add=None): """ :type add: ``list`` :keyword add: Deployment steps to add. """ self.steps = [] self.add(add) def add(self, add): """ Add a deployment to this chain. :type add: Single :class:`Deployment` or a ``list`` of :class:`Deployment` :keyword add: Adds this deployment to the others already in this object. """ if add is not None: add = add if isinstance(add, (list, tuple)) else [add] self.steps.extend(add) def run(self, node, client): """ Run each deployment that has been added. See also :class:`Deployment.run` """ for s in self.steps: node = s.run(node, client) return node apache-libcloud-2.2.1/libcloud/compute/types.py0000664000175000017500000002656413153541406021424 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Base types used by other parts of libcloud """ from libcloud.common.types import LibcloudError, MalformedResponseError from libcloud.common.types import InvalidCredsError, InvalidCredsException __all__ = [ "Provider", "NodeState", "DeploymentError", "DeploymentException", # @@TR: should the unused imports below be exported? "LibcloudError", "MalformedResponseError", "InvalidCredsError", "InvalidCredsException", "OLD_CONSTANT_TO_NEW_MAPPING" ] class Type(object): @classmethod def tostring(cls, value): """Return the string representation of the state object attribute :param str value: the state object to turn into string :return: the uppercase string that represents the state object :rtype: str """ return value.upper() @classmethod def fromstring(cls, value): """Return the state object attribute that matches the string :param str value: the string to look up :return: the state object attribute that matches the string :rtype: str """ return getattr(cls, value.upper(), None) class Provider(Type): """ Defines for each of the supported providers Non-Dummy drivers are sorted in alphabetical order. Please preserve this ordering when adding new drivers. :cvar DUMMY: Example provider :cvar ABIQUO: Abiquo driver :cvar ALIYUN_ECS: Aliyun ECS driver. :cvar AURORACOMPUTE: Aurora Compute driver. :cvar AZURE: Azure (classic) driver. :cvar AZURE_ARM: Azure Resource Manager (modern) driver. :cvar BLUEBOX: Bluebox :cvar CLOUDSIGMA: CloudSigma :cvar CLOUDSCALE: cloudscale.ch :cvar CLOUDSTACK: CloudStack :cvar DIMENSIONDATA: Dimension Data Cloud :cvar EC2: Amazon AWS. :cvar ECP: Enomaly :cvar ELASTICHOSTS: ElasticHosts.com :cvar EXOSCALE: Exoscale driver. :cvar GCE: Google Compute Engine :cvar GOGRID: GoGrid :cvar GRIDSPOT: Gridspot driver :cvar IBM: IBM Developer Cloud :cvar IKOULA: Ikoula driver. 
:cvar JOYENT: Joyent driver :cvar KTUCLOUD: kt ucloud driver :cvar LIBVIRT: Libvirt driver :cvar LINODE: Linode.com :cvar NEPHOSCALE: NephoScale driver :cvar NIMBUS: Nimbus :cvar NINEFOLD: Ninefold :cvar OPENNEBULA: OpenNebula.org :cvar OPSOURCE: Opsource Cloud :cvar OUTSCALE_INC: Outscale INC driver. :cvar OUTSCALE_SAS: Outscale SAS driver. :cvar PROFIT_BRICKS: ProfitBricks driver. :cvar RACKSPACE: Rackspace next-gen OpenStack based Cloud Servers :cvar RACKSPACE_FIRST_GEN: Rackspace First Gen Cloud Servers :cvar RIMUHOSTING: RimuHosting.com :cvar TERREMARK: Terremark :cvar VCL: VCL driver :cvar VCLOUD: vmware vCloud :cvar VPSNET: VPS.net :cvar VULTR: vultr driver. """ AZURE = 'azure' AZURE_ARM = 'azure_arm' DUMMY = 'dummy' ABIQUO = 'abiquo' ALIYUN_ECS = 'aliyun_ecs' AURORACOMPUTE = 'aurora_compute' AZURE = 'azure' BLUEBOX = 'bluebox' BRIGHTBOX = 'brightbox' BSNL = 'bsnl' CISCOCCS = 'ciscoccs' CLOUDFRAMES = 'cloudframes' CLOUDSIGMA = 'cloudsigma' CLOUDSCALE = 'cloudscale' CLOUDSTACK = 'cloudstack' CLOUDWATT = 'cloudwatt' DIGITAL_OCEAN = 'digitalocean' DIMENSIONDATA = 'dimensiondata' EC2 = 'ec2' ECP = 'ecp' ELASTICHOSTS = 'elastichosts' EUCALYPTUS = 'eucalyptus' EXOSCALE = 'exoscale' GANDI = 'gandi' GCE = 'gce' GOGRID = 'gogrid' GRIDSPOT = 'gridspot' HOSTVIRTUAL = 'hostvirtual' IBM = 'ibm' IKOULA = 'ikoula' INDOSAT = 'indosat' INTERNETSOLUTIONS = 'internetsolutions' JOYENT = 'joyent' KTUCLOUD = 'ktucloud' LIBVIRT = 'libvirt' LINODE = 'linode' MEDONE = 'medone' NEPHOSCALE = 'nephoscale' NIMBUS = 'nimbus' NINEFOLD = 'ninefold' NTTA = 'ntta' ONEANDONE = 'oneandone' OPENNEBULA = 'opennebula' OPENSTACK = 'openstack' OPSOURCE = 'opsource' OUTSCALE_INC = 'outscale_inc' OUTSCALE_SAS = 'outscale_sas' OVH = 'ovh' PACKET = 'packet' PROFIT_BRICKS = 'profitbricks' RACKSPACE = 'rackspace' RACKSPACE_FIRST_GEN = 'rackspace_first_gen' RIMUHOSTING = 'rimuhosting' RUNABOVE = 'runabove' SERVERLOVE = 'serverlove' SKALICLOUD = 'skalicloud' SOFTLAYER = 'softlayer' TERREMARK = 'terremark' 
VCL = 'vcl' VCLOUD = 'vcloud' VOXEL = 'voxel' VPSNET = 'vpsnet' VSPHERE = 'vsphere' VULTR = 'vultr' # OpenStack based providers CLOUDWATT = 'cloudwatt' HPCLOUD = 'hpcloud' KILI = 'kili' ONAPP = 'onapp' # Deprecated constants which aren't supported anymore RACKSPACE_UK = 'rackspace_uk' RACKSPACE_NOVA_BETA = 'rackspace_nova_beta' RACKSPACE_NOVA_DFW = 'rackspace_nova_dfw' RACKSPACE_NOVA_LON = 'rackspace_nova_lon' RACKSPACE_NOVA_ORD = 'rackspace_nova_ord' EC2_US_EAST = 'ec2_us_east' EC2_US_EAST_OHIO = 'ec2_us_east_ohio' EC2_EU = 'ec2_eu_west' # deprecated name EC2_EU_WEST = 'ec2_eu_west' EC2_EU_WEST2 = 'ec2_eu_west_london' EC2_US_WEST = 'ec2_us_west' EC2_AP_SOUTHEAST = 'ec2_ap_southeast' EC2_AP_NORTHEAST = 'ec2_ap_northeast' EC2_AP_NORTHEAST1 = 'ec2_ap_northeast_1' EC2_AP_NORTHEAST2 = 'ec2_ap_northeast_2' EC2_US_WEST_OREGON = 'ec2_us_west_oregon' EC2_SA_EAST = 'ec2_sa_east' EC2_AP_SOUTHEAST2 = 'ec2_ap_southeast_2' EC2_CA_CENTRAL1 = 'ec2_ca_central_1' ELASTICHOSTS_UK1 = 'elastichosts_uk1' ELASTICHOSTS_UK2 = 'elastichosts_uk2' ELASTICHOSTS_US1 = 'elastichosts_us1' ELASTICHOSTS_US2 = 'elastichosts_us2' ELASTICHOSTS_US3 = 'elastichosts_us3' ELASTICHOSTS_CA1 = 'elastichosts_ca1' ELASTICHOSTS_AU1 = 'elastichosts_au1' ELASTICHOSTS_CN1 = 'elastichosts_cn1' CLOUDSIGMA_US = 'cloudsigma_us' # Removed # SLICEHOST = 'slicehost' DEPRECATED_RACKSPACE_PROVIDERS = [Provider.RACKSPACE_UK, Provider.RACKSPACE_NOVA_BETA, Provider.RACKSPACE_NOVA_DFW, Provider.RACKSPACE_NOVA_LON, Provider.RACKSPACE_NOVA_ORD] OLD_CONSTANT_TO_NEW_MAPPING = { # Rackspace Provider.RACKSPACE_UK: Provider.RACKSPACE_FIRST_GEN, Provider.RACKSPACE_NOVA_BETA: Provider.RACKSPACE, Provider.RACKSPACE_NOVA_DFW: Provider.RACKSPACE, Provider.RACKSPACE_NOVA_LON: Provider.RACKSPACE, Provider.RACKSPACE_NOVA_ORD: Provider.RACKSPACE, # AWS Provider.EC2_US_EAST: Provider.EC2, Provider.EC2_US_EAST_OHIO: Provider.EC2, Provider.EC2_EU: Provider.EC2, Provider.EC2_EU_WEST: Provider.EC2, Provider.EC2_EU_WEST2: Provider.EC2, 
Provider.EC2_US_WEST: Provider.EC2, Provider.EC2_AP_SOUTHEAST: Provider.EC2, Provider.EC2_AP_SOUTHEAST2: Provider.EC2, Provider.EC2_AP_NORTHEAST: Provider.EC2, Provider.EC2_AP_NORTHEAST1: Provider.EC2, Provider.EC2_AP_NORTHEAST2: Provider.EC2, Provider.EC2_US_WEST_OREGON: Provider.EC2, Provider.EC2_SA_EAST: Provider.EC2, Provider.EC2_AP_SOUTHEAST: Provider.EC2, Provider.EC2_CA_CENTRAL1: Provider.EC2, # ElasticHosts Provider.ELASTICHOSTS_UK1: Provider.ELASTICHOSTS, Provider.ELASTICHOSTS_UK2: Provider.ELASTICHOSTS, Provider.ELASTICHOSTS_US1: Provider.ELASTICHOSTS, Provider.ELASTICHOSTS_US2: Provider.ELASTICHOSTS, Provider.ELASTICHOSTS_US3: Provider.ELASTICHOSTS, Provider.ELASTICHOSTS_CA1: Provider.ELASTICHOSTS, Provider.ELASTICHOSTS_AU1: Provider.ELASTICHOSTS, Provider.ELASTICHOSTS_CN1: Provider.ELASTICHOSTS, } class NodeState(Type): """ Standard states for a node :cvar RUNNING: Node is running. :cvar STARTING: Node is starting up. :cvar REBOOTING: Node is rebooting. :cvar TERMINATED: Node is terminated. This node can't be started later on. :cvar STOPPING: Node is currently trying to stop. :cvar STOPPED: Node is stopped. This node can be started later on. :cvar PENDING: Node is pending. :cvar SUSPENDED: Node is suspended. :cvar ERROR: Node is an error state. Usually no operations can be performed on the node once it ends up in the error state. :cvar PAUSED: Node is paused. :cvar RECONFIGURING: Node is being reconfigured. :cvar UNKNOWN: Node state is unknown. 
""" RUNNING = 'running' STARTING = 'starting' REBOOTING = 'rebooting' TERMINATED = 'terminated' PENDING = 'pending' UNKNOWN = 'unknown' STOPPING = 'stopping' STOPPED = 'stopped' SUSPENDED = 'suspended' ERROR = 'error' PAUSED = 'paused' RECONFIGURING = 'reconfiguring' MIGRATING = 'migrating' NORMAL = 'normal' UPDATING = 'updating' class StorageVolumeState(Type): """ Standard states of a StorageVolume """ AVAILABLE = 'available' ERROR = 'error' INUSE = 'inuse' CREATING = 'creating' DELETING = 'deleting' DELETED = 'deleted' BACKUP = 'backup' ATTACHING = 'attaching' UNKNOWN = 'unknown' MIGRATING = 'migrating' UPDATING = 'updating' class VolumeSnapshotState(Type): """ Standard states of VolumeSnapshots """ AVAILABLE = 'available' ERROR = 'error' CREATING = 'creating' DELETING = 'deleting' RESTORING = 'restoring' UNKNOWN = 'unknown' UPDATING = 'updating' class Architecture(object): """ Image and size architectures. :cvar I386: i386 (32 bt) :cvar X86_64: x86_64 (64 bit) """ I386 = 0 X86_X64 = 1 class DeploymentError(LibcloudError): """ Exception used when a Deployment Task failed. 
:ivar node: :class:`Node` on which this exception happened, you might want to call :func:`Node.destroy` """ def __init__(self, node, original_exception=None, driver=None): self.node = node self.value = original_exception self.driver = driver def __str__(self): return self.__repr__() def __repr__(self): return (('' % (self.node.id, str(self.value), str(self.driver)))) class KeyPairError(LibcloudError): error_type = 'KeyPairError' def __init__(self, name, driver): self.name = name self.value = 'Key pair with name %s does not exist' % (name) super(KeyPairError, self).__init__(value=self.value, driver=driver) def __str__(self): return self.__repr__() def __repr__(self): return ('<%s name=%s, value=%s, driver=%s>' % (self.error_type, self.name, self.value, self.driver.name)) class KeyPairDoesNotExistError(KeyPairError): error_type = 'KeyPairDoesNotExistError' """Deprecated alias of :class:`DeploymentException`""" DeploymentException = DeploymentError apache-libcloud-2.2.1/libcloud/compute/drivers/0000775000175000017500000000000013160535107021346 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/compute/drivers/gandi.py0000664000175000017500000006356413153541406023021 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Gandi driver for compute """ import sys from datetime import datetime from libcloud.common.gandi import BaseGandiDriver, GandiException,\ NetworkInterface, IPAddress, Disk from libcloud.compute.base import KeyPair from libcloud.compute.base import StorageVolume from libcloud.compute.types import NodeState, Provider from libcloud.compute.base import Node, NodeDriver from libcloud.compute.base import NodeSize, NodeImage, NodeLocation NODE_STATE_MAP = { 'running': NodeState.RUNNING, 'halted': NodeState.TERMINATED, 'paused': NodeState.TERMINATED, 'locked': NodeState.TERMINATED, 'being_created': NodeState.PENDING, 'invalid': NodeState.UNKNOWN, 'legally_locked': NodeState.PENDING, 'deleted': NodeState.TERMINATED } NODE_PRICE_HOURLY_USD = 0.02 INSTANCE_TYPES = { 'small': { 'id': 'small', 'name': 'Small instance', 'cpu': 1, 'memory': 256, 'disk': 3, 'bandwidth': 10240, }, 'medium': { 'id': 'medium', 'name': 'Medium instance', 'cpu': 1, 'memory': 1024, 'disk': 20, 'bandwidth': 10240, }, 'large': { 'id': 'large', 'name': 'Large instance', 'cpu': 2, 'memory': 2048, 'disk': 50, 'bandwidth': 10240, }, 'x-large': { 'id': 'x-large', 'name': 'Extra Large instance', 'cpu': 4, 'memory': 4096, 'disk': 100, 'bandwidth': 10240, }, } class GandiNodeDriver(BaseGandiDriver, NodeDriver): """ Gandi node driver """ api_name = 'gandi' friendly_name = 'Gandi.net' website = 'http://www.gandi.net/' country = 'FR' type = Provider.GANDI # TODO : which features to enable ? 
features = {} def __init__(self, *args, **kwargs): """ @inherits: :class:`NodeDriver.__init__` """ super(BaseGandiDriver, self).__init__(*args, **kwargs) def _resource_info(self, type, id): try: obj = self.connection.request('hosting.%s.info' % type, int(id)) return obj.object except Exception: e = sys.exc_info()[1] raise GandiException(1003, e) def _node_info(self, id): return self._resource_info('vm', id) def _volume_info(self, id): return self._resource_info('disk', id) # Generic methods for driver def _to_node(self, vm): return Node( id=vm['id'], name=vm['hostname'], state=NODE_STATE_MAP.get( vm['state'], NodeState.UNKNOWN ), public_ips=vm.get('ips', []), private_ips=[], driver=self, extra={ 'ai_active': vm.get('ai_active'), 'datacenter_id': vm.get('datacenter_id'), 'description': vm.get('description') } ) def _to_nodes(self, vms): return [self._to_node(v) for v in vms] def _to_volume(self, disk): extra = {'can_snapshot': disk['can_snapshot']} return StorageVolume( id=disk['id'], name=disk['name'], size=int(disk['size']), driver=self, extra=extra) def _to_volumes(self, disks): return [self._to_volume(d) for d in disks] def list_nodes(self): """ Return a list of nodes in the current zone or all zones. :return: List of Node objects :rtype: ``list`` of :class:`Node` """ vms = self.connection.request('hosting.vm.list').object ips = self.connection.request('hosting.ip.list').object for vm in vms: vm['ips'] = [] for ip in ips: if vm['ifaces_id'][0] == ip['iface_id']: ip = ip.get('ip', None) if ip: vm['ips'].append(ip) nodes = self._to_nodes(vms) return nodes def ex_get_node(self, node_id): """ Return a Node object based on a node id. 
:param name: The ID of the node :type name: ``int`` :return: A Node object for the node :rtype: :class:`Node` """ vm = self.connection.request('hosting.vm.info', int(node_id)).object ips = self.connection.request('hosting.ip.list').object vm['ips'] = [] for ip in ips: if vm['ifaces_id'][0] == ip['iface_id']: ip = ip.get('ip', None) if ip: vm['ips'].append(ip) node = self._to_node(vm) return node def reboot_node(self, node): """ Reboot a node. :param node: Node to be rebooted :type node: :class:`Node` :return: True if successful, False if not :rtype: ``bool`` """ op = self.connection.request('hosting.vm.reboot', int(node.id)) self._wait_operation(op.object['id']) vm = self._node_info(int(node.id)) if vm['state'] == 'running': return True return False def destroy_node(self, node): """ Destroy a node. :param node: Node object to destroy :type node: :class:`Node` :return: True if successful :rtype: ``bool`` """ vm = self._node_info(node.id) if vm['state'] == 'running': # Send vm_stop and wait for accomplish op_stop = self.connection.request('hosting.vm.stop', int(node.id)) if not self._wait_operation(op_stop.object['id']): raise GandiException(1010, 'vm.stop failed') # Delete op = self.connection.request('hosting.vm.delete', int(node.id)) if self._wait_operation(op.object['id']): return True return False def deploy_node(self, **kwargs): """ deploy_node is not implemented for gandi driver :rtype: ``bool`` """ raise NotImplementedError( 'deploy_node not implemented for gandi driver') def create_node(self, **kwargs): """ Create a new Gandi node :keyword name: String with a name for this new node (required) :type name: ``str`` :keyword image: OS Image to boot on node. (required) :type image: :class:`NodeImage` :keyword location: Which data center to create a node in. If empty, undefined behavior will be selected. (optional) :type location: :class:`NodeLocation` :keyword size: The size of resources allocated to this node. 
(required) :type size: :class:`NodeSize` :keyword login: user name to create for login on machine (required) :type login: ``str`` :keyword password: password for user that'll be created (required) :type password: ``str`` :keyword inet_family: version of ip to use, default 4 (optional) :type inet_family: ``int`` :keyword keypairs: IDs of keypairs or Keypairs object :type keypairs: list of ``int`` or :class:`.KeyPair` :rtype: :class:`Node` """ if not kwargs.get('login') and not kwargs.get('keypairs'): raise GandiException(1020, "Login and password or ssh keypair " "must be defined for node creation") location = kwargs.get('location') if location and isinstance(location, NodeLocation): dc_id = int(location.id) else: raise GandiException( 1021, 'location must be a subclass of NodeLocation') size = kwargs.get('size') if not size and not isinstance(size, NodeSize): raise GandiException( 1022, 'size must be a subclass of NodeSize') keypairs = kwargs.get('keypairs', []) keypair_ids = [ k if isinstance(k, int) else k.extra['id'] for k in keypairs ] # If size name is in INSTANCE_TYPE we use new rating model instance = INSTANCE_TYPES.get(size.id) cores = instance['cpu'] if instance else int(size.id) src_disk_id = int(kwargs['image'].id) disk_spec = { 'datacenter_id': dc_id, 'name': 'disk_%s' % kwargs['name'] } vm_spec = { 'datacenter_id': dc_id, 'hostname': kwargs['name'], 'memory': int(size.ram), 'cores': cores, 'bandwidth': int(size.bandwidth), 'ip_version': kwargs.get('inet_family', 4), } if kwargs.get('login') and kwargs.get('password'): vm_spec.update({ 'login': kwargs['login'], 'password': kwargs['password'], # TODO : use NodeAuthPassword }) if keypair_ids: vm_spec['keys'] = keypair_ids # Call create_from helper api. 
Return 3 operations : disk_create, # iface_create,vm_create (op_disk, op_iface, op_vm) = self.connection.request( 'hosting.vm.create_from', vm_spec, disk_spec, src_disk_id ).object # We wait for vm_create to finish if self._wait_operation(op_vm['id']): # after successful operation, get ip information # thru first interface node = self._node_info(op_vm['vm_id']) ifaces = node.get('ifaces') if len(ifaces) > 0: ips = ifaces[0].get('ips') if len(ips) > 0: node['ip'] = ips[0]['ip'] return self._to_node(node) return None def _to_image(self, img): return NodeImage( id=img['disk_id'], name=img['label'], driver=self.connection.driver ) def list_images(self, location=None): """ Return a list of image objects. :keyword location: Which data center to filter a images in. :type location: :class:`NodeLocation` :return: List of GCENodeImage objects :rtype: ``list`` of :class:`GCENodeImage` """ try: if location: filtering = {'datacenter_id': int(location.id)} else: filtering = {} images = self.connection.request('hosting.image.list', filtering) return [self._to_image(i) for i in images.object] except Exception: e = sys.exc_info()[1] raise GandiException(1011, e) def _to_size(self, id, size): return NodeSize( id=id, name='%s cores' % id, ram=size['memory'], disk=size['disk'], bandwidth=size['bandwidth'], price=(self._get_size_price(size_id='1') * id), driver=self.connection.driver, ) def _instance_type_to_size(self, instance): return NodeSize( id=instance['id'], name=instance['name'], ram=instance['memory'], disk=instance['disk'], bandwidth=instance['bandwidth'], price=self._get_size_price(size_id=instance['id']), driver=self.connection.driver, ) def list_instance_type(self, location=None): return [self._instance_type_to_size(instance) for name, instance in INSTANCE_TYPES.items()] def list_sizes(self, location=None): """ Return a list of sizes (machineTypes) in a zone. :keyword location: Which data center to filter a sizes in. 
:type location: :class:`NodeLocation` or ``None`` :return: List of NodeSize objects :rtype: ``list`` of :class:`NodeSize` """ account = self.connection.request('hosting.account.info').object if account.get('rating_enabled'): # This account use new rating model return self.list_instance_type(location) # Look for available shares, and return a list of share_definition available_res = account['resources']['available'] if available_res['shares'] == 0: return None else: share_def = account['share_definition'] available_cores = available_res['cores'] # 0.75 core given when creating a server max_core = int(available_cores + 0.75) shares = [] if available_res['servers'] < 1: # No server quota, no way return shares for i in range(1, max_core + 1): share = {id: i} share_is_available = True for k in ['memory', 'disk', 'bandwidth']: if share_def[k] * i > available_res[k]: # We run out for at least one resource inside share_is_available = False else: share[k] = share_def[k] * i if share_is_available: nb_core = i shares.append(self._to_size(nb_core, share)) return shares def _to_loc(self, loc): return NodeLocation( id=loc['id'], name=loc['dc_code'], country=loc['country'], driver=self ) def list_locations(self): """ Return a list of locations (datacenters). :return: List of NodeLocation objects :rtype: ``list`` of :class:`NodeLocation` """ res = self.connection.request('hosting.datacenter.list') return [self._to_loc(l) for l in res.object] def list_volumes(self): """ Return a list of volumes. :return: A list of volume objects. :rtype: ``list`` of :class:`StorageVolume` """ res = self.connection.request('hosting.disk.list', {}) return self._to_volumes(res.object) def ex_get_volume(self, volume_id): """ Return a Volume object based on a volume ID. 
:param volume_id: The ID of the volume :type volume_id: ``int`` :return: A StorageVolume object for the volume :rtype: :class:`StorageVolume` """ res = self.connection.request('hosting.disk.info', volume_id) return self._to_volume(res.object) def create_volume(self, size, name, location=None, snapshot=None): """ Create a volume (disk). :param size: Size of volume to create (in GB). :type size: ``int`` :param name: Name of volume to create :type name: ``str`` :keyword location: Location (zone) to create the volume in :type location: :class:`NodeLocation` or ``None`` :keyword snapshot: Snapshot to create image from :type snapshot: :class:`Snapshot` :return: Storage Volume object :rtype: :class:`StorageVolume` """ disk_param = { 'name': name, 'size': int(size), 'datacenter_id': int(location.id) } if snapshot: op = self.connection.request('hosting.disk.create_from', disk_param, int(snapshot.id)) else: op = self.connection.request('hosting.disk.create', disk_param) if self._wait_operation(op.object['id']): disk = self._volume_info(op.object['disk_id']) return self._to_volume(disk) return None def attach_volume(self, node, volume, device=None): """ Attach a volume to a node. :param node: The node to attach the volume to :type node: :class:`Node` :param volume: The volume to attach. :type volume: :class:`StorageVolume` :keyword device: Not used in this cloud. :type device: ``None`` :return: True if successful :rtype: ``bool`` """ op = self.connection.request('hosting.vm.disk_attach', int(node.id), int(volume.id)) if self._wait_operation(op.object['id']): return True return False def detach_volume(self, node, volume): """ Detaches a volume from a node. 
:param node: Node which should be used :type node: :class:`Node` :param volume: Volume to be detached :type volume: :class:`StorageVolume` :rtype: ``bool`` """ op = self.connection.request('hosting.vm.disk_detach', int(node.id), int(volume.id)) if self._wait_operation(op.object['id']): return True return False def destroy_volume(self, volume): """ Destroy a volume. :param volume: Volume object to destroy :type volume: :class:`StorageVolume` :return: True if successful :rtype: ``bool`` """ op = self.connection.request('hosting.disk.delete', int(volume.id)) if self._wait_operation(op.object['id']): return True return False def _to_iface(self, iface): ips = [] for ip in iface.get('ips', []): new_ip = IPAddress( ip['id'], NODE_STATE_MAP.get( ip['state'], NodeState.UNKNOWN ), ip['ip'], self.connection.driver, version=ip.get('version'), extra={'reverse': ip['reverse']} ) ips.append(new_ip) return NetworkInterface( iface['id'], NODE_STATE_MAP.get( iface['state'], NodeState.UNKNOWN ), mac_address=None, driver=self.connection.driver, ips=ips, node_id=iface.get('vm_id'), extra={'bandwidth': iface['bandwidth']}, ) def _to_ifaces(self, ifaces): return [self._to_iface(i) for i in ifaces] def ex_list_interfaces(self): """ Specific method to list network interfaces :rtype: ``list`` of :class:`GandiNetworkInterface` """ ifaces = self.connection.request('hosting.iface.list').object ips = self.connection.request('hosting.ip.list').object for iface in ifaces: iface['ips'] = list( filter(lambda i: i['iface_id'] == iface['id'], ips)) return self._to_ifaces(ifaces) def _to_disk(self, element): disk = Disk( id=element['id'], state=NODE_STATE_MAP.get( element['state'], NodeState.UNKNOWN ), name=element['name'], driver=self.connection.driver, size=element['size'], extra={'can_snapshot': element['can_snapshot']} ) return disk def _to_disks(self, elements): return [self._to_disk(el) for el in elements] def ex_list_disks(self): """ Specific method to list all disk :rtype: ``list`` of 
:class:`GandiDisk` """ res = self.connection.request('hosting.disk.list', {}) return self._to_disks(res.object) def ex_node_attach_disk(self, node, disk): """ Specific method to attach a disk to a node :param node: Node which should be used :type node: :class:`Node` :param disk: Disk which should be used :type disk: :class:`GandiDisk` :rtype: ``bool`` """ op = self.connection.request('hosting.vm.disk_attach', int(node.id), int(disk.id)) if self._wait_operation(op.object['id']): return True return False def ex_node_detach_disk(self, node, disk): """ Specific method to detach a disk from a node :param node: Node which should be used :type node: :class:`Node` :param disk: Disk which should be used :type disk: :class:`GandiDisk` :rtype: ``bool`` """ op = self.connection.request('hosting.vm.disk_detach', int(node.id), int(disk.id)) if self._wait_operation(op.object['id']): return True return False def ex_node_attach_interface(self, node, iface): """ Specific method to attach an interface to a node :param node: Node which should be used :type node: :class:`Node` :param iface: Network interface which should be used :type iface: :class:`GandiNetworkInterface` :rtype: ``bool`` """ op = self.connection.request('hosting.vm.iface_attach', int(node.id), int(iface.id)) if self._wait_operation(op.object['id']): return True return False def ex_node_detach_interface(self, node, iface): """ Specific method to detach an interface from a node :param node: Node which should be used :type node: :class:`Node` :param iface: Network interface which should be used :type iface: :class:`GandiNetworkInterface` :rtype: ``bool`` """ op = self.connection.request('hosting.vm.iface_detach', int(node.id), int(iface.id)) if self._wait_operation(op.object['id']): return True return False def ex_snapshot_disk(self, disk, name=None): """ Specific method to make a snapshot of a disk :param disk: Disk which should be used :type disk: :class:`GandiDisk` :param name: Name which should be used :type name: 
``str`` :rtype: ``bool`` """ if not disk.extra.get('can_snapshot'): raise GandiException(1021, 'Disk %s can\'t snapshot' % disk.id) if not name: suffix = datetime.today().strftime('%Y%m%d') name = 'snap_%s' % (suffix) op = self.connection.request( 'hosting.disk.create_from', {'name': name, 'type': 'snapshot', }, int(disk.id), ) if self._wait_operation(op.object['id']): return True return False def ex_update_disk(self, disk, new_size=None, new_name=None): """Specific method to update size or name of a disk WARNING: if a server is attached it'll be rebooted :param disk: Disk which should be used :type disk: :class:`GandiDisk` :param new_size: New size :type new_size: ``int`` :param new_name: New name :type new_name: ``str`` :rtype: ``bool`` """ params = {} if new_size: params.update({'size': new_size}) if new_name: params.update({'name': new_name}) op = self.connection.request('hosting.disk.update', int(disk.id), params) if self._wait_operation(op.object['id']): return True return False def _to_key_pair(self, data): key_pair = KeyPair(name=data['name'], fingerprint=data['fingerprint'], public_key=data.get('value', None), private_key=data.get('privatekey', None), driver=self, extra={'id': data['id']}) return key_pair def _to_key_pairs(self, data): return [self._to_key_pair(k) for k in data] def list_key_pairs(self): """ List registered key pairs. :return: A list of key par objects. :rtype: ``list`` of :class:`libcloud.compute.base.KeyPair` """ kps = self.connection.request('hosting.ssh.list').object return self._to_key_pairs(kps) def get_key_pair(self, name): """ Retrieve a single key pair. :param name: Name of the key pair to retrieve. :type name: ``str`` :rtype: :class:`.KeyPair` """ filter_params = {'name': name} kps = self.connection.request('hosting.ssh.list', filter_params).object return self._to_key_pair(kps[0]) def import_key_pair_from_string(self, name, key_material): """ Create a new key pair object. :param name: Key pair name. 
:type name: ``str`` :param key_material: Public key material. :type key_material: ``str`` :return: Imported key pair object. :rtype: :class:`.KeyPair` """ params = {'name': name, 'value': key_material} kp = self.connection.request('hosting.ssh.create', params).object return self._to_key_pair(kp) def delete_key_pair(self, key_pair): """ Delete an existing key pair. :param key_pair: Key pair object or ID. :type key_pair: :class.KeyPair` or ``int`` :return: True of False based on success of Keypair deletion :rtype: ``bool`` """ key_id = key_pair if isinstance(key_pair, int) \ else key_pair.extra['id'] success = self.connection.request('hosting.ssh.delete', key_id).object return success apache-libcloud-2.2.1/libcloud/compute/drivers/ntta.py0000664000175000017500000000373312701023453022670 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" NTT America Driver """ from libcloud.compute.providers import Provider from libcloud.common.dimensiondata import (DimensionDataConnection, API_ENDPOINTS) from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver DEFAULT_REGION = 'ntta-na' class NTTAmericaNodeDriver(DimensionDataNodeDriver): """ NTT America node driver, based on Dimension Data driver """ selected_region = None connectionCls = DimensionDataConnection name = 'NTTAmerica' website = 'http://www.nttamerica.com/' type = Provider.NTTA features = {'create_node': ['password']} api_version = 1.0 def __init__(self, key, secret=None, secure=True, host=None, port=None, api_version=None, region=DEFAULT_REGION, **kwargs): if region not in API_ENDPOINTS: raise ValueError('Invalid region: %s' % (region)) self.selected_region = API_ENDPOINTS[region] super(NTTAmericaNodeDriver, self).__init__( key=key, secret=secret, secure=secure, host=host, port=port, api_version=api_version, region=region, **kwargs) apache-libcloud-2.2.1/libcloud/compute/drivers/vpsnet.py0000664000175000017500000001476012701023453023243 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" VPS.net driver """ import base64 try: import simplejson as json except ImportError: import json from libcloud.utils.py3 import b from libcloud.common.base import ConnectionUserAndKey, JsonResponse from libcloud.common.types import InvalidCredsError, MalformedResponseError from libcloud.compute.providers import Provider from libcloud.compute.types import NodeState from libcloud.compute.base import Node, NodeDriver from libcloud.compute.base import NodeSize, NodeImage, NodeLocation API_HOST = 'api.vps.net' API_VERSION = 'api10json' RAM_PER_NODE = 256 DISK_PER_NODE = 10 BANDWIDTH_PER_NODE = 250 class VPSNetResponse(JsonResponse): def parse_body(self): try: return super(VPSNetResponse, self).parse_body() except MalformedResponseError: return self.body def success(self): # vps.net wrongly uses 406 for invalid auth creds if self.status == 406 or self.status == 403: raise InvalidCredsError() return True def parse_error(self): try: errors = super(VPSNetResponse, self).parse_body()['errors'][0] except MalformedResponseError: return self.body else: return "\n".join(errors) class VPSNetConnection(ConnectionUserAndKey): """ Connection class for the VPS.net driver """ host = API_HOST responseCls = VPSNetResponse allow_insecure = False def add_default_headers(self, headers): user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8')) return headers class VPSNetNodeDriver(NodeDriver): """ VPS.net node driver """ type = Provider.VPSNET api_name = 'vps_net' name = "vps.net" website = 'http://vps.net/' connectionCls = VPSNetConnection def _to_node(self, vm): if vm['running']: state = NodeState.RUNNING else: state = NodeState.PENDING n = Node(id=vm['id'], name=vm['label'], state=state, public_ips=[vm.get('primary_ip_address', None)], private_ips=[], extra={'slices_count': vm['slices_count']}, # Number of nodes consumed by VM driver=self.connection.driver) return n def _to_image(self, image, cloud): image = 
NodeImage(id=image['id'], name="%s: %s" % (cloud, image['label']), driver=self.connection.driver) return image def _to_size(self, num): size = NodeSize(id=num, name="%d Node" % (num,), ram=RAM_PER_NODE * num, disk=DISK_PER_NODE, bandwidth=BANDWIDTH_PER_NODE * num, price=self._get_price_per_node(num) * num, driver=self.connection.driver) return size def _get_price_per_node(self, num): single_node_price = self._get_size_price(size_id='1') return num * single_node_price def create_node(self, name, image, size, **kwargs): """Create a new VPS.net node @inherits: :class:`NodeDriver.create_node` :keyword ex_backups_enabled: Enable automatic backups :type ex_backups_enabled: ``bool`` :keyword ex_fqdn: Fully Qualified domain of the node :type ex_fqdn: ``str`` """ headers = {'Content-Type': 'application/json'} request = {'virtual_machine': {'label': name, 'fqdn': kwargs.get('ex_fqdn', ''), 'system_template_id': image.id, 'backups_enabled': kwargs.get('ex_backups_enabled', 0), 'slices_required': size.id}} res = self.connection.request('/virtual_machines.%s' % (API_VERSION,), data=json.dumps(request), headers=headers, method='POST') node = self._to_node(res.object['virtual_machine']) return node def reboot_node(self, node): res = self.connection.request( '/virtual_machines/%s/%s.%s' % (node.id, 'reboot', API_VERSION), method="POST") node = self._to_node(res.object['virtual_machine']) return True def list_sizes(self, location=None): res = self.connection.request('/nodes.%s' % (API_VERSION,)) available_nodes = len([size for size in res.object if size['slice']['virtual_machine_id']]) sizes = [self._to_size(i) for i in range(1, available_nodes + 1)] return sizes def destroy_node(self, node): res = self.connection.request('/virtual_machines/%s.%s' % (node.id, API_VERSION), method='DELETE') return res.status == 200 def list_nodes(self): res = self.connection.request('/virtual_machines.%s' % (API_VERSION,)) return [self._to_node(i['virtual_machine']) for i in res.object] def 
list_images(self, location=None): res = self.connection.request('/available_clouds.%s' % (API_VERSION,)) images = [] for cloud in res.object: label = cloud['cloud']['label'] templates = cloud['cloud']['system_templates'] images.extend([self._to_image(image, label) for image in templates]) return images def list_locations(self): return [NodeLocation(0, "VPS.net Western US", 'US', self)] apache-libcloud-2.2.1/libcloud/compute/drivers/__init__.py0000664000175000017500000000227012715310131023451 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Drivers for working with different providers """ __all__ = [ 'abiquo', 'brightbox', 'bluebox', 'dimensiondata', 'dummy', 'ec2', 'ecp', 'elasticstack', 'elastichosts', 'cloudsigma', 'gce', 'gogrid', 'hostvirtual', 'ibm_sce', 'linode', 'opennebula', 'rackspace', 'rimuhosting', 'softlayer', 'vcloud', 'voxel', 'vpsnet', 'onapp', ] apache-libcloud-2.2.1/libcloud/compute/drivers/hostvirtual.py0000664000175000017500000003316313153541406024313 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ libcloud driver for the Host Virtual Inc. (VR) API Home page https://www.hostvirtual.com/ """ import time import re try: import simplejson as json except ImportError: import json from libcloud.common.hostvirtual import HostVirtualResponse from libcloud.common.hostvirtual import HostVirtualConnection from libcloud.common.hostvirtual import HostVirtualException from libcloud.compute.providers import Provider from libcloud.compute.types import NodeState from libcloud.compute.base import Node, NodeDriver from libcloud.compute.base import NodeImage, NodeSize, NodeLocation from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword API_ROOT = '' NODE_STATE_MAP = { 'BUILDING': NodeState.PENDING, 'PENDING': NodeState.PENDING, 'RUNNING': NodeState.RUNNING, # server is powered up 'STOPPING': NodeState.REBOOTING, 'REBOOTING': NodeState.REBOOTING, 'STARTING': NodeState.REBOOTING, 'TERMINATED': NodeState.TERMINATED, # server is powered down 'STOPPED': NodeState.STOPPED } DEFAULT_NODE_LOCATION_ID = 21 class HostVirtualComputeResponse(HostVirtualResponse): pass class HostVirtualComputeConnection(HostVirtualConnection): responseCls = HostVirtualComputeResponse class HostVirtualNodeDriver(NodeDriver): type = Provider.HOSTVIRTUAL name = 'HostVirtual' website = 'http://www.hostvirtual.com' connectionCls = HostVirtualComputeConnection features = {'create_node': ['ssh_key', 'password']} def __init__(self, key, secure=True, 
host=None, port=None): self.location = None super(HostVirtualNodeDriver, self).__init__(key=key, secure=secure, host=host, port=port) def list_nodes(self): try: result = self.connection.request( API_ROOT + '/cloud/servers/').object except HostVirtualException: return [] nodes = [] for value in result: node = self._to_node(value) nodes.append(node) return nodes def list_locations(self): result = self.connection.request(API_ROOT + '/cloud/locations/').object locations = [] for k in result: dc = result[k] locations.append(NodeLocation( dc["id"], dc["name"], dc["name"].split(',')[1].replace(" ", ""), # country self)) return sorted(locations, key=lambda x: int(x.id)) def list_sizes(self, location=None): params = {} if location is not None: params = {'location': location.id} result = self.connection.request( API_ROOT + '/cloud/sizes/', params=params).object sizes = [] for size in result: n = NodeSize(id=size['plan_id'], name=size['plan'], ram=size['ram'], disk=size['disk'], bandwidth=size['transfer'], price=size['price'], driver=self.connection.driver) sizes.append(n) return sizes def list_images(self): result = self.connection.request(API_ROOT + '/cloud/images/').object images = [] for image in result: i = NodeImage(id=image["id"], name=image["os"], driver=self.connection.driver, extra=image) del i.extra['id'] del i.extra['os'] images.append(i) return images def create_node(self, name, image, size, **kwargs): """ Creates a node Example of node creation with ssh key deployed: >>> from libcloud.compute.base import NodeAuthSSHKey >>> key = open('/home/user/.ssh/id_rsa.pub').read() >>> auth = NodeAuthSSHKey(pubkey=key) >>> from libcloud.compute.providers import get_driver; >>> driver = get_driver('hostvirtual') >>> conn = driver('API_KEY') >>> image = conn.list_images()[1] >>> size = conn.list_sizes()[0] >>> location = conn.list_locations()[1] >>> name = 'markos-dev' >>> node = conn.create_node(name, image, size, auth=auth, >>> location=location) """ dc = None auth = 
self._get_and_check_auth(kwargs.get('auth')) if not self._is_valid_fqdn(name): raise HostVirtualException( 500, "Name should be a valid FQDN (e.g, hostname.example.com)") # simply order a package first pkg = self.ex_order_package(size) if 'location' in kwargs: dc = kwargs['location'].id else: dc = DEFAULT_NODE_LOCATION_ID # create a stub node stub_node = self._to_node({ 'mbpkgid': pkg['id'], 'status': 'PENDING', 'fqdn': name, 'plan_id': size.id, 'os_id': image.id, 'location_id': dc }) # provisioning a server using the stub node self.ex_provision_node(node=stub_node, auth=auth) node = self._wait_for_node(stub_node.id) if getattr(auth, 'generated', False): node.extra['password'] = auth.password return node def reboot_node(self, node): params = {'force': 0, 'mbpkgid': node.id} result = self.connection.request( API_ROOT + '/cloud/server/reboot', data=json.dumps(params), method='POST').object return bool(result) def destroy_node(self, node): params = { 'mbpkgid': node.id, # 'reason': 'Submitted through Libcloud API' } result = self.connection.request( API_ROOT + '/cloud/cancel', data=json.dumps(params), method='POST').object return bool(result) def ex_list_packages(self): """ List the server packages. """ try: result = self.connection.request( API_ROOT + '/cloud/packages/').object except HostVirtualException: return [] pkgs = [] for value in result: pkgs.append(value) return pkgs def ex_order_package(self, size): """ Order a server package. :param size: :type node: :class:`NodeSize` :rtype: ``str`` """ params = {'plan': size.name} pkg = self.connection.request(API_ROOT + '/cloud/buy/', data=json.dumps(params), method='POST').object return pkg def ex_cancel_package(self, node): """ Cancel a server package. 
:param node: Node which should be used :type node: :class:`Node` :rtype: ``str`` """ params = {'mbpkgid': node.id} result = self.connection.request(API_ROOT + '/cloud/cancel/', data=json.dumps(params), method='POST').object return result def ex_unlink_package(self, node): """ Unlink a server package from location. :param node: Node which should be used :type node: :class:`Node` :rtype: ``str`` """ params = {'mbpkgid': node.id} result = self.connection.request(API_ROOT + '/cloud/unlink/', data=json.dumps(params), method='POST').object return result def ex_get_node(self, node_id): """ Get a single node. :param node_id: id of the node that we need the node object for :type node_id: ``str`` :rtype: :class:`Node` """ params = {'mbpkgid': node_id} result = self.connection.request( API_ROOT + '/cloud/server', params=params).object node = self._to_node(result) return node def ex_stop_node(self, node): """ Stop a node. :param node: Node which should be used :type node: :class:`Node` :rtype: ``bool`` """ params = {'force': 0, 'mbpkgid': node.id} result = self.connection.request( API_ROOT + '/cloud/server/shutdown', data=json.dumps(params), method='POST').object return bool(result) def ex_start_node(self, node): """ Start a node. 
:param node: Node which should be used :type node: :class:`Node` :rtype: ``bool`` """ params = {'mbpkgid': node.id} result = self.connection.request( API_ROOT + '/cloud/server/start', data=json.dumps(params), method='POST').object return bool(result) def ex_provision_node(self, **kwargs): """ Provision a server on a VR package and get it booted :keyword node: node which should be used :type node: :class:`Node` :keyword image: The distribution to deploy on your server (mandatory) :type image: :class:`NodeImage` :keyword auth: an SSH key or root password (mandatory) :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword` :keyword location: which datacenter to create the server in :type location: :class:`NodeLocation` :return: Node representing the newly built server :rtype: :class:`Node` """ node = kwargs['node'] if 'image' in kwargs: image = kwargs['image'] else: image = node.extra['image'] params = { 'mbpkgid': node.id, 'image': image, 'fqdn': node.name, 'location': node.extra['location'], } auth = kwargs['auth'] ssh_key = None password = None if isinstance(auth, NodeAuthSSHKey): ssh_key = auth.pubkey params['ssh_key'] = ssh_key elif isinstance(auth, NodeAuthPassword): password = auth.password params['password'] = password if not ssh_key and not password: raise HostVirtualException( 500, "SSH key or Root password is required") try: result = self.connection.request(API_ROOT + '/cloud/server/build', data=json.dumps(params), method='POST').object return bool(result) except HostVirtualException: self.ex_cancel_package(node) def ex_delete_node(self, node): """ Delete a node. 
:param node: Node which should be used :type node: :class:`Node` :rtype: ``bool`` """ params = {'mbpkgid': node.id} result = self.connection.request( API_ROOT + '/cloud/server/delete', data=json.dumps(params), method='POST').object return bool(result) def _to_node(self, data): state = NODE_STATE_MAP[data['status']] public_ips = [] private_ips = [] extra = {} if 'plan_id' in data: extra['size'] = data['plan_id'] if 'os_id' in data: extra['image'] = data['os_id'] if 'fqdn' in data: extra['fqdn'] = data['fqdn'] if 'location_id' in data: extra['location'] = data['location_id'] if 'ip' in data: public_ips.append(data['ip']) node = Node(id=data['mbpkgid'], name=data['fqdn'], state=state, public_ips=public_ips, private_ips=private_ips, driver=self.connection.driver, extra=extra) return node def _wait_for_node(self, node_id, timeout=30, interval=5.0): """ :param node_id: ID of the node to wait for. :type node_id: ``int`` :param timeout: Timeout (in seconds). :type timeout: ``int`` :param interval: How long to wait (in seconds) between each attempt. :type interval: ``float`` :return: Node representing the newly built server :rtype: :class:`Node` """ # poll until we get a node for i in range(0, timeout, int(interval)): try: node = self.ex_get_node(node_id) return node except HostVirtualException: time.sleep(interval) raise HostVirtualException(412, 'Timeout on getting node details') def _is_valid_fqdn(self, fqdn): if len(fqdn) > 255: return False if fqdn[-1] == ".": fqdn = fqdn[:-1] valid = re.compile("(?!-)[A-Z\d-]{1,63}(? 1: return all(valid.match(x) for x in fqdn.split(".")) else: return False apache-libcloud-2.2.1/libcloud/compute/drivers/vcloud.py0000664000175000017500000024743313153541406023232 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ VMware vCloud driver. """ import copy import sys import re import base64 import os from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlencode from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import b from libcloud.utils.py3 import next from libcloud.utils.py3 import ET urlparse = urlparse.urlparse import time from xml.parsers.expat import ExpatError from libcloud.common.base import XmlResponse, ConnectionUserAndKey from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.compute.providers import Provider from libcloud.compute.types import NodeState from libcloud.compute.base import Node, NodeDriver, NodeLocation from libcloud.compute.base import NodeSize, NodeImage """ From vcloud api "The VirtualQuantity element defines the number of MB of memory. This should be either 512 or a multiple of 1024 (1 GB)." """ VIRTUAL_MEMORY_VALS = [512] + [1024 * i for i in range(1, 9)] # Default timeout (in seconds) for long running tasks DEFAULT_TASK_COMPLETION_TIMEOUT = 600 DEFAULT_API_VERSION = '0.8' """ Valid vCloud API v1.5 input values. 
""" VIRTUAL_CPU_VALS_1_5 = [i for i in range(1, 9)] FENCE_MODE_VALS_1_5 = ['bridged', 'isolated', 'natRouted'] IP_MODE_VALS_1_5 = ['POOL', 'DHCP', 'MANUAL', 'NONE'] def fixxpath(root, xpath): """ElementTree wants namespaces in its xpaths, so here we add them.""" namespace, root_tag = root.tag[1:].split("}", 1) fixed_xpath = "/".join(["{%s}%s" % (namespace, e) for e in xpath.split("/")]) return fixed_xpath def get_url_path(url): return urlparse(url.strip()).path class Vdc(object): """ Virtual datacenter (vDC) representation """ def __init__(self, id, name, driver, allocation_model=None, cpu=None, memory=None, storage=None): self.id = id self.name = name self.driver = driver self.allocation_model = allocation_model self.cpu = cpu self.memory = memory self.storage = storage def __repr__(self): return ('' % (self.id, self.name, self.driver.name)) class Capacity(object): """ Represents CPU, Memory or Storage capacity of vDC. """ def __init__(self, limit, used, units): self.limit = limit self.used = used self.units = units def __repr__(self): return ('' % (self.limit, self.used, self.units)) class ControlAccess(object): """ Represents control access settings of a node """ class AccessLevel(object): READ_ONLY = 'ReadOnly' CHANGE = 'Change' FULL_CONTROL = 'FullControl' def __init__(self, node, everyone_access_level, subjects=None): self.node = node self.everyone_access_level = everyone_access_level if not subjects: subjects = [] self.subjects = subjects def __repr__(self): return ('' % (self.node, self.everyone_access_level, self.subjects)) class Subject(object): """ User or group subject """ def __init__(self, type, name, access_level, id=None): self.type = type self.name = name self.access_level = access_level self.id = id def __repr__(self): return ('' % (self.type, self.name, self.access_level)) class InstantiateVAppXML(object): def __init__(self, name, template, net_href, cpus, memory, password=None, row=None, group=None): self.name = name self.template = template 
self.net_href = net_href self.cpus = cpus self.memory = memory self.password = password self.row = row self.group = group self._build_xmltree() def tostring(self): return ET.tostring(self.root) def _build_xmltree(self): self.root = self._make_instantiation_root() self._add_vapp_template(self.root) instantiation_params = ET.SubElement(self.root, "InstantiationParams") # product and virtual hardware self._make_product_section(instantiation_params) self._make_virtual_hardware(instantiation_params) network_config_section = ET.SubElement(instantiation_params, "NetworkConfigSection") network_config = ET.SubElement(network_config_section, "NetworkConfig") self._add_network_association(network_config) def _make_instantiation_root(self): return ET.Element( "InstantiateVAppTemplateParams", {'name': self.name, 'xml:lang': 'en', 'xmlns': "http://www.vmware.com/vcloud/v0.8", 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"} ) def _add_vapp_template(self, parent): return ET.SubElement( parent, "VAppTemplate", {'href': self.template} ) def _make_product_section(self, parent): prod_section = ET.SubElement( parent, "ProductSection", {'xmlns:q1': "http://www.vmware.com/vcloud/v0.8", 'xmlns:ovf': "http://schemas.dmtf.org/ovf/envelope/1"} ) if self.password: self._add_property(prod_section, 'password', self.password) if self.row: self._add_property(prod_section, 'row', self.row) if self.group: self._add_property(prod_section, 'group', self.group) return prod_section def _add_property(self, parent, ovfkey, ovfvalue): return ET.SubElement( parent, "Property", {'xmlns': 'http://schemas.dmtf.org/ovf/envelope/1', 'ovf:key': ovfkey, 'ovf:value': ovfvalue} ) def _make_virtual_hardware(self, parent): vh = ET.SubElement( parent, "VirtualHardwareSection", {'xmlns:q1': "http://www.vmware.com/vcloud/v0.8"} ) self._add_cpu(vh) self._add_memory(vh) return vh def _add_cpu(self, parent): cpu_item = ET.SubElement( parent, "Item", {'xmlns': "http://schemas.dmtf.org/ovf/envelope/1"} ) 
class VCloudResponse(XmlResponse):
    """XML response wrapper for the vCloud API."""

    # Statuses the API emits for successful calls.
    _OK_STATUSES = (httplib.OK, httplib.CREATED,
                    httplib.NO_CONTENT, httplib.ACCEPTED)

    def success(self):
        return self.status in self._OK_STATUSES
self._get_auth_token() def _get_auth_headers(self): """Some providers need different headers than others""" return { 'Authorization': "Basic %s" % base64.b64encode( b('%s:%s' % (self.user_id, self.key))).decode('utf-8'), 'Content-Length': '0', 'Accept': 'application/*+xml' } def _get_auth_token(self): if not self.token: self.connection.request(method='POST', url='/api/v0.8/login', headers=self._get_auth_headers()) resp = self.connection.getresponse() headers = resp.headers body = ET.XML(resp.text) try: self.token = headers['set-cookie'] except KeyError: raise InvalidCredsError() self.driver.org = get_url_path( body.find(fixxpath(body, 'Org')).get('href') ) def add_default_headers(self, headers): headers['Cookie'] = self.token headers['Accept'] = 'application/*+xml' return headers class VCloudNodeDriver(NodeDriver): """ vCloud node driver """ type = Provider.VCLOUD name = 'vCloud' website = 'http://www.vmware.com/products/vcloud/' connectionCls = VCloudConnection org = None _vdcs = None NODE_STATE_MAP = {'0': NodeState.PENDING, '1': NodeState.PENDING, '2': NodeState.PENDING, '3': NodeState.PENDING, '4': NodeState.RUNNING} features = {'create_node': ['password']} def __new__(cls, key, secret=None, secure=True, host=None, port=None, api_version=DEFAULT_API_VERSION, **kwargs): if cls is VCloudNodeDriver: if api_version == '0.8': cls = VCloudNodeDriver elif api_version == '1.5': cls = VCloud_1_5_NodeDriver elif api_version == '5.1': cls = VCloud_5_1_NodeDriver elif api_version == '5.5': cls = VCloud_5_5_NodeDriver else: raise NotImplementedError( "No VCloudNodeDriver found for API version %s" % (api_version)) return super(VCloudNodeDriver, cls).__new__(cls) @property def vdcs(self): """ vCloud virtual data centers (vDCs). :return: list of vDC objects :rtype: ``list`` of :class:`Vdc` """ if not self._vdcs: self.connection.check_org() # make sure the org is set. 
res = self.connection.request(self.org) self._vdcs = [ self._to_vdc( self.connection.request(get_url_path(i.get('href'))).object ) for i in res.object.findall(fixxpath(res.object, "Link")) if i.get('type') == 'application/vnd.vmware.vcloud.vdc+xml' ] return self._vdcs def _to_vdc(self, vdc_elm): return Vdc(vdc_elm.get('href'), vdc_elm.get('name'), self) def _get_vdc(self, vdc_name): vdc = None if not vdc_name: # Return the first organisation VDC found vdc = self.vdcs[0] else: for v in self.vdcs: if v.name == vdc_name: vdc = v if vdc is None: raise ValueError('%s virtual data centre could not be found', vdc_name) return vdc @property def networks(self): networks = [] for vdc in self.vdcs: res = self.connection.request(get_url_path(vdc.id)).object networks.extend( [network for network in res.findall( fixxpath(res, 'AvailableNetworks/Network') )] ) return networks def _to_image(self, image): image = NodeImage(id=image.get('href'), name=image.get('name'), driver=self.connection.driver) return image def _to_node(self, elm): state = self.NODE_STATE_MAP[elm.get('status')] name = elm.get('name') public_ips = [] private_ips = [] # Following code to find private IPs works for Terremark connections = elm.findall('%s/%s' % ( '{http://schemas.dmtf.org/ovf/envelope/1}NetworkConnectionSection', fixxpath(elm, 'NetworkConnection')) ) if not connections: connections = elm.findall( fixxpath( elm, 'Children/Vm/NetworkConnectionSection/NetworkConnection')) for connection in connections: ips = [ip.text for ip in connection.findall(fixxpath(elm, "IpAddress"))] if connection.get('Network') == 'Internal': private_ips.extend(ips) else: public_ips.extend(ips) node = Node(id=elm.get('href'), name=name, state=state, public_ips=public_ips, private_ips=private_ips, driver=self.connection.driver) return node def _get_catalog_hrefs(self): res = self.connection.request(self.org) catalogs = [ i.get('href') for i in res.object.findall(fixxpath(res.object, "Link")) if i.get('type') == 
    def _wait_for_task_completion(self, task_href,
                                  timeout=DEFAULT_TASK_COMPLETION_TIMEOUT):
        """
        Poll a vCloud Task URL until it reports 'success'.

        :param task_href: href of the Task entity to poll.
        :type task_href: ``str``

        :param timeout: give up after this many seconds.
        :type timeout: ``int``

        :raises Exception: if the task reports 'error' or 'canceled',
            or if the timeout elapses before it succeeds.
        """
        start_time = time.time()
        res = self.connection.request(get_url_path(task_href))
        status = res.object.get('status')
        while status != 'success':
            if status == 'error':
                # Get error reason from the response body
                error_elem = res.object.find(fixxpath(res.object,
                                                      'Error'))
                error_msg = "Unknown error"
                if error_elem is not None:
                    error_msg = error_elem.get('message')
                raise Exception("Error status returned by task %s.: %s"
                                % (task_href, error_msg))
            if status == 'canceled':
                raise Exception("Canceled status returned by task %s."
                                % task_href)
            if (time.time() - start_time >= timeout):
                raise Exception("Timeout (%s sec) while waiting for task %s."
                                % (timeout, task_href))
            # Fixed 5-second interval between status polls.
            time.sleep(5)
            res = self.connection.request(get_url_path(task_href))
            status = res.object.get('status')
Using 'vdcs' you can specify which vDCs should be queried. :param vdcs: None, vDC or a list of vDCs to query. If None all vDCs will be queried. :type vdcs: :class:`Vdc` :rtype: ``list`` of :class:`Node` """ if not vdcs: vdcs = self.vdcs if not isinstance(vdcs, (list, tuple)): vdcs = [vdcs] nodes = [] for vdc in vdcs: res = self.connection.request(get_url_path(vdc.id)) elms = res.object.findall(fixxpath( res.object, "ResourceEntities/ResourceEntity") ) vapps = [ (i.get('name'), i.get('href')) for i in elms if i.get('type') == 'application/vnd.vmware.vcloud.vApp+xml' and i.get('name') ] for vapp_name, vapp_href in vapps: try: res = self.connection.request( get_url_path(vapp_href), headers={'Content-Type': 'application/vnd.vmware.vcloud.vApp+xml'} ) nodes.append(self._to_node(res.object)) except Exception: # The vApp was probably removed since the previous vDC # query, ignore e = sys.exc_info()[1] if not (e.args[0].tag.endswith('Error') and e.args[0].get('minorErrorCode') == 'ACCESS_TO_RESOURCE_IS_FORBIDDEN'): raise return nodes def _to_size(self, ram): ns = NodeSize( id=None, name="%s Ram" % ram, ram=ram, disk=None, bandwidth=None, price=None, driver=self.connection.driver ) return ns def list_sizes(self, location=None): sizes = [self._to_size(i) for i in VIRTUAL_MEMORY_VALS] return sizes def _get_catalogitems_hrefs(self, catalog): """Given a catalog href returns contained catalog item hrefs""" res = self.connection.request( get_url_path(catalog), headers={ 'Content-Type': 'application/vnd.vmware.vcloud.catalog+xml' } ).object cat_items = res.findall(fixxpath(res, "CatalogItems/CatalogItem")) cat_item_hrefs = [i.get('href') for i in cat_items if i.get('type') == 'application/vnd.vmware.vcloud.catalogItem+xml'] return cat_item_hrefs def _get_catalogitem(self, catalog_item): """Given a catalog item href returns elementree""" res = self.connection.request( get_url_path(catalog_item), headers={ 'Content-Type': 'application/vnd.vmware.vcloud.catalogItem+xml' } ).object 
return res def list_images(self, location=None): images = [] for vdc in self.vdcs: res = self.connection.request(get_url_path(vdc.id)).object res_ents = res.findall(fixxpath( res, "ResourceEntities/ResourceEntity") ) images += [ self._to_image(i) for i in res_ents if i.get('type') == 'application/vnd.vmware.vcloud.vAppTemplate+xml' ] for catalog in self._get_catalog_hrefs(): for cat_item in self._get_catalogitems_hrefs(catalog): res = self._get_catalogitem(cat_item) res_ents = res.findall(fixxpath(res, 'Entity')) images += [ self._to_image(i) for i in res_ents if i.get('type') == 'application/vnd.vmware.vcloud.vAppTemplate+xml' ] def idfun(image): return image.id return self._uniquer(images, idfun) def _uniquer(self, seq, idfun=None): if idfun is None: def idfun(x): return x seen = {} result = [] for item in seq: marker = idfun(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result def create_node(self, **kwargs): """ Creates and returns node. :keyword ex_network: link to a "Network" e.g., ``https://services.vcloudexpress...`` :type ex_network: ``str`` :keyword ex_vdc: Name of organisation's virtual data center where vApp VMs will be deployed. :type ex_vdc: ``str`` :keyword ex_cpus: number of virtual cpus (limit depends on provider) :type ex_cpus: ``int`` :type ex_row: ``str`` :type ex_group: ``str`` """ name = kwargs['name'] image = kwargs['image'] size = kwargs['size'] # Some providers don't require a network link try: network = kwargs.get('ex_network', self.networks[0].get('href')) except IndexError: network = '' password = None auth = self._get_and_check_auth(kwargs.get('auth')) password = auth.password instantiate_xml = InstantiateVAppXML( name=name, template=image.id, net_href=network, cpus=str(kwargs.get('ex_cpus', 1)), memory=str(size.ram), password=password, row=kwargs.get('ex_row', None), group=kwargs.get('ex_group', None) ) vdc = self._get_vdc(kwargs.get('ex_vdc', None)) # Instantiate VM and get identifier. 
class HostingComConnection(VCloudConnection):
    """
    vCloud connection subclass for Hosting.com
    """

    host = "vcloud.safesecureweb.com"

    def _get_auth_headers(self):
        """hosting.com doesn't follow the standard vCloud authentication
        API -- it expects an 'Authentication' header with raw base64
        credentials."""
        credentials = b('%s:%s' % (self.user_id, self.key))
        return {
            'Authentication': base64.b64encode(credentials),
            'Content-Length': '0',
        }
headers=self._get_auth_headers()) resp = self.connection.getresponse() headers = resp.headers # Set authorization token try: self.token = headers['x-vcloud-authorization'] except KeyError: raise InvalidCredsError() # Get the URL of the Organization body = ET.XML(resp.text) self.org_name = body.get('org') org_list_url = get_url_path( next((link for link in body.findall(fixxpath(body, 'Link')) if link.get('type') == 'application/vnd.vmware.vcloud.orgList+xml')).get('href') ) if self.proxy_url is not None: self.connection.set_http_proxy(self.proxy_url) self.connection.request(method='GET', url=org_list_url, headers=self.add_default_headers({})) body = ET.XML(self.connection.getresponse().text) self.driver.org = get_url_path( next((org for org in body.findall(fixxpath(body, 'Org')) if org.get('name') == self.org_name)).get('href') ) def add_default_headers(self, headers): headers['Accept'] = 'application/*+xml;version=1.5' headers['x-vcloud-authorization'] = self.token return headers class VCloud_5_5_Connection(VCloud_1_5_Connection): def add_default_headers(self, headers): headers['Accept'] = 'application/*+xml;version=5.5' headers['x-vcloud-authorization'] = self.token return headers class Instantiate_1_5_VAppXML(object): def __init__(self, name, template, network, vm_network=None, vm_fence=None): self.name = name self.template = template self.network = network self.vm_network = vm_network self.vm_fence = vm_fence self._build_xmltree() def tostring(self): return ET.tostring(self.root) def _build_xmltree(self): self.root = self._make_instantiation_root() if self.network is not None: instantionation_params = ET.SubElement(self.root, 'InstantiationParams') network_config_section = ET.SubElement(instantionation_params, 'NetworkConfigSection') ET.SubElement( network_config_section, 'Info', {'xmlns': 'http://schemas.dmtf.org/ovf/envelope/1'} ) network_config = ET.SubElement(network_config_section, 'NetworkConfig') self._add_network_association(network_config) 
self._add_vapp_template(self.root) def _make_instantiation_root(self): return ET.Element( 'InstantiateVAppTemplateParams', {'name': self.name, 'deploy': 'false', 'powerOn': 'false', 'xml:lang': 'en', 'xmlns': 'http://www.vmware.com/vcloud/v1.5', 'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance'} ) def _add_vapp_template(self, parent): return ET.SubElement( parent, 'Source', {'href': self.template} ) def _add_network_association(self, parent): if self.vm_network is None: # Don't set a custom vApp VM network name parent.set('networkName', self.network.get('name')) else: # Set a custom vApp VM network name parent.set('networkName', self.vm_network) configuration = ET.SubElement(parent, 'Configuration') ET.SubElement(configuration, 'ParentNetwork', {'href': self.network.get('href')}) if self.vm_fence is None: fencemode = self.network.find(fixxpath(self.network, 'Configuration/FenceMode')).text else: fencemode = self.vm_fence ET.SubElement(configuration, 'FenceMode').text = fencemode class VCloud_1_5_NodeDriver(VCloudNodeDriver): connectionCls = VCloud_1_5_Connection # Based on # http://pubs.vmware.com/vcloud-api-1-5/api_prog/ # GUID-843BE3AD-5EF6-4442-B864-BCAE44A51867.html NODE_STATE_MAP = {'-1': NodeState.UNKNOWN, '0': NodeState.PENDING, '1': NodeState.PENDING, '2': NodeState.PENDING, '3': NodeState.PENDING, '4': NodeState.RUNNING, '5': NodeState.RUNNING, '6': NodeState.UNKNOWN, '7': NodeState.UNKNOWN, '8': NodeState.STOPPED, '9': NodeState.UNKNOWN, '10': NodeState.UNKNOWN} def list_locations(self): return [NodeLocation(id=self.connection.host, name=self.connection.host, country="N/A", driver=self)] def ex_find_node(self, node_name, vdcs=None): """ Searches for node across specified vDCs. This is more effective than querying all nodes to get a single instance. :param node_name: The name of the node to search for :type node_name: ``str`` :param vdcs: None, vDC or a list of vDCs to search in. If None all vDCs will be searched. 
:type vdcs: :class:`Vdc` :return: node instance or None if not found :rtype: :class:`Node` or ``None`` """ if not vdcs: vdcs = self.vdcs if not getattr(vdcs, '__iter__', False): vdcs = [vdcs] for vdc in vdcs: res = self.connection.request(get_url_path(vdc.id)) xpath = fixxpath(res.object, "ResourceEntities/ResourceEntity") entity_elems = res.object.findall(xpath) for entity_elem in entity_elems: if entity_elem.get('type') == \ 'application/vnd.vmware.vcloud.vApp+xml' and \ entity_elem.get('name') == node_name: path = get_url_path(entity_elem.get('href')) headers = {'Content-Type': 'application/vnd.vmware.vcloud.vApp+xml'} res = self.connection.request(path, headers=headers) return self._to_node(res.object) return None def destroy_node(self, node): try: self.ex_undeploy_node(node) except Exception: # Some vendors don't implement undeploy at all yet, # so catch this and move on. pass res = self.connection.request(get_url_path(node.id), method='DELETE') return res.status == httplib.ACCEPTED def reboot_node(self, node): res = self.connection.request('%s/power/action/reset' % get_url_path(node.id), method='POST') if res.status in [httplib.ACCEPTED, httplib.NO_CONTENT]: self._wait_for_task_completion(res.object.get('href')) return True else: return False def ex_deploy_node(self, node, ex_force_customization=False): """ Deploys existing node. Equal to vApp "start" operation. :param node: The node to be deployed :type node: :class:`Node` :param ex_force_customization: Used to specify whether to force customization on deployment, if not set default value is False. 
        :type ex_force_customization: ``bool``

        :rtype: :class:`Node`
        """
        if ex_force_customization:
            # Forced customization must be requested per-VM, so deploy each
            # VM under the vApp individually.
            vms = self._get_vm_elements(node.id)
            for vm in vms:
                self._ex_deploy_node_or_vm(vm.get('href'),
                                           ex_force_customization=True)
        else:
            self._ex_deploy_node_or_vm(node.id)

        # Re-read the vApp so the returned node reflects its deployed state.
        res = self.connection.request(get_url_path(node.id))
        return self._to_node(res.object)

    def _ex_deploy_node_or_vm(self, vapp_or_vm_path,
                              ex_force_customization=False):
        # Issue the vCloud "deploy" action against a single vApp or VM href
        # and block until the resulting task completes.
        data = {'powerOn': 'true',
                'forceCustomization': str(ex_force_customization).lower(),
                'xmlns': 'http://www.vmware.com/vcloud/v1.5'}
        deploy_xml = ET.Element('DeployVAppParams', data)
        path = get_url_path(vapp_or_vm_path)
        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.deployVAppParams+xml'
        }
        res = self.connection.request('%s/action/deploy' % path,
                                      data=ET.tostring(deploy_xml),
                                      method='POST',
                                      headers=headers)
        self._wait_for_task_completion(res.object.get('href'))

    def ex_undeploy_node(self, node):
        """
        Undeploys existing node. Equal to vApp "stop" operation.

        :param node: The node to be undeployed
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        data = {'xmlns': 'http://www.vmware.com/vcloud/v1.5'}
        undeploy_xml = ET.Element('UndeployVAppParams', data)
        undeploy_power_action_xml = ET.SubElement(undeploy_xml,
                                                  'UndeployPowerAction')
        undeploy_power_action_xml.text = 'shutdown'

        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.undeployVAppParams+xml'
        }

        # Try a graceful guest "shutdown" undeploy first; if that fails for
        # any reason, retry the undeploy with a hard "powerOff" action.
        try:
            res = self.connection.request(
                '%s/action/undeploy' % get_url_path(node.id),
                data=ET.tostring(undeploy_xml),
                method='POST',
                headers=headers)
            self._wait_for_task_completion(res.object.get('href'))
        except Exception:
            undeploy_power_action_xml.text = 'powerOff'
            res = self.connection.request(
                '%s/action/undeploy' % get_url_path(node.id),
                data=ET.tostring(undeploy_xml),
                method='POST',
                headers=headers)
            self._wait_for_task_completion(res.object.get('href'))

        res = self.connection.request(get_url_path(node.id))
        return self._to_node(res.object)

    def ex_power_off_node(self, node):
        """
        Powers off all VMs under specified node. This operation is allowed
        only when the vApp/VM is powered on.

        :param node: The node to be powered off
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_power_operation(node, 'powerOff')

    def ex_power_on_node(self, node):
        """
        Powers on all VMs under specified node. This operation is allowed
        only when the vApp/VM is powered off or suspended.

        :param node: The node to be powered on
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_power_operation(node, 'powerOn')

    def ex_shutdown_node(self, node):
        """
        Shutdowns all VMs under specified node. This operation is allowed
        only when the vApp/VM is powered on.

        :param node: The node to be shut down
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_power_operation(node, 'shutdown')

    def ex_suspend_node(self, node):
        """
        Suspends all VMs under specified node. This operation is allowed
        only when the vApp/VM is powered on.

        :param node: The node to be suspended
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_power_operation(node, 'suspend')

    def _perform_power_operation(self, node, operation):
        # POST the given power action, wait for its task to finish and
        # return the refreshed node.
        res = self.connection.request(
            '%s/power/action/%s' % (get_url_path(node.id), operation),
            method='POST')
        self._wait_for_task_completion(res.object.get('href'))
        res = self.connection.request(get_url_path(node.id))
        return self._to_node(res.object)

    def ex_get_control_access(self, node):
        """
        Returns the control access settings for specified node.
:param node: node to get the control access for :type node: :class:`Node` :rtype: :class:`ControlAccess` """ res = self.connection.request( '%s/controlAccess' % get_url_path(node.id)) everyone_access_level = None is_shared_elem = res.object.find( fixxpath(res.object, "IsSharedToEveryone")) if is_shared_elem is not None and is_shared_elem.text == 'true': everyone_access_level = res.object.find( fixxpath(res.object, "EveryoneAccessLevel")).text # Parse all subjects subjects = [] xpath = fixxpath(res.object, "AccessSettings/AccessSetting") for elem in res.object.findall(xpath): access_level = elem.find(fixxpath(res.object, "AccessLevel")).text subject_elem = elem.find(fixxpath(res.object, "Subject")) if subject_elem.get('type') == \ 'application/vnd.vmware.admin.group+xml': subj_type = 'group' else: subj_type = 'user' path = get_url_path(subject_elem.get('href')) res = self.connection.request(path) name = res.object.get('name') subject = Subject(type=subj_type, name=name, access_level=access_level, id=subject_elem.get('href')) subjects.append(subject) return ControlAccess(node, everyone_access_level, subjects) def ex_set_control_access(self, node, control_access): """ Sets control access for the specified node. 
:param node: node :type node: :class:`Node` :param control_access: control access settings :type control_access: :class:`ControlAccess` :rtype: ``None`` """ xml = ET.Element('ControlAccessParams', {'xmlns': 'http://www.vmware.com/vcloud/v1.5'}) shared_to_everyone = ET.SubElement(xml, 'IsSharedToEveryone') if control_access.everyone_access_level: shared_to_everyone.text = 'true' everyone_access_level = ET.SubElement(xml, 'EveryoneAccessLevel') everyone_access_level.text = control_access.everyone_access_level else: shared_to_everyone.text = 'false' # Set subjects if control_access.subjects: access_settings_elem = ET.SubElement(xml, 'AccessSettings') for subject in control_access.subjects: setting = ET.SubElement(access_settings_elem, 'AccessSetting') if subject.id: href = subject.id else: res = self.ex_query(type=subject.type, filter='name==' + subject.name) if not res: raise LibcloudError('Specified subject "%s %s" not found ' % (subject.type, subject.name)) href = res[0]['href'] ET.SubElement(setting, 'Subject', {'href': href}) ET.SubElement(setting, 'AccessLevel').text = subject.access_level headers = { 'Content-Type': 'application/vnd.vmware.vcloud.controlAccess+xml' } self.connection.request( '%s/action/controlAccess' % get_url_path(node.id), data=ET.tostring(xml), headers=headers, method='POST') def ex_get_metadata(self, node): """ :param node: node :type node: :class:`Node` :return: dictionary mapping metadata keys to metadata values :rtype: dictionary mapping ``str`` to ``str`` """ res = self.connection.request('%s/metadata' % (get_url_path(node.id))) xpath = fixxpath(res.object, 'MetadataEntry') metadata_entries = res.object.findall(xpath) res_dict = {} for entry in metadata_entries: key = entry.findtext(fixxpath(res.object, 'Key')) value = entry.findtext(fixxpath(res.object, 'Value')) res_dict[key] = value return res_dict def ex_set_metadata_entry(self, node, key, value): """ :param node: node :type node: :class:`Node` :param key: metadata key to be set 
        :type key: ``str``

        :param value: metadata value to be set
        :type value: ``str``

        :rtype: ``None``
        """
        metadata_elem = ET.Element(
            'Metadata',
            {'xmlns': "http://www.vmware.com/vcloud/v1.5",
             'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"}
        )
        entry = ET.SubElement(metadata_elem, 'MetadataEntry')
        key_elem = ET.SubElement(entry, 'Key')
        key_elem.text = key
        value_elem = ET.SubElement(entry, 'Value')
        value_elem.text = value

        # send it back to the server and wait for the update task to finish
        res = self.connection.request(
            '%s/metadata' % get_url_path(node.id),
            data=ET.tostring(metadata_elem),
            headers={
                'Content-Type': 'application/vnd.vmware.vcloud.metadata+xml'
            },
            method='POST')
        self._wait_for_task_completion(res.object.get('href'))

    def ex_query(self, type, filter=None, page=1, page_size=100,
                 sort_asc=None, sort_desc=None):
        """
        Queries vCloud for specified type. See
        http://www.vmware.com/pdf/vcd_15_api_guide.pdf for details. Each
        element of the returned list is a dictionary with all attributes from
        the record.

        :param type: type to query (e.g. user, group, vApp etc.)
        :type  type: ``str``

        :param filter: filter expression (see documentation for syntax)
        :type  filter: ``str``

        :param page: page number
        :type  page: ``int``

        :param page_size: page size
        :type  page_size: ``int``

        :param sort_asc: sort in ascending order by specified field
        :type  sort_asc: ``str``

        :param sort_desc: sort in descending order by specified field
        :type  sort_desc: ``str``

        :rtype: ``list`` of dict
        """
        # This is a workaround for filter parameter encoding
        # the urllib encodes (name==Developers%20Only) into
        # %28name%3D%3DDevelopers%20Only%29) which is not accepted by vCloud
        params = {
            'type': type,
            'pageSize': page_size,
            'page': page,
        }
        if sort_asc:
            params['sortAsc'] = sort_asc
        if sort_desc:
            params['sortDesc'] = sort_desc

        url = '/api/query?' + urlencode(params)
        if filter:
            # The filter must be parenthesised and is appended manually
            # (unencoded) because of the vCloud quirk described above.
            if not filter.startswith('('):
                filter = '(' + filter + ')'
            url += '&filter=' + filter.replace(' ', '+')

        results = []
        res = self.connection.request(url)
        for elem in res.object:
            # Skip pagination "Link" elements; every other child is a
            # query record whose attributes form the result dict.
            if not elem.tag.endswith('Link'):
                result = elem.attrib
                # Strip the namespace from the tag to get the record type.
                result['type'] = elem.tag.split('}')[1]
                results.append(result)
        return results

    def create_node(self, **kwargs):
        """
        Creates and returns node. If the source image is:
          - vApp template - a new vApp is instantiated from template
          - existing vApp - a new vApp is cloned from the source vApp. Can
            not clone more vApps in parallel otherwise resource busy error is
            raised.

        @inherits: :class:`NodeDriver.create_node`

        :keyword    image:  OS Image to boot on node. (required). Can be a
                            NodeImage or existing Node that will be cloned.
        :type       image:  :class:`NodeImage` or :class:`Node`

        :keyword    ex_network: Organisation's network name for attaching vApp
                                VMs to.
        :type       ex_network: ``str``

        :keyword    ex_vdc: Name of organisation's virtual data center where
                            vApp VMs will be deployed.
        :type       ex_vdc: ``str``

        :keyword    ex_vm_names: list of names to be used as a VM and computer
                                 name. The name must be max. 15 characters
                                 long and follow the host name requirements.
        :type       ex_vm_names: ``list`` of ``str``

        :keyword    ex_vm_cpu: number of virtual CPUs/cores to allocate for
                               each vApp VM.
        :type       ex_vm_cpu: ``int``

        :keyword    ex_vm_memory: amount of memory in MB to allocate for each
                                  vApp VM.
        :type       ex_vm_memory: ``int``

        :keyword    ex_vm_script: full path to file containing guest
                                  customisation script for each vApp VM.
                                  Useful for creating users & pushing out
                                  public SSH keys etc.
        :type       ex_vm_script: ``str``

        :keyword    ex_vm_network: Override default vApp VM network name.
                                   Useful for when you've imported an OVF
                                   originating from outside of the vCloud.
        :type       ex_vm_network: ``str``

        :keyword    ex_vm_fence: Fence mode for connecting the vApp VM network
                                 (ex_vm_network) to the parent
                                 organisation network (ex_network).
:type ex_vm_fence: ``str`` :keyword ex_vm_ipmode: IP address allocation mode for all vApp VM network connections. :type ex_vm_ipmode: ``str`` :keyword ex_deploy: set to False if the node shouldn't be deployed (started) after creation :type ex_deploy: ``bool`` :keyword ex_force_customization: Used to specify whether to force customization on deployment, if not set default value is False. :type ex_force_customization: ``bool`` :keyword ex_clone_timeout: timeout in seconds for clone/instantiate VM operation. Cloning might be a time consuming operation especially when linked clones are disabled or VMs are created on different datastores. Overrides the default task completion value. :type ex_clone_timeout: ``int`` :keyword ex_admin_password: set the node admin password explicitly. :type ex_admin_password: ``str`` """ name = kwargs['name'] image = kwargs['image'] ex_vm_names = kwargs.get('ex_vm_names') ex_vm_cpu = kwargs.get('ex_vm_cpu') ex_vm_memory = kwargs.get('ex_vm_memory') ex_vm_script = kwargs.get('ex_vm_script') ex_vm_fence = kwargs.get('ex_vm_fence', None) ex_network = kwargs.get('ex_network', None) ex_vm_network = kwargs.get('ex_vm_network', None) ex_vm_ipmode = kwargs.get('ex_vm_ipmode', None) ex_deploy = kwargs.get('ex_deploy', True) ex_force_customization = kwargs.get('ex_force_customization', False) ex_vdc = kwargs.get('ex_vdc', None) ex_clone_timeout = kwargs.get('ex_clone_timeout', DEFAULT_TASK_COMPLETION_TIMEOUT) ex_admin_password = kwargs.get('ex_admin_password', None) self._validate_vm_names(ex_vm_names) self._validate_vm_cpu(ex_vm_cpu) self._validate_vm_memory(ex_vm_memory) self._validate_vm_fence(ex_vm_fence) self._validate_vm_ipmode(ex_vm_ipmode) ex_vm_script = self._validate_vm_script(ex_vm_script) # Some providers don't require a network link if ex_network: network_href = self._get_network_href(ex_network) network_elem = self.connection.request( get_url_path(network_href)).object else: network_elem = None vdc = self._get_vdc(ex_vdc) if 
self._is_node(image): vapp_name, vapp_href = self._clone_node(name, image, vdc, ex_clone_timeout) else: vapp_name, vapp_href = self._instantiate_node(name, image, network_elem, vdc, ex_vm_network, ex_vm_fence, ex_clone_timeout) self._change_vm_names(vapp_href, ex_vm_names) self._change_vm_cpu(vapp_href, ex_vm_cpu) self._change_vm_memory(vapp_href, ex_vm_memory) self._change_vm_script(vapp_href, ex_vm_script) self._change_vm_ipmode(vapp_href, ex_vm_ipmode) if ex_admin_password is not None: self.ex_change_vm_admin_password(vapp_href, ex_admin_password) # Power on the VM. if ex_deploy: res = self.connection.request(get_url_path(vapp_href)) node = self._to_node(res.object) # Retry 3 times: when instantiating large number of VMs at the same # time some may fail on resource allocation retry = 3 while True: try: self.ex_deploy_node(node, ex_force_customization) break except Exception: if retry <= 0: raise retry -= 1 time.sleep(10) res = self.connection.request(get_url_path(vapp_href)) node = self._to_node(res.object) return node def _instantiate_node(self, name, image, network_elem, vdc, vm_network, vm_fence, instantiate_timeout): instantiate_xml = Instantiate_1_5_VAppXML( name=name, template=image.id, network=network_elem, vm_network=vm_network, vm_fence=vm_fence ) # Instantiate VM and get identifier. 
headers = { 'Content-Type': 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml' } res = self.connection.request( '%s/action/instantiateVAppTemplate' % get_url_path(vdc.id), data=instantiate_xml.tostring(), method='POST', headers=headers ) vapp_name = res.object.get('name') vapp_href = res.object.get('href') task_href = res.object.find(fixxpath(res.object, "Tasks/Task")).get( 'href') self._wait_for_task_completion(task_href, instantiate_timeout) return vapp_name, vapp_href def _clone_node(self, name, sourceNode, vdc, clone_timeout): clone_xml = ET.Element( "CloneVAppParams", {'name': name, 'deploy': 'false', 'powerOn': 'false', 'xmlns': "http://www.vmware.com/vcloud/v1.5", 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"} ) ET.SubElement(clone_xml, 'Description').text = 'Clone of ' + sourceNode.name ET.SubElement(clone_xml, 'Source', {'href': sourceNode.id}) headers = { 'Content-Type': 'application/vnd.vmware.vcloud.cloneVAppParams+xml' } res = self.connection.request( '%s/action/cloneVApp' % get_url_path(vdc.id), data=ET.tostring(clone_xml), method='POST', headers=headers ) vapp_name = res.object.get('name') vapp_href = res.object.get('href') task_href = res.object.find( fixxpath(res.object, "Tasks/Task")).get('href') self._wait_for_task_completion(task_href, clone_timeout) res = self.connection.request(get_url_path(vapp_href)) vms = res.object.findall(fixxpath(res.object, "Children/Vm")) # Fix the networking for VMs for i, vm in enumerate(vms): # Remove network network_xml = ET.Element("NetworkConnectionSection", { 'ovf:required': 'false', 'xmlns': "http://www.vmware.com/vcloud/v1.5", 'xmlns:ovf': 'http://schemas.dmtf.org/ovf/envelope/1'}) ET.SubElement(network_xml, "ovf:Info").text = \ 'Specifies the available VM network connections' headers = { 'Content-Type': 'application/vnd.vmware.vcloud.networkConnectionSection+xml' } res = self.connection.request( '%s/networkConnectionSection' % get_url_path(vm.get('href')), 
data=ET.tostring(network_xml), method='PUT', headers=headers ) self._wait_for_task_completion(res.object.get('href')) # Re-add network network_xml = vm.find(fixxpath(vm, 'NetworkConnectionSection')) network_conn_xml = network_xml.find( fixxpath(network_xml, 'NetworkConnection')) network_conn_xml.set('needsCustomization', 'true') network_conn_xml.remove( network_conn_xml.find(fixxpath(network_xml, 'IpAddress'))) network_conn_xml.remove( network_conn_xml.find(fixxpath(network_xml, 'MACAddress'))) headers = { 'Content-Type': 'application/vnd.vmware.vcloud.networkConnectionSection+xml' } res = self.connection.request( '%s/networkConnectionSection' % get_url_path(vm.get('href')), data=ET.tostring(network_xml), method='PUT', headers=headers ) self._wait_for_task_completion(res.object.get('href')) return vapp_name, vapp_href def ex_set_vm_cpu(self, vapp_or_vm_id, vm_cpu): """ Sets the number of virtual CPUs for the specified VM or VMs under the vApp. If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to this vApp will be modified. Please ensure that hot-adding a virtual CPU is enabled for the powered on virtual machines. Otherwise use this method on undeployed vApp. :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs will be modified :type vapp_or_vm_id: ``str`` :keyword vm_cpu: number of virtual CPUs/cores to allocate for specified VMs :type vm_cpu: ``int`` :rtype: ``None`` """ self._validate_vm_cpu(vm_cpu) self._change_vm_cpu(vapp_or_vm_id, vm_cpu) def ex_set_vm_memory(self, vapp_or_vm_id, vm_memory): """ Sets the virtual memory in MB to allocate for the specified VM or VMs under the vApp. If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to this vApp will be modified. Please ensure that hot-change of virtual memory is enabled for the powered on virtual machines. Otherwise use this method on undeployed vApp. 
:keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs will be modified :type vapp_or_vm_id: ``str`` :keyword vm_memory: virtual memory in MB to allocate for the specified VM or VMs :type vm_memory: ``int`` :rtype: ``None`` """ self._validate_vm_memory(vm_memory) self._change_vm_memory(vapp_or_vm_id, vm_memory) def ex_add_vm_disk(self, vapp_or_vm_id, vm_disk_size): """ Adds a virtual disk to the specified VM or VMs under the vApp. If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to this vApp will be modified. :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs will be modified :type vapp_or_vm_id: ``str`` :keyword vm_disk_size: the disk capacity in GB that will be added to the specified VM or VMs :type vm_disk_size: ``int`` :rtype: ``None`` """ self._validate_vm_disk_size(vm_disk_size) self._add_vm_disk(vapp_or_vm_id, vm_disk_size) @staticmethod def _validate_vm_names(names): if names is None: return hname_re = re.compile( '^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9]*)[\-])*([A-Za-z]|[A-Za-z][A-Za-z0-9]*[A-Za-z0-9])$') # NOQA for name in names: if len(name) > 15: raise ValueError( 'The VM name "' + name + '" is too long for the computer ' 'name (max 15 chars allowed).') if not hname_re.match(name): raise ValueError('The VM name "' + name + '" can not be ' 'used. 
"' + name + '" is not a valid ' 'computer name for the VM.') @staticmethod def _validate_vm_memory(vm_memory): if vm_memory is None: return elif vm_memory not in VIRTUAL_MEMORY_VALS: raise ValueError( '%s is not a valid vApp VM memory value' % vm_memory) @staticmethod def _validate_vm_cpu(vm_cpu): if vm_cpu is None: return elif vm_cpu not in VIRTUAL_CPU_VALS_1_5: raise ValueError('%s is not a valid vApp VM CPU value' % vm_cpu) @staticmethod def _validate_vm_disk_size(vm_disk): if vm_disk is None: return elif int(vm_disk) < 0: raise ValueError('%s is not a valid vApp VM disk space value', vm_disk) @staticmethod def _validate_vm_script(vm_script): if vm_script is None: return # Try to locate the script file if not os.path.isabs(vm_script): vm_script = os.path.expanduser(vm_script) vm_script = os.path.abspath(vm_script) if not os.path.isfile(vm_script): raise LibcloudError( "%s the VM script file does not exist" % vm_script) try: open(vm_script).read() except: raise return vm_script @staticmethod def _validate_vm_fence(vm_fence): if vm_fence is None: return elif vm_fence not in FENCE_MODE_VALS_1_5: raise ValueError('%s is not a valid fencing mode value' % vm_fence) @staticmethod def _validate_vm_ipmode(vm_ipmode): if vm_ipmode is None: return elif vm_ipmode == 'MANUAL': raise NotImplementedError( 'MANUAL IP mode: The interface for supplying ' 'IPAddress does not exist yet') elif vm_ipmode not in IP_MODE_VALS_1_5: raise ValueError( '%s is not a valid IP address allocation mode value' % vm_ipmode) def _change_vm_names(self, vapp_or_vm_id, vm_names): if vm_names is None: return vms = self._get_vm_elements(vapp_or_vm_id) for i, vm in enumerate(vms): if len(vm_names) <= i: return # Get GuestCustomizationSection res = self.connection.request( '%s/guestCustomizationSection' % get_url_path(vm.get('href'))) # Update GuestCustomizationSection res.object.find( fixxpath(res.object, 'ComputerName')).text = vm_names[i] # Remove AdminPassword from customization section admin_pass = 
res.object.find(fixxpath(res.object, 'AdminPassword')) if admin_pass is not None: res.object.remove(admin_pass) headers = { 'Content-Type': 'application/vnd.vmware.vcloud.guestCustomizationSection+xml' } res = self.connection.request( '%s/guestCustomizationSection' % get_url_path(vm.get('href')), data=ET.tostring(res.object), method='PUT', headers=headers ) self._wait_for_task_completion(res.object.get('href')) # Update Vm name req_xml = ET.Element("Vm", { 'name': vm_names[i], 'xmlns': "http://www.vmware.com/vcloud/v1.5"}) res = self.connection.request( get_url_path(vm.get('href')), data=ET.tostring(req_xml), method='PUT', headers={ 'Content-Type': 'application/vnd.vmware.vcloud.vm+xml'} ) self._wait_for_task_completion(res.object.get('href')) def _change_vm_cpu(self, vapp_or_vm_id, vm_cpu): if vm_cpu is None: return vms = self._get_vm_elements(vapp_or_vm_id) for vm in vms: # Get virtualHardwareSection/cpu section res = self.connection.request( '%s/virtualHardwareSection/cpu' % get_url_path(vm.get('href'))) # Update VirtualQuantity field xpath = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' 'CIM_ResourceAllocationSettingData}VirtualQuantity') res.object.find(xpath).text = str(vm_cpu) headers = { 'Content-Type': 'application/vnd.vmware.vcloud.rasdItem+xml' } res = self.connection.request( '%s/virtualHardwareSection/cpu' % get_url_path(vm.get('href')), data=ET.tostring(res.object), method='PUT', headers=headers ) self._wait_for_task_completion(res.object.get('href')) def _change_vm_memory(self, vapp_or_vm_id, vm_memory): if vm_memory is None: return vms = self._get_vm_elements(vapp_or_vm_id) for vm in vms: # Get virtualHardwareSection/memory section res = self.connection.request( '%s/virtualHardwareSection/memory' % get_url_path(vm.get('href'))) # Update VirtualQuantity field xpath = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' 'CIM_ResourceAllocationSettingData}VirtualQuantity') res.object.find(xpath).text = str(vm_memory) headers = { 
'Content-Type': 'application/vnd.vmware.vcloud.rasdItem+xml' } res = self.connection.request( '%s/virtualHardwareSection/memory' % get_url_path( vm.get('href')), data=ET.tostring(res.object), method='PUT', headers=headers ) self._wait_for_task_completion(res.object.get('href')) def _add_vm_disk(self, vapp_or_vm_id, vm_disk): if vm_disk is None: return rasd_ns = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' 'CIM_ResourceAllocationSettingData}') vms = self._get_vm_elements(vapp_or_vm_id) for vm in vms: # Get virtualHardwareSection/disks section res = self.connection.request( '%s/virtualHardwareSection/disks' % get_url_path(vm.get('href'))) existing_ids = [] new_disk = None for item in res.object.findall(fixxpath(res.object, 'Item')): # Clean Items from unnecessary stuff for elem in item: if elem.tag == '%sInstanceID' % rasd_ns: existing_ids.append(int(elem.text)) if elem.tag in ['%sAddressOnParent' % rasd_ns, '%sParent' % rasd_ns]: item.remove(elem) if item.find('%sHostResource' % rasd_ns) is not None: new_disk = item new_disk = copy.deepcopy(new_disk) disk_id = max(existing_ids) + 1 new_disk.find('%sInstanceID' % rasd_ns).text = str(disk_id) new_disk.find('%sElementName' % rasd_ns).text = 'Hard Disk ' + str(disk_id) new_disk.find('%sHostResource' % rasd_ns).set( fixxpath(new_disk, 'capacity'), str(int(vm_disk) * 1024)) res.object.append(new_disk) headers = { 'Content-Type': 'application/vnd.vmware.vcloud.rasditemslist+xml' } res = self.connection.request( '%s/virtualHardwareSection/disks' % get_url_path( vm.get('href')), data=ET.tostring(res.object), method='PUT', headers=headers ) self._wait_for_task_completion(res.object.get('href')) def _change_vm_script(self, vapp_or_vm_id, vm_script): if vm_script is None: return vms = self._get_vm_elements(vapp_or_vm_id) try: script = open(vm_script).read() except: return # ElementTree escapes script characters automatically. 
Escape # requirements: # http://www.vmware.com/support/vcd/doc/rest-api-doc-1.5-html/types/ # GuestCustomizationSectionType.html for vm in vms: # Get GuestCustomizationSection res = self.connection.request( '%s/guestCustomizationSection' % get_url_path(vm.get('href'))) # Attempt to update any existing CustomizationScript element try: res.object.find( fixxpath(res.object, 'CustomizationScript')).text = script except: # CustomizationScript section does not exist, insert it just # before ComputerName for i, e in enumerate(res.object): if e.tag == \ '{http://www.vmware.com/vcloud/v1.5}ComputerName': break e = ET.Element( '{http://www.vmware.com/vcloud/v1.5}CustomizationScript') e.text = script res.object.insert(i, e) # Remove AdminPassword from customization section due to an API # quirk admin_pass = res.object.find(fixxpath(res.object, 'AdminPassword')) if admin_pass is not None: res.object.remove(admin_pass) # Update VM's GuestCustomizationSection headers = { 'Content-Type': 'application/vnd.vmware.vcloud.guestCustomizationSection+xml' } res = self.connection.request( '%s/guestCustomizationSection' % get_url_path(vm.get('href')), data=ET.tostring(res.object), method='PUT', headers=headers ) self._wait_for_task_completion(res.object.get('href')) def _change_vm_ipmode(self, vapp_or_vm_id, vm_ipmode): if vm_ipmode is None: return vms = self._get_vm_elements(vapp_or_vm_id) for vm in vms: res = self.connection.request( '%s/networkConnectionSection' % get_url_path(vm.get('href'))) net_conns = res.object.findall( fixxpath(res.object, 'NetworkConnection')) for c in net_conns: c.find(fixxpath(c, 'IpAddressAllocationMode')).text = vm_ipmode headers = { 'Content-Type': 'application/vnd.vmware.vcloud.networkConnectionSection+xml' } res = self.connection.request( '%s/networkConnectionSection' % get_url_path(vm.get('href')), data=ET.tostring(res.object), method='PUT', headers=headers ) self._wait_for_task_completion(res.object.get('href')) def _update_or_insert_section(self, res, 
section, prev_section, text): try: res.object.find( fixxpath(res.object, section)).text = text except: # "section" section does not exist, insert it just # before "prev_section" for i, e in enumerate(res.object): tag = '{http://www.vmware.com/vcloud/v1.5}%s' % prev_section if e.tag == tag: break e = ET.Element( '{http://www.vmware.com/vcloud/v1.5}%s' % section) e.text = text res.object.insert(i, e) return res def ex_change_vm_admin_password(self, vapp_or_vm_id, ex_admin_password): """ Changes the admin (or root) password of VM or VMs under the vApp. If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to this vApp will be modified. :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs will be modified :type vapp_or_vm_id: ``str`` :keyword ex_admin_password: admin password to be used. :type ex_admin_password: ``str`` :rtype: ``None`` """ if ex_admin_password is None: return vms = self._get_vm_elements(vapp_or_vm_id) for vm in vms: # Get GuestCustomizationSection res = self.connection.request( '%s/guestCustomizationSection' % get_url_path(vm.get('href'))) headers = { 'Content-Type': 'application/vnd.vmware.vcloud.guestCustomizationSection+xml' } # Fix API quirk. # If AdminAutoLogonEnabled==False the guestCustomizationSection # must have AdminAutoLogonCount==0, even though # it might have AdminAutoLogonCount==1 when requesting it for # the first time. auto_logon = res.object.find( fixxpath(res.object, "AdminAutoLogonEnabled")) if auto_logon is not None and auto_logon.text == 'false': self._update_or_insert_section(res, "AdminAutoLogonCount", "ResetPasswordRequired", '0') # If we are establishing a password we do not want it # to be automatically chosen. self._update_or_insert_section(res, 'AdminPasswordAuto', 'AdminPassword', 'false') # API does not allow to set AdminPassword if # AdminPasswordEnabled is not enabled. 
    def _get_network_href(self, network_name):
        """
        Resolve an organisation network name to its href.

        :param network_name: name of the org network to look up.
        :type network_name: ``str``
        :return: href of the matching network.
        :raises ValueError: when no network with that name exists.
        """
        network_href = None

        # Find the organisation's network href
        res = self.connection.request(self.org)
        links = res.object.findall(fixxpath(res.object, 'Link'))
        # NOTE: when several links share the name, the *last* match wins.
        for l in links:
            if l.attrib['type'] == \
                'application/vnd.vmware.vcloud.orgNetwork+xml' \
                    and l.attrib['name'] == network_name:
                network_href = l.attrib['href']

        if network_href is None:
            raise ValueError(
                '%s is not a valid organisation network name' % network_name)
        else:
            return network_href

    def _get_vm_elements(self, vapp_or_vm_id):
        """
        Return the list of ``Vm`` XML elements behind ``vapp_or_vm_id``:
        all children VMs when the id points at a vApp, or a single-element
        list when it points directly at a VM.

        :raises ValueError: when the referenced entity is neither.
        """
        res = self.connection.request(get_url_path(vapp_or_vm_id))
        if res.object.tag.endswith('VApp'):
            vms = res.object.findall(fixxpath(res.object, 'Children/Vm'))
        elif res.object.tag.endswith('Vm'):
            vms = [res.object]
        else:
            raise ValueError(
                'Specified ID value is not a valid VApp or Vm identifier.')
        return vms

    def _is_node(self, node_or_image):
        # True when the argument is a Node (vs. a NodeImage).
        return isinstance(node_or_image, Node)
    def _to_node(self, node_elm):
        """
        Convert a vApp XML element into a :class:`Node`.

        The node's IP addresses are the union of the addresses of all child
        VMs; per-VM details (id, name, state, ips, os_type) and any
        snapshots are exposed through ``node.extra``.
        """
        # Parse snapshots and VMs as extra
        if node_elm.find(fixxpath(node_elm, "SnapshotSection")) is None:
            snapshots = None
        else:
            snapshots = []
            for snapshot_elem in node_elm.findall(
                    fixxpath(node_elm, 'SnapshotSection/Snapshot')):
                snapshots.append({
                    "created": snapshot_elem.get("created"),
                    "poweredOn": snapshot_elem.get("poweredOn"),
                    "size": snapshot_elem.get("size"),
                })

        vms = []
        for vm_elem in node_elm.findall(fixxpath(node_elm, 'Children/Vm')):
            public_ips = []
            private_ips = []

            xpath = fixxpath(vm_elem,
                             'NetworkConnectionSection/NetworkConnection')
            for connection in vm_elem.findall(xpath):
                ip = connection.find(fixxpath(connection, "IpAddress"))
                if ip is not None:
                    private_ips.append(ip.text)
                external_ip = connection.find(
                    fixxpath(connection, "ExternalIpAddress"))
                # Prefer the NAT'ed external address as the public IP; fall
                # back to the connection's own address when there is none.
                if external_ip is not None:
                    public_ips.append(external_ip.text)
                elif ip is not None:
                    public_ips.append(ip.text)

            xpath = ('{http://schemas.dmtf.org/ovf/envelope/1}'
                     'OperatingSystemSection')
            os_type_elem = vm_elem.find(xpath)
            if os_type_elem is not None:
                os_type = os_type_elem.get(
                    '{http://www.vmware.com/schema/ovf}osType')
            else:
                os_type = None
            vm = {
                'id': vm_elem.get('href'),
                'name': vm_elem.get('name'),
                'state': self.NODE_STATE_MAP[vm_elem.get('status')],
                'public_ips': public_ips,
                'private_ips': private_ips,
                'os_type': os_type
            }
            vms.append(vm)

        # Take the node IP addresses from all VMs
        public_ips = []
        private_ips = []
        for vm in vms:
            public_ips.extend(vm['public_ips'])
            private_ips.extend(vm['private_ips'])

        # Find vDC
        vdc_id = next(link.get('href')
                      for link in node_elm.findall(fixxpath(node_elm, 'Link'))
                      if link.get('type') ==
                      'application/vnd.vmware.vcloud.vdc+xml')
        vdc = next(vdc for vdc in self.vdcs if vdc.id == vdc_id)
        extra = {'vdc': vdc.name, 'vms': vms}
        if snapshots is not None:
            extra['snapshots'] = snapshots
        node = Node(id=node_elm.get('href'),
                    name=node_elm.get('name'),
                    state=self.NODE_STATE_MAP[node_elm.get('status')],
                    public_ips=public_ips,
                    private_ips=private_ips,
                    driver=self.connection.driver,
                    extra=extra)
        return node

    def _to_vdc(self, vdc_elm):
        """
        Convert a Vdc XML element into a :class:`Vdc`, including its CPU,
        memory and storage :class:`Capacity` values (``None`` when a
        capacity section is absent).
        """

        def get_capacity_values(capacity_elm):
            # Helper: parse one CapacityWithUsage element into a Capacity.
            if capacity_elm is None:
                return None
            limit = int(capacity_elm.findtext(fixxpath(capacity_elm,
                                                       'Limit')))
            used = int(capacity_elm.findtext(fixxpath(capacity_elm, 'Used')))
            units = capacity_elm.findtext(fixxpath(capacity_elm, 'Units'))
            return Capacity(limit, used, units)

        cpu = get_capacity_values(
            vdc_elm.find(fixxpath(vdc_elm, 'ComputeCapacity/Cpu')))
        memory = get_capacity_values(
            vdc_elm.find(fixxpath(vdc_elm, 'ComputeCapacity/Memory')))
        storage = get_capacity_values(
            vdc_elm.find(fixxpath(vdc_elm, 'StorageCapacity')))

        return Vdc(id=vdc_elm.get('href'),
                   name=vdc_elm.get('name'),
                   driver=self,
                   allocation_model=vdc_elm.findtext(
                       fixxpath(vdc_elm, 'AllocationModel')),
                   cpu=cpu,
                   memory=memory,
                   storage=storage)
class VCloud_5_1_NodeDriver(VCloud_1_5_NodeDriver):

    @staticmethod
    def _validate_vm_memory(vm_memory):
        """
        Validate a vApp VM memory size for vCD 5.1.

        :raises ValueError: when ``vm_memory`` is not a multiple of 4 MB.
        """
        if vm_memory is None:
            return None
        elif (vm_memory % 4) != 0:
            # The vcd 5.1 virtual machine memory size must be a multiple of 4
            # MB
            raise ValueError(
                '%s is not a valid vApp VM memory value' % (vm_memory))


class VCloud_5_5_NodeDriver(VCloud_5_1_NodeDriver):
    '''Use 5.5 Connection class to explicitly set 5.5 for the version in
    Accept headers
    '''
    connectionCls = VCloud_5_5_Connection

    def ex_create_snapshot(self, node):
        """
        Creates new snapshot of a virtual machine or of all
        the virtual machines in a vApp. Prior to creation of the new
        snapshots, any existing user created snapshots associated
        with the virtual machines are removed.

        :param node: node
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        snapshot_xml = ET.Element(
            "CreateSnapshotParams",
            {'memory': 'true',
             'name': 'name',
             'quiesce': 'true',
             'xmlns': "http://www.vmware.com/vcloud/v1.5",
             'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"}
        )
        ET.SubElement(snapshot_xml, 'Description').text = 'Description'
        content_type = 'application/vnd.vmware.vcloud.createSnapshotParams+xml'
        headers = {
            'Content-Type': content_type
        }
        return self._perform_snapshot_operation(node,
                                                "createSnapshot",
                                                snapshot_xml,
                                                headers)

    def ex_remove_snapshots(self, node):
        """
        Removes all user created snapshots for a vApp or virtual machine.

        :param node: node
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_snapshot_operation(node,
                                                "removeAllSnapshots",
                                                None,
                                                None)

    def ex_revert_to_snapshot(self, node):
        """
        Reverts a vApp or virtual machine to the current snapshot, if any.

        :param node: node
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_snapshot_operation(node,
                                                "revertToCurrentSnapshot",
                                                None,
                                                None)

    def _perform_snapshot_operation(self, node, operation, xml_data, headers):
        # POST the snapshot action, wait for the returned Task to complete,
        # then re-fetch the node so the caller sees its updated state.
        res = self.connection.request(
            '%s/action/%s' % (get_url_path(node.id), operation),
            data=ET.tostring(xml_data) if xml_data else None,
            method='POST',
            headers=headers)
        self._wait_for_task_completion(res.object.get('href'))
        res = self.connection.request(get_url_path(node.id))
        return self._to_node(res.object)

    def ex_acquire_mks_ticket(self, vapp_or_vm_id, vm_num=0):
        """
        Retrieve a mks ticket that you can use to gain access to the console
        of a running VM. If successful, returns a dict with the following
        keys:

        - host: host (or proxy) through which the console connection
          is made
        - vmx: a reference to the VMX file of the VM for which this
          ticket was issued
        - ticket: screen ticket to use to authenticate the client
        - port: host port to be used for console access

        :param vapp_or_vm_id: vApp or VM ID you want to connect to.
        :type vapp_or_vm_id: ``str``

        :param vm_num: If a vApp ID is provided, vm_num is position in the
                       vApp VM list of the VM you want to get a screen ticket.
                       Default is 0.
        :type vm_num: ``int``

        :rtype: ``dict``
        """
        vm = self._get_vm_elements(vapp_or_vm_id)[vm_num]
        try:
            res = self.connection.request(
                '%s/screen/action/acquireMksTicket' %
                (get_url_path(vm.get('href'))),
                method='POST')
            output = {
                "host": res.object.find(fixxpath(res.object, 'Host')).text,
                "vmx": res.object.find(fixxpath(res.object, 'Vmx')).text,
                "ticket": res.object.find(fixxpath(res.object,
                                                   'Ticket')).text,
                "port": res.object.find(fixxpath(res.object, 'Port')).text,
            }
            return output
        except Exception:
            # BUG FIX: narrowed from a bare "except:".  This is deliberately
            # best-effort (returns None when the ticket cannot be acquired or
            # the response is malformed), but a bare except also swallowed
            # KeyboardInterrupt/SystemExit.
            return None
    def __init__(self, key, secret, ex_project_id, ex_consumer_key=None):
        """
        Instantiate the driver with the given API credentials.

        :param key: Your application key (required)
        :type key: ``str``

        :param secret: Your application secret (required)
        :type secret: ``str``

        :param ex_project_id: Your project ID
        :type ex_project_id: ``str``

        :param ex_consumer_key: Your consumer key (required)
        :type ex_consumer_key: ``str``

        :rtype: ``None``
        """
        # No region is selected up front; calls take an explicit location.
        self.datacenter = None
        self.project_id = ex_project_id
        self.consumer_key = ex_consumer_key
        NodeDriver.__init__(self, key, secret,
                            ex_consumer_key=ex_consumer_key)

    def _get_project_action(self, suffix):
        # Build the '<API_ROOT>/cloud/project/<project_id>/<suffix>' URL used
        # by every project-scoped API call below.
        base_url = '%s/cloud/project/%s/' % (API_ROOT, self.project_id)
        return base_url + suffix

    def list_nodes(self, location=None):
        """
        List all nodes.

        :keyword location: Location (region) used as filter
        :type location: :class:`NodeLocation`

        :return: List of node objects
        :rtype: ``list`` of :class:`Node`
        """
        action = self._get_project_action('instance')
        data = {}
        if location:
            data['region'] = location.id
        response = self.connection.request(action, data=data)
        return self._to_nodes(response.object)
    def ex_get_node(self, node_id):
        """
        Get an individual node.

        :keyword node_id: Node's ID
        :type node_id: ``str``

        :return: Created node
        :rtype  : :class:`Node`
        """
        action = self._get_project_action('instance/%s' % node_id)
        response = self.connection.request(action, method='GET')
        return self._to_node(response.object)

    def create_node(self, name, image, size, location, ex_keyname=None):
        """
        Create a new node

        :keyword name: Name of created node
        :type name: ``str``

        :keyword image: Image used for node
        :type image: :class:`NodeImage`

        :keyword size: Size (flavor) used for node
        :type size: :class:`NodeSize`

        :keyword location: Location (region) where to create node
        :type location: :class:`NodeLocation`

        :keyword ex_keyname: Name of SSH key used
        :type ex_keyname: ``str``

        :return: Created node
        :rtype : :class:`Node`
        """
        action = self._get_project_action('instance')
        data = {
            'name': name,
            'imageId': image.id,
            'flavorId': size.id,
            'region': location.id,
        }
        if ex_keyname:
            # The API wants the key's ID, not its name; resolve it in the
            # target region first.
            key_id = self.get_key_pair(ex_keyname, location).extra['id']
            data['sshKeyId'] = key_id
        response = self.connection.request(action, data=data, method='POST')
        return self._to_node(response.object)

    def destroy_node(self, node):
        # DELETE on the instance resource; the API answer carries no body we
        # need, so success is reported unconditionally.
        action = self._get_project_action('instance/%s' % node.id)
        self.connection.request(action, method='DELETE')
        return True

    def list_sizes(self, location=None):
        # Sizes ("flavors") can be filtered by region.
        action = self._get_project_action('flavor')
        params = {}
        if location:
            params['region'] = location.id
        response = self.connection.request(action, params=params)
        return self._to_sizes(response.object)
    def ex_get_size(self, size_id):
        """
        Get an individual size (flavor).

        :keyword size_id: Size's ID
        :type size_id: ``str``

        :return: Size
        :rtype: :class:`NodeSize`
        """
        action = self._get_project_action('flavor/%s' % size_id)
        response = self.connection.request(action)
        return self._to_size(response.object)

    def list_images(self, location=None, ex_size=None):
        """
        List available images

        :keyword location: Location (region) used as filter
        :type location: :class:`NodeLocation`

        :keyword ex_size: Exclude images which are incompatible with given
                          size
        :type ex_size: :class:`NodeImage`

        :return: List of images
        :rtype : ``list`` of :class:`NodeImage`
        """
        action = self._get_project_action('image')
        params = {}
        if location:
            params['region'] = location.id
        if ex_size:
            params['flavorId'] = ex_size.id
        response = self.connection.request(action, params=params)
        return self._to_images(response.object)

    def get_image(self, image_id):
        """Return a single image by its ID."""
        action = self._get_project_action('image/%s' % image_id)
        response = self.connection.request(action)
        return self._to_image(response.object)

    def list_locations(self):
        """Return the regions available to this project."""
        action = self._get_project_action('region')
        data = self.connection.request(action)
        return self._to_locations(data.object)

    def list_key_pairs(self, ex_location=None):
        """
        List available SSH public keys.

        :keyword ex_location: Location (region) used as filter
        :type ex_location: :class:`NodeLocation`

        :return: Public keys
        :rtype: ``list`` of :class:`KeyPair`
        """
        action = self._get_project_action('sshkey')
        params = {}
        if ex_location:
            params['region'] = ex_location.id
        response = self.connection.request(action, params=params)
        return self._to_key_pairs(response.object)
    def get_key_pair(self, name, ex_location=None):
        """
        Get an individual SSH public key by its name and location.

        :param name: Name of the key pair to retrieve.
        :type name: ``str``

        :keyword ex_location: Key's region
        :type ex_location: :class:`NodeLocation`

        :return: Public key
        :rtype: :class:`KeyPair`
        :raises Exception: when no key with that name exists.
        """
        # Keys are indexed with ID, so filter the listing by name client-side.
        keys = [key for key in self.list_key_pairs(ex_location)
                if key.name == name]
        if not keys:
            raise Exception("No key named '%s'" % name)
        return keys[0]

    def import_key_pair_from_string(self, name, key_material, ex_location):
        """
        Import a new public key from string.

        :param name: Key pair name.
        :type name: ``str``

        :param key_material: Public key material.
        :type key_material: ``str``

        :param ex_location: Location where to store the key
        :type ex_location: :class:`NodeLocation`

        :return: Imported key pair object.
        :rtype: :class:`KeyPair`
        """
        action = self._get_project_action('sshkey')
        data = {
            'name': name,
            'publicKey': key_material,
            'region': ex_location.id
        }
        response = self.connection.request(action, data=data, method='POST')
        return self._to_key_pair(response.object)

    def delete_key_pair(self, key_pair):
        """Delete a key pair; always reports success when no error raises."""
        action = self._get_project_action('sshkey/%s' % key_pair.extra['id'])
        params = {'keyId': key_pair.extra['id']}
        self.connection.request(action, params=params, method='DELETE')
        return True
    def create_volume(self, size, name, location, snapshot=None,
                      ex_volume_type='classic', ex_description=None):
        """
        Create a volume.

        :param size: Size of volume to create (in GB).
        :type size: ``int``

        :param name: Name of volume to create
        :type name: ``str``

        :param location: Location to create the volume in
        :type location: :class:`NodeLocation` or ``None``

        :param snapshot: Snapshot from which to create the new volume.
                         (optional)
        :type snapshot: :class:`.VolumeSnapshot`

        :keyword ex_volume_type: ``'classic'`` or ``'high-speed'``
        :type ex_volume_type: ``str``

        :keyword ex_description: Optional description of volume
        :type ex_description: str

        :return:  Storage Volume object
        :rtype:   :class:`StorageVolume`
        """
        # NOTE(review): the ``snapshot`` argument is currently not forwarded
        # to the API request.
        action = self._get_project_action('volume')
        data = {
            'name': name,
            'region': location.id,
            'size': size,
            'type': ex_volume_type,
        }
        if ex_description:
            data['description'] = ex_description
        response = self.connection.request(action, data=data, method='POST')
        return self._to_volume(response.object)

    def destroy_volume(self, volume):
        """Delete a volume; always reports success when no error raises."""
        action = self._get_project_action('volume/%s' % volume.id)
        self.connection.request(action, method='DELETE')
        return True

    def list_volumes(self, ex_location=None):
        """
        Return a list of volumes.

        :keyword ex_location: Location used to filter
        :type ex_location: :class:`NodeLocation` or ``None``

        :return: A list of volume objects.
        :rtype: ``list`` of :class:`StorageVolume`
        """
        action = self._get_project_action('volume')
        data = {}
        if ex_location:
            data['region'] = ex_location.id
        response = self.connection.request(action, data=data)
        return self._to_volumes(response.object)

    def ex_get_volume(self, volume_id):
        """
        Return a Volume object based on a volume ID.

        :param  volume_id: The ID of the volume
        :type   volume_id: ``int``

        :return:  A StorageVolume object for the volume
        :rtype:   :class:`StorageVolume`
        """
        action = self._get_project_action('volume/%s' % volume_id)
        response = self.connection.request(action)
        return self._to_volume(response.object)
:param node: Node where to attach volume :type node: :class:`Node` :param volume: The ID of the volume :type volume: :class:`StorageVolume` :param device: Unsed parameter :return: True or False representing operation successful :rtype: ``bool`` """ action = self._get_project_action('volume/%s/attach' % volume.id) data = {'instanceId': node.id, 'volumeId': volume.id} self.connection.request(action, data=data, method='POST') return True def detach_volume(self, volume, ex_node=None): """ Detach a volume to a node. :param volume: The ID of the volume :type volume: :class:`StorageVolume` :param ex_node: Node to detach from (optionnal if volume is attached to only one node) :type ex_node: :class:`Node` :return: True or False representing operation successful :rtype: ``bool`` :raises: Exception: If ``ex_node`` is not provided and more than one node is attached to the volume """ action = self._get_project_action('volume/%s/detach' % volume.id) if ex_node is None: if len(volume.extra['attachedTo']) != 1: err_msg = "Volume '%s' has more or less than one attached" \ "nodes, you must specify one." raise Exception(err_msg) ex_node = self.ex_get_node(volume.extra['attachedTo'][0]) data = {'instanceId': ex_node.id} self.connection.request(action, data=data, method='POST') return True def ex_list_snapshots(self, location=None): """ List all snapshots. :keyword location: Location used to filter :type location: :class:`NodeLocation` or ``None`` :rtype: ``list`` of :class:`VolumeSnapshot` """ action = self._get_project_action('volume/snapshot') params = {} if location: params['region'] = location.id response = self.connection.request(action, params=params) return self._to_snapshots(response.object) def ex_get_volume_snapshot(self, snapshot_id): """ Returns a single volume snapshot. :param snapshot_id: Node to run the task on. :type snapshot_id: ``str`` :rtype :class:`.VolumeSnapshot`: :return: Volume snapshot. 
""" action = self._get_project_action('volume/snapshot/%s' % snapshot_id) response = self.connection.request(action) return self._to_snapshot(response.object) def list_volume_snapshots(self, volume): action = self._get_project_action('volume/snapshot') params = {'region': volume.extra['region']} response = self.connection.request(action, params=params) snapshots = self._to_snapshots(response.object) return [snap for snap in snapshots if snap.extra['volume_id'] == volume.id] def create_volume_snapshot(self, volume, name=None, ex_description=None): """ Create snapshot from volume :param volume: Instance of `StorageVolume` :type volume: `StorageVolume` :param name: Name of snapshot (optional) :type name: `str` | `NoneType` :param ex_description: Description of the snapshot (optional) :type ex_description: `str` | `NoneType` :rtype: :class:`VolumeSnapshot` """ action = self._get_project_action('volume/%s/snapshot/' % volume.id) data = {} if name: data['name'] = name if ex_description: data['description'] = ex_description response = self.connection.request(action, data=data, method='POST') return self._to_snapshot(response.object) def destroy_volume_snapshot(self, snapshot): action = self._get_project_action('volume/snapshot/%s' % snapshot.id) response = self.connection.request(action, method='DELETE') return response.status == httplib.OK def _to_volume(self, obj): extra = obj.copy() extra.pop('id') extra.pop('name') extra.pop('size') state = self.VOLUME_STATE_MAP.get(obj.pop('status', None), StorageVolumeState.UNKNOWN) return StorageVolume(id=obj['id'], name=obj['name'], size=obj['size'], state=state, extra=extra, driver=self) def _to_volumes(self, objs): return [self._to_volume(obj) for obj in objs] def _to_location(self, obj): location = self.connection.LOCATIONS[obj] return NodeLocation(driver=self, **location) def _to_locations(self, objs): return [self._to_location(obj) for obj in objs] def _to_node(self, obj): extra = obj.copy() if 'ipAddresses' in extra: 
public_ips = [ip['ip'] for ip in extra['ipAddresses']] del extra['id'] del extra['name'] return Node(id=obj['id'], name=obj['name'], state=self.NODE_STATE_MAP[obj['status']], public_ips=public_ips, private_ips=[], driver=self, extra=extra) def _to_nodes(self, objs): return [self._to_node(obj) for obj in objs] def _to_size(self, obj): extra = {'vcpus': obj['vcpus'], 'type': obj['type'], 'region': obj['region']} return NodeSize(id=obj['id'], name=obj['name'], ram=obj['ram'], disk=obj['disk'], bandwidth=None, price=None, driver=self, extra=extra) def _to_sizes(self, objs): return [self._to_size(obj) for obj in objs] def _to_image(self, obj): extra = {'region': obj['region'], 'visibility': obj['visibility']} return NodeImage(id=obj['id'], name=obj['name'], driver=self, extra=extra) def _to_images(self, objs): return [self._to_image(obj) for obj in objs] def _to_key_pair(self, obj): extra = {'regions': obj['regions'], 'id': obj['id']} return OpenStackKeyPair(name=obj['name'], public_key=obj['publicKey'], driver=self, fingerprint=None, extra=extra) def _to_key_pairs(self, objs): return [self._to_key_pair(obj) for obj in objs] def _to_snapshot(self, obj): extra = { 'volume_id': obj['volumeId'], 'region': obj['region'], 'description': obj['description'], 'status': obj['status'], } state = self.SNAPSHOT_STATE_MAP.get(obj['status'], VolumeSnapshotState.UNKNOWN) snapshot = VolumeSnapshot(id=obj['id'], driver=self, size=obj['size'], extra=extra, created=obj['creationDate'], state=state, name=obj['name']) return snapshot def _to_snapshots(self, objs): return [self._to_snapshot(obj) for obj in objs] def _ex_connection_class_kwargs(self): return {'ex_consumer_key': self.consumer_key} apache-libcloud-2.2.1/libcloud/compute/drivers/cloudsigma.py0000664000175000017500000020441712701023453024053 0ustar kamikami00000000000000# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
class CloudSigmaNodeDriver(NodeDriver):
    """
    Dispatching driver: instantiating this class returns the driver class
    matching the requested CloudSigma API version (1.0 or 2.0).
    """
    name = 'CloudSigma'
    website = 'http://www.cloudsigma.com/'

    def __new__(cls, key, secret=None, secure=True, host=None, port=None,
                api_version=DEFAULT_API_VERSION, **kwargs):
        if cls is CloudSigmaNodeDriver:
            if api_version == '1.0':
                cls = CloudSigma_1_0_NodeDriver
            elif api_version == '2.0':
                cls = CloudSigma_2_0_NodeDriver
            else:
                raise NotImplementedError('Unsupported API version: %s' %
                                          (api_version))
        return super(CloudSigmaNodeDriver, cls).__new__(cls)


class CloudSigmaException(Exception):
    def __str__(self):
        return self.args[0]

    def __repr__(self):
        # BUG FIX: the repr template had been reduced to an empty string
        # ("" % (...) raises TypeError); restored the angle-bracket template.
        return "<CloudSigmaException '%s'>" % (self.args[0])


class CloudSigmaInsufficientFundsException(Exception):
    def __repr__(self):
        # BUG FIX: same garbled-empty-template problem as above.
        return "<CloudSigmaInsufficientFundsException '%s'>" % (self.args[0])


class CloudSigmaNodeSize(NodeSize):
    """NodeSize with an extra ``cpu`` attribute (CloudSigma sizes carry a
    CPU-speed dimension in addition to RAM/disk/bandwidth)."""

    def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver):
        self.id = id
        self.name = name
        self.cpu = cpu
        self.ram = ram
        self.disk = disk
        self.bandwidth = bandwidth
        self.price = price
        self.driver = driver

    def __repr__(self):
        # BUG FIX: the format string had been reduced to '' which made this
        # raise TypeError; restored the descriptive template.
        return (('<NodeSize: id=%s, name=%s, cpu=%s, ram=%s disk=%s '
                 'bandwidth=%s price=%s driver=%s ...>')
                % (self.id, self.name, self.cpu, self.ram, self.disk,
                   self.bandwidth, self.price, self.driver.name))


class CloudSigma_1_0_Response(Response):
    def success(self):
        # 401 means bad credentials; anything else in the 2xx range is OK.
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()

        return self.status >= 200 and self.status <= 299

    def parse_body(self):
        # API v1.0 answers with a colon-separated key/value text format.
        if not self.body:
            return self.body

        return str2dicts(self.body)

    def parse_error(self):
        return 'Error: %s' % (self.body.replace('errors:', '').strip())


class CloudSigma_1_0_Connection(ConnectionUserAndKey):
    host = API_ENDPOINTS_1_0[DEFAULT_REGION]['host']
    responseCls = CloudSigma_1_0_Response

    def add_default_headers(self, headers):
        # v1.0 uses HTTP basic auth built from user id and API key.
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'

        headers['Authorization'] = 'Basic %s' % (base64.b64encode(
            b('%s:%s' % (self.user_id, self.key))).decode('utf-8'))
        return headers
    # Map of CloudSigma server states to libcloud NodeState values.
    NODE_STATE_MAP = {
        'active': NodeState.RUNNING,
        'stopped': NodeState.TERMINATED,
        'dead': NodeState.TERMINATED,
        'dumped': NodeState.TERMINATED,
    }

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        """
        :param region: API region to use; must be a key of
                       ``API_ENDPOINTS_1_0``.
        :raises ValueError: on an unknown region.
        """
        if region not in API_ENDPOINTS_1_0:
            raise ValueError('Invalid region: %s' % (region))

        # Remember whether the user supplied an explicit host so it is not
        # overridden by the region's default endpoint.
        self._host_argument_set = host is not None
        self.api_name = 'cloudsigma_%s' % (region)
        super(CloudSigma_1_0_NodeDriver, self).__init__(key=key,
                                                        secret=secret,
                                                        secure=secure,
                                                        host=host, port=port,
                                                        region=region,
                                                        **kwargs)

    def reboot_node(self, node):
        """
        Reboot a node.

        Because Cloudsigma API does not provide native reboot call,
        it's emulated using stop and start.

        @inherits: :class:`NodeDriver.reboot_node`
        """
        node = self._get_node(node.id)
        state = node.state

        if state == NodeState.RUNNING:
            stopped = self.ex_stop_node(node)
        else:
            stopped = True

        if not stopped:
            raise CloudSigmaException(
                'Could not stop node with id %s' % (node.id))

        success = self.ex_start_node(node)

        return success

    def destroy_node(self, node):
        """
        Destroy a node (all the drives associated with it are NOT destroyed).

        If a node is still running, it's stopped before it's destroyed.

        @inherits: :class:`NodeDriver.destroy_node`
        """
        node = self._get_node(node.id)
        state = node.state

        # Node cannot be destroyed while running so it must be stopped first
        if state == NodeState.RUNNING:
            stopped = self.ex_stop_node(node)
        else:
            stopped = True

        if not stopped:
            raise CloudSigmaException(
                'Could not stop node with id %s' % (node.id))

        response = self.connection.request(
            action='/servers/%s/destroy' % (node.id),
            method='POST')
        return response.status == 204
@inherits: :class:`NodeDriver.list_images` """ response = self.connection.request( action='/drives/standard/info').object images = [] for value in response: if value.get('type'): if value['type'] == 'disk': image = NodeImage(id=value['drive'], name=value['name'], driver=self.connection.driver, extra={'size': value['size']}) images.append(image) return images def list_sizes(self, location=None): sizes = [] for value in INSTANCE_TYPES: key = value['id'] size = CloudSigmaNodeSize(id=value['id'], name=value['name'], cpu=value['cpu'], ram=value['memory'], disk=value['disk'], bandwidth=value['bandwidth'], price=self._get_size_price(size_id=key), driver=self.connection.driver) sizes.append(size) return sizes def list_nodes(self): response = self.connection.request(action='/servers/info').object nodes = [] for data in response: node = self._to_node(data) if node: nodes.append(node) return nodes def create_node(self, **kwargs): """ Creates a CloudSigma instance @inherits: :class:`NodeDriver.create_node` :keyword name: String with a name for this new node (required) :type name: ``str`` :keyword smp: Number of virtual processors or None to calculate based on the cpu speed. :type smp: ``int`` :keyword nic_model: e1000, rtl8139 or virtio (is not specified, e1000 is used) :type nic_model: ``str`` :keyword vnc_password: If not set, VNC access is disabled. :type vnc_password: ``bool`` :keyword drive_type: Drive type (ssd|hdd). Defaults to hdd. :type drive_type: ``str`` """ size = kwargs['size'] image = kwargs['image'] smp = kwargs.get('smp', 'auto') nic_model = kwargs.get('nic_model', 'e1000') vnc_password = kwargs.get('vnc_password', None) drive_type = kwargs.get('drive_type', 'hdd') if nic_model not in ['e1000', 'rtl8139', 'virtio']: raise CloudSigmaException('Invalid NIC model specified') if drive_type not in ['hdd', 'ssd']: raise CloudSigmaException('Invalid drive type "%s". 
Valid types' ' are: hdd, ssd' % (drive_type)) drive_data = {} drive_data.update({'name': kwargs['name'], 'size': '%sG' % (kwargs['size'].disk), 'driveType': drive_type}) response = self.connection.request( action='/drives/%s/clone' % image.id, data=dict2str(drive_data), method='POST').object if not response: raise CloudSigmaException('Drive creation failed') drive_uuid = response[0]['drive'] response = self.connection.request( action='/drives/%s/info' % (drive_uuid)).object imaging_start = time.time() while 'imaging' in response[0]: response = self.connection.request( action='/drives/%s/info' % (drive_uuid)).object elapsed_time = time.time() - imaging_start timed_out = elapsed_time >= self.IMAGING_TIMEOUT if 'imaging' in response[0] and timed_out: raise CloudSigmaException('Drive imaging timed out') time.sleep(1) node_data = {} node_data.update( {'name': kwargs['name'], 'cpu': size.cpu, 'mem': size.ram, 'ide:0:0': drive_uuid, 'boot': 'ide:0:0', 'smp': smp}) node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'}) if vnc_password: node_data.update({'vnc:ip': 'auto', 'vnc:password': vnc_password}) response = self.connection.request(action='/servers/create', data=dict2str(node_data), method='POST').object if not isinstance(response, list): response = [response] node = self._to_node(response[0]) if node is None: # Insufficient funds, destroy created drive self.ex_drive_destroy(drive_uuid) raise CloudSigmaInsufficientFundsException( 'Insufficient funds, node creation failed') # Start the node after it has been created started = self.ex_start_node(node) if started: node.state = NodeState.RUNNING return node def ex_destroy_node_and_drives(self, node): """ Destroy a node and all the drives associated with it. 
:param node: Node which should be used :type node: :class:`libcloud.compute.base.Node` :rtype: ``bool`` """ node = self._get_node_info(node) drive_uuids = [] for key, value in node.items(): if (key.startswith('ide:') or key.startswith( 'scsi') or key.startswith('block')) and\ not (key.endswith(':bytes') or key.endswith(':requests') or key.endswith('media')): drive_uuids.append(value) node_destroyed = self.destroy_node(self._to_node(node)) if not node_destroyed: return False for drive_uuid in drive_uuids: self.ex_drive_destroy(drive_uuid) return True def ex_static_ip_list(self): """ Return a list of available static IP addresses. :rtype: ``list`` of ``str`` """ response = self.connection.request(action='/resources/ip/list', method='GET') if response.status != 200: raise CloudSigmaException('Could not retrieve IP list') ips = str2list(response.body) return ips def ex_drives_list(self): """ Return a list of all the available drives. :rtype: ``list`` of ``dict`` """ response = self.connection.request(action='/drives/info', method='GET') result = str2dicts(response.body) return result def ex_static_ip_create(self): """ Create a new static IP address.p :rtype: ``list`` of ``dict`` """ response = self.connection.request(action='/resources/ip/create', method='GET') result = str2dicts(response.body) return result def ex_static_ip_destroy(self, ip_address): """ Destroy a static IP address. :param ip_address: IP address which should be used :type ip_address: ``str`` :rtype: ``bool`` """ response = self.connection.request( action='/resources/ip/%s/destroy' % (ip_address), method='GET') return response.status == 204 def ex_drive_destroy(self, drive_uuid): """ Destroy a drive with a specified uuid. If the drive is currently mounted an exception is thrown. 
:param drive_uuid: Drive uuid which should be used :type drive_uuid: ``str`` :rtype: ``bool`` """ response = self.connection.request( action='/drives/%s/destroy' % (drive_uuid), method='POST') return response.status == 204 def ex_set_node_configuration(self, node, **kwargs): """ Update a node configuration. Changing most of the parameters requires node to be stopped. :param node: Node which should be used :type node: :class:`libcloud.compute.base.Node` :param kwargs: keyword arguments :type kwargs: ``dict`` :rtype: ``bool`` """ valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$', '^boot$', '^nic:0:model$', '^nic:0:dhcp', '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$', '^vnc:ip$', '^vnc:password$', '^vnc:tls', '^ide:[0-1]:[0-1](:media)?$', '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$') invalid_keys = [] keys = list(kwargs.keys()) for key in keys: matches = False for regex in valid_keys: if re.match(regex, key): matches = True break if not matches: invalid_keys.append(key) if invalid_keys: raise CloudSigmaException( 'Invalid configuration key specified: %s' % (',' .join(invalid_keys))) response = self.connection.request( action='/servers/%s/set' % (node.id), data=dict2str(kwargs), method='POST') return (response.status == 200 and response.body != '') def ex_start_node(self, node): """ Start a node. :param node: Node which should be used :type node: :class:`libcloud.compute.base.Node` :rtype: ``bool`` """ response = self.connection.request( action='/servers/%s/start' % (node.id), method='POST') return response.status == 200 def ex_stop_node(self, node): """ Stop (shutdown) a node. :param node: Node which should be used :type node: :class:`libcloud.compute.base.Node` :rtype: ``bool`` """ response = self.connection.request( action='/servers/%s/stop' % (node.id), method='POST') return response.status == 204 def ex_shutdown_node(self, node): """ Stop (shutdown) a node. 
@inherits: :class:`CloudSigmaBaseNodeDriver.ex_stop_node` """ return self.ex_stop_node(node) def ex_destroy_drive(self, drive_uuid): """ Destroy a drive. :param drive_uuid: Drive uuid which should be used :type drive_uuid: ``str`` :rtype: ``bool`` """ response = self.connection.request( action='/drives/%s/destroy' % (drive_uuid), method='POST') return response.status == 204 def _ex_connection_class_kwargs(self): """ Return the host value based on the user supplied region. """ kwargs = {} if not self._host_argument_set: kwargs['host'] = API_ENDPOINTS_1_0[self.region]['host'] return kwargs def _to_node(self, data): if data: try: state = self.NODE_STATE_MAP[data['status']] except KeyError: state = NodeState.UNKNOWN if 'server' not in data: # Response does not contain server UUID if the server # creation failed because of insufficient funds. return None public_ips = [] if 'nic:0:dhcp' in data: if isinstance(data['nic:0:dhcp'], list): public_ips = data['nic:0:dhcp'] else: public_ips = [data['nic:0:dhcp']] extra = {} extra_keys = [('cpu', 'int'), ('smp', 'auto'), ('mem', 'int'), ('status', 'str')] for key, value_type in extra_keys: if key in data: value = data[key] if value_type == 'int': value = int(value) elif value_type == 'auto': try: value = int(value) except ValueError: pass extra.update({key: value}) if 'vnc:ip' in data and 'vnc:password' in data: extra.update({'vnc_ip': data['vnc:ip'], 'vnc_password': data['vnc:password']}) node = Node(id=data['server'], name=data['name'], state=state, public_ips=public_ips, private_ips=None, driver=self.connection.driver, extra=extra) return node return None def _get_node(self, node_id): nodes = self.list_nodes() node = [node for node in nodes if node.id == node.id] if not node: raise CloudSigmaException( 'Node with id %s does not exist' % (node_id)) return node[0] def _get_node_info(self, node): response = self.connection.request( action='/servers/%s/info' % (node.id)) result = str2dicts(response.body) return result[0] class 
CloudSigmaZrhConnection(CloudSigma_1_0_Connection): """ Connection class for the CloudSigma driver for the Zurich end-point """ host = API_ENDPOINTS_1_0['zrh']['host'] class CloudSigmaZrhNodeDriver(CloudSigma_1_0_NodeDriver): """ CloudSigma node driver for the Zurich end-point """ connectionCls = CloudSigmaZrhConnection api_name = 'cloudsigma_zrh' class CloudSigmaLvsConnection(CloudSigma_1_0_Connection): """ Connection class for the CloudSigma driver for the Las Vegas end-point """ host = API_ENDPOINTS_1_0['lvs']['host'] class CloudSigmaLvsNodeDriver(CloudSigma_1_0_NodeDriver): """ CloudSigma node driver for the Las Vegas end-point """ connectionCls = CloudSigmaLvsConnection api_name = 'cloudsigma_lvs' class CloudSigmaError(ProviderError): """ Represents CloudSigma API error. """ def __init__(self, http_code, error_type, error_msg, error_point, driver): """ :param http_code: HTTP status code. :type http_code: ``int`` :param error_type: Type of error (validation / notexist / backend / permissions database / concurrency / billing / payment) :type error_type: ``str`` :param error_msg: A description of the error that occurred. :type error_msg: ``str`` :param error_point: Point at which the error occurred. Can be None. :type error_point: ``str`` or ``None`` """ super(CloudSigmaError, self).__init__(http_code=http_code, value=error_msg, driver=driver) self.error_type = error_type self.error_msg = error_msg self.error_point = error_point class CloudSigmaSubscription(object): """ Represents CloudSigma subscription. """ def __init__(self, id, resource, amount, period, status, price, start_time, end_time, auto_renew, subscribed_object=None): """ :param id: Subscription ID. :type id: ``str`` :param resource: Resource (e.g vlan, ip, etc.). :type resource: ``str`` :param period: Subscription period. :type period: ``str`` :param status: Subscription status (active / inactive). :type status: ``str`` :param price: Subscription price. 
:type price: ``str`` :param start_time: Start time for this subscription. :type start_time: ``datetime.datetime`` :param end_time: End time for this subscription. :type end_time: ``datetime.datetime`` :param auto_renew: True if the subscription is auto renewed. :type auto_renew: ``bool`` :param subscribed_object: Optional UUID of the subscribed object. :type subscribed_object: ``str`` """ self.id = id self.resource = resource self.amount = amount self.period = period self.status = status self.price = price self.start_time = start_time self.end_time = end_time self.auto_renew = auto_renew self.subscribed_object = subscribed_object def __str__(self): return self.__repr__() def __repr__(self): return ('' % (self.id, self.resource, self.amount, self.period, self.subscribed_object)) class CloudSigmaTag(object): """ Represents a CloudSigma tag object. """ def __init__(self, id, name, resources=None): """ :param id: Tag ID. :type id: ``str`` :param name: Tag name. :type name: ``str`` :param resource: IDs of resources which are associated with this tag. :type resources: ``list`` of ``str`` """ self.id = id self.name = name self.resources = resources if resources else [] def __str__(self): return self.__repr__() def __repr__(self): return ('' % (self.id, self.name, repr(self.resources))) class CloudSigmaDrive(NodeImage): """ Represents a CloudSigma drive. """ def __init__(self, id, name, size, media, status, driver, extra=None): """ :param id: Drive ID. :type id: ``str`` :param name: Drive name. :type name: ``str`` :param size: Drive size (in bytes). :type size: ``int`` :param media: Drive media (cdrom / disk). :type media: ``str`` :param status: Drive status (unmounted / mounted). 
:type status: ``str`` """ super(CloudSigmaDrive, self).__init__(id=id, name=name, driver=driver, extra=extra) self.size = size self.media = media self.status = status def __str__(self): return self.__repr__() def __repr__(self): return (('') % (self.id, self.name, self.size, self.media, self.status)) class CloudSigmaFirewallPolicy(object): """ Represents a CloudSigma firewall policy. """ def __init__(self, id, name, rules): """ :param id: Policy ID. :type id: ``str`` :param name: Policy name. :type name: ``str`` :param rules: Rules associated with this policy. :type rules: ``list`` of :class:`.CloudSigmaFirewallPolicyRule` objects """ self.id = id self.name = name self.rules = rules if rules else [] def __str__(self): return self.__repr__() def __repr__(self): return (('') % (self.id, self.name, repr(self.rules))) class CloudSigmaFirewallPolicyRule(object): """ Represents a CloudSigma firewall policy rule. """ def __init__(self, action, direction, ip_proto=None, src_ip=None, src_port=None, dst_ip=None, dst_port=None, comment=None): """ :param action: Action (drop / accept). :type action: ``str`` :param direction: Rule direction (in / out / both)> :type direction: ``str`` :param ip_proto: IP protocol (tcp / udp). :type ip_proto: ``str``. :param src_ip: Source IP in CIDR notation. :type src_ip: ``str`` :param src_port: Source port or a port range. :type src_port: ``str`` :param dst_ip: Destination IP in CIDR notation. :type dst_ip: ``str`` :param src_port: Destination port or a port range. :type src_port: ``str`` :param comment: Comment associated with the policy. 
:type comment: ``str`` """ self.action = action self.direction = direction self.ip_proto = ip_proto self.src_ip = src_ip self.src_port = src_port self.dst_ip = dst_ip self.dst_port = dst_port self.comment = comment def __str__(self): return self.__repr__() def __repr__(self): return (('') % (self.action, self.direction)) class CloudSigma_2_0_Response(JsonResponse): success_status_codes = [ httplib.OK, httplib.ACCEPTED, httplib.NO_CONTENT, httplib.CREATED ] def success(self): return self.status in self.success_status_codes def parse_error(self): if int(self.status) == httplib.UNAUTHORIZED: raise InvalidCredsError('Invalid credentials') body = self.parse_body() errors = self._parse_errors_from_body(body=body) if errors: # Throw first error raise errors[0] return body def _parse_errors_from_body(self, body): """ Parse errors from the response body. :return: List of error objects. :rtype: ``list`` of :class:`.CloudSigmaError` objects """ errors = [] if not isinstance(body, list): return None for item in body: if 'error_type' not in item: # Unrecognized error continue error = CloudSigmaError(http_code=self.status, error_type=item['error_type'], error_msg=item['error_message'], error_point=item['error_point'], driver=self.connection.driver) errors.append(error) return errors class CloudSigma_2_0_Connection(ConnectionUserAndKey): host = API_ENDPOINTS_2_0[DEFAULT_REGION]['host'] responseCls = CloudSigma_2_0_Response api_prefix = '/api/2.0' def add_default_headers(self, headers): headers['Accept'] = 'application/json' headers['Content-Type'] = 'application/json' headers['Authorization'] = 'Basic %s' % (base64.b64encode( b('%s:%s' % (self.user_id, self.key))).decode('utf-8')) return headers def encode_data(self, data): data = json.dumps(data) return data def request(self, action, params=None, data=None, headers=None, method='GET', raw=False): params = params or {} action = self.api_prefix + action if method == 'GET': params['limit'] = 0 # we want all the items back return 
super(CloudSigma_2_0_Connection, self).request(action=action, params=params, data=data, headers=headers, method=method, raw=raw) class CloudSigma_2_0_NodeDriver(CloudSigmaNodeDriver): """ Driver for CloudSigma API v2.0. """ name = 'CloudSigma (API v2.0)' api_name = 'cloudsigma_zrh' website = 'http://www.cloudsigma.com/' connectionCls = CloudSigma_2_0_Connection # Default drive transition timeout in seconds DRIVE_TRANSITION_TIMEOUT = 500 # How long to sleep between different polling periods while waiting for # drive transition DRIVE_TRANSITION_SLEEP_INTERVAL = 5 NODE_STATE_MAP = { 'starting': NodeState.PENDING, 'stopping': NodeState.PENDING, 'unavailable': NodeState.ERROR, 'running': NodeState.RUNNING, 'stopped': NodeState.STOPPED, 'paused': NodeState.PAUSED } def __init__(self, key, secret, secure=True, host=None, port=None, region=DEFAULT_REGION, **kwargs): if region not in API_ENDPOINTS_2_0: raise ValueError('Invalid region: %s' % (region)) if not secure: # CloudSigma drive uses Basic Auth authentication and we don't want # to allow user to accidentally send credentials over the wire in # plain-text raise ValueError('CloudSigma driver only supports a ' 'secure connection') self._host_argument_set = host is not None super(CloudSigma_2_0_NodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, region=region, **kwargs) def list_nodes(self, ex_tag=None): """ List available nodes. :param ex_tag: If specified, only return servers tagged with the provided tag. :type ex_tag: :class:`CloudSigmaTag` """ if ex_tag: action = '/tags/%s/servers/detail/' % (ex_tag.id) else: action = '/servers/detail/' response = self.connection.request(action=action, method='GET').object nodes = [self._to_node(data=item) for item in response['objects']] return nodes def list_sizes(self): """ List available sizes. 
""" sizes = [] for value in INSTANCE_TYPES: key = value['id'] size = CloudSigmaNodeSize(id=value['id'], name=value['name'], cpu=value['cpu'], ram=value['memory'], disk=value['disk'], bandwidth=value['bandwidth'], price=self._get_size_price(size_id=key), driver=self.connection.driver) sizes.append(size) return sizes def list_images(self): """ Return a list of available pre-installed library drives. Note: If you want to list all the available library drives (both pre-installed and installation CDs), use :meth:`ex_list_library_drives` method. """ response = self.connection.request(action='/libdrives/').object images = [self._to_image(data=item) for item in response['objects']] # We filter out non pre-installed library drives by default because # they can't be used directly following a default Libcloud server # creation flow. images = [image for image in images if image.extra['image_type'] == 'preinst'] return images def create_node(self, name, size, image, ex_metadata=None, ex_vnc_password=None, ex_avoid=None, ex_vlan=None): """ Create a new server. Server creation consists multiple steps depending on the type of the image used. 1. Installation CD: 1. Create a server and attach installation cd 2. Start a server 2. Pre-installed image: 1. Clone provided library drive so we can use it 2. Resize cloned drive to the desired size 3. Create a server and attach cloned drive 4. Start a server :param ex_metadata: Key / value pairs to associate with the created node. (optional) :type ex_metadata: ``dict`` :param ex_vnc_password: Password to use for VNC access. If not provided, random password is generated. :type ex_vnc_password: ``str`` :param ex_avoid: A list of server UUIDs to avoid when starting this node. (optional) :type ex_avoid: ``list`` :param ex_vlan: Optional UUID of a VLAN network to use. If specified, server will have two nics assigned - 1 with a public ip and 1 with the provided VLAN. 
:type ex_vlan: ``str`` """ is_installation_cd = self._is_installation_cd(image=image) if ex_vnc_password: vnc_password = ex_vnc_password else: # VNC password is not provided, generate a random one. vnc_password = get_secure_random_string(size=12) drive_name = '%s-drive' % (name) # size is specified in GB drive_size = (size.disk * 1024 * 1024 * 1024) if not is_installation_cd: # 1. Clone library drive so we can use it drive = self.ex_clone_drive(drive=image, name=drive_name) # Wait for drive clone to finish drive = self._wait_for_drive_state_transition(drive=drive, state='unmounted') # 2. Resize drive to the desired disk size if the desired disk size # is larger than the cloned drive size. if drive_size > drive.size: drive = self.ex_resize_drive(drive=drive, size=drive_size) # Wait for drive resize to finish drive = self._wait_for_drive_state_transition(drive=drive, state='unmounted') else: # No need to clone installation CDs drive = image # 3. Create server and attach cloned drive # ide 0:0 data = {} data['name'] = name data['cpu'] = size.cpu data['mem'] = (size.ram * 1024 * 1024) data['vnc_password'] = vnc_password if ex_metadata: data['meta'] = ex_metadata # Assign 1 public interface (DHCP) to the node nic = { 'boot_order': None, 'ip_v4_conf': { 'conf': 'dhcp', }, 'ip_v6_conf': None } nics = [nic] if ex_vlan: # Assign another interface for VLAN nic = { 'boot_order': None, 'ip_v4_conf': None, 'ip_v6_conf': None, 'vlan': ex_vlan } nics.append(nic) # Need to use IDE for installation CDs if is_installation_cd: device_type = 'ide' else: device_type = 'virtio' drive = { 'boot_order': 1, 'dev_channel': '0:0', 'device': device_type, 'drive': drive.id } drives = [drive] data['nics'] = nics data['drives'] = drives action = '/servers/' response = self.connection.request(action=action, method='POST', data=data) node = self._to_node(response.object['objects'][0]) # 4. 
Start server self.ex_start_node(node=node, ex_avoid=ex_avoid) return node def destroy_node(self, node): """ Destroy the node and all the associated drives. :return: ``True`` on success, ``False`` otherwise. :rtype: ``bool`` """ action = '/servers/%s/' % (node.id) params = {'recurse': 'all_drives'} response = self.connection.request(action=action, method='DELETE', params=params) return response.status == httplib.NO_CONTENT # Server extension methods def ex_edit_node(self, node, params): """ Edit a node. :param node: Node to edit. :type node: :class:`libcloud.compute.base.Node` :param params: Node parameters to update. :type params: ``dict`` :return Edited node. :rtype: :class:`libcloud.compute.base.Node` """ data = {} # name, cpu, mem and vnc_password attributes must always be present so # we just copy them from the to-be-edited node data['name'] = node.name data['cpu'] = node.extra['cpu'] data['mem'] = node.extra['mem'] data['vnc_password'] = node.extra['vnc_password'] nics = copy.deepcopy(node.extra.get('nics', [])) data['nics'] = nics data.update(params) action = '/servers/%s/' % (node.id) response = self.connection.request(action=action, method='PUT', data=data).object node = self._to_node(data=response) return node def ex_start_node(self, node, ex_avoid=None): """ Start a node. :param node: Node to start. :type node: :class:`libcloud.compute.base.Node` :param ex_avoid: A list of other server uuids to avoid when starting this node. If provided, node will attempt to be started on a different physical infrastructure from other servers specified using this argument. (optional) :type ex_avoid: ``list`` """ params = {} if ex_avoid: params['avoid'] = ','.join(ex_avoid) path = '/servers/%s/action/' % (node.id) response = self._perform_action(path=path, action='start', params=params, method='POST') return response.status == httplib.ACCEPTED def ex_stop_node(self, node): """ Stop a node. 
""" path = '/servers/%s/action/' % (node.id) response = self._perform_action(path=path, action='stop', method='POST') return response.status == httplib.ACCEPTED def ex_clone_node(self, node, name=None, random_vnc_password=None): """ Clone the provided node. :param name: Optional name for the cloned node. :type name: ``str`` :param random_vnc_password: If True, a new random VNC password will be generated for the cloned node. Otherwise password from the cloned node will be reused. :type random_vnc_password: ``bool`` :return: Cloned node. :rtype: :class:`libcloud.compute.base.Node` """ data = {} data['name'] = name data['random_vnc_password'] = random_vnc_password path = '/servers/%s/action/' % (node.id) response = self._perform_action(path=path, action='clone', method='POST', data=data).object node = self._to_node(data=response) return node def ex_open_vnc_tunnel(self, node): """ Open a VNC tunnel to the provided node and return the VNC url. :param node: Node to open the VNC tunnel to. :type node: :class:`libcloud.compute.base.Node` :return: URL of the opened VNC tunnel. :rtype: ``str`` """ path = '/servers/%s/action/' % (node.id) response = self._perform_action(path=path, action='open_vnc', method='POST').object vnc_url = response['vnc_url'] return vnc_url def ex_close_vnc_tunnel(self, node): """ Close a VNC server to the provided node. :param node: Node to close the VNC tunnel to. :type node: :class:`libcloud.compute.base.Node` :return: ``True`` on success, ``False`` otherwise. :rtype: ``bool`` """ path = '/servers/%s/action/' % (node.id) response = self._perform_action(path=path, action='close_vnc', method='POST') return response.status == httplib.ACCEPTED # Drive extension methods def ex_list_library_drives(self): """ Return a list of all the available library drives (pre-installed and installation CDs). 
:rtype: ``list`` of :class:`.CloudSigmaDrive` objects """ response = self.connection.request(action='/libdrives/').object drives = [self._to_drive(data=item) for item in response['objects']] return drives def ex_list_user_drives(self): """ Return a list of all the available user's drives. :rtype: ``list`` of :class:`.CloudSigmaDrive` objects """ response = self.connection.request(action='/drives/detail/').object drives = [self._to_drive(data=item) for item in response['objects']] return drives def ex_create_drive(self, name, size, media='disk', ex_avoid=None): """ Create a new drive. :param name: Drive name. :type name: ``str`` :param size: Drive size in bytes. :type size: ``int`` :param media: Drive media type (cdrom, disk). :type media: ``str`` :param ex_avoid: A list of other drive uuids to avoid when creating this drive. If provided, drive will attempt to be created on a different physical infrastructure from other drives specified using this argument. (optional) :type ex_avoid: ``list`` :return: Created drive object. :rtype: :class:`.CloudSigmaDrive` """ params = {} data = { 'name': name, 'size': size, 'media': media } if ex_avoid: params['avoid'] = ','.join(ex_avoid) action = '/drives/' response = self.connection.request(action=action, method='POST', params=params, data=data).object drive = self._to_drive(data=response['objects'][0]) return drive def ex_clone_drive(self, drive, name=None, ex_avoid=None): """ Clone a library or a standard drive. :param drive: Drive to clone. :type drive: :class:`libcloud.compute.base.NodeImage` or :class:`.CloudSigmaDrive` :param name: Optional name for the cloned drive. :type name: ``str`` :param ex_avoid: A list of other drive uuids to avoid when creating this drive. If provided, drive will attempt to be created on a different physical infrastructure from other drives specified using this argument. (optional) :type ex_avoid: ``list`` :return: New cloned drive. 
:rtype: :class:`.CloudSigmaDrive` """ params = {} data = {} if ex_avoid: params['avoid'] = ','.join(ex_avoid) if name: data['name'] = name path = '/drives/%s/action/' % (drive.id) response = self._perform_action(path=path, action='clone', params=params, data=data, method='POST') drive = self._to_drive(data=response.object['objects'][0]) return drive def ex_resize_drive(self, drive, size): """ Resize a drive. :param drive: Drive to resize. :param size: New drive size in bytes. :type size: ``int`` :return: Drive object which is being resized. :rtype: :class:`.CloudSigmaDrive` """ path = '/drives/%s/action/' % (drive.id) data = {'name': drive.name, 'size': size, 'media': 'disk'} response = self._perform_action(path=path, action='resize', method='POST', data=data) drive = self._to_drive(data=response.object['objects'][0]) return drive def ex_attach_drive(self, node): """ Attach a drive to the provided node. """ # TODO pass def ex_get_drive(self, drive_id): """ Retrieve information about a single drive. :param drive_id: ID of the drive to retrieve. :type drive_id: ``str`` :return: Drive object. :rtype: :class:`.CloudSigmaDrive` """ action = '/drives/%s/' % (drive_id) response = self.connection.request(action=action).object drive = self._to_drive(data=response) return drive # Firewall policies extension methods def ex_list_firewall_policies(self): """ List firewall policies. :rtype: ``list`` of :class:`.CloudSigmaFirewallPolicy` """ action = '/fwpolicies/detail/' response = self.connection.request(action=action, method='GET').object policies = [self._to_firewall_policy(data=item) for item in response['objects']] return policies def ex_create_firewall_policy(self, name, rules=None): """ Create a firewall policy. :param name: Policy name. :type name: ``str`` :param rules: List of firewall policy rules to associate with this policy. (optional) :type rules: ``list`` of ``dict`` :return: Created firewall policy object. 
:rtype: :class:`.CloudSigmaFirewallPolicy` """ data = {} obj = {} obj['name'] = name if rules: obj['rules'] = rules data['objects'] = [obj] action = '/fwpolicies/' response = self.connection.request(action=action, method='POST', data=data).object policy = self._to_firewall_policy(data=response['objects'][0]) return policy def ex_attach_firewall_policy(self, policy, node, nic_mac=None): """ Attach firewall policy to a public NIC interface on the server. :param policy: Firewall policy to attach. :type policy: :class:`.CloudSigmaFirewallPolicy` :param node: Node to attach policy to. :type node: :class:`libcloud.compute.base.Node` :param nic_mac: Optional MAC address of the NIC to add the policy to. If not specified, first public interface is used instead. :type nic_mac: ``str`` :return: Node object to which the policy was attached to. :rtype: :class:`libcloud.compute.base.Node` """ nics = copy.deepcopy(node.extra.get('nics', [])) if nic_mac: nic = [n for n in nics if n['mac'] == nic_mac] else: nic = nics if len(nic) == 0: raise ValueError('Cannot find the NIC interface to attach ' 'a policy to') nic = nic[0] nic['firewall_policy'] = policy.id params = {'nics': nics} node = self.ex_edit_node(node=node, params=params) return node def ex_delete_firewall_policy(self, policy): """ Delete a firewall policy. :param policy: Policy to delete to. :type policy: :class:`.CloudSigmaFirewallPolicy` :return: ``True`` on success, ``False`` otherwise. :rtype: ``bool`` """ action = '/fwpolicies/%s/' % (policy.id) response = self.connection.request(action=action, method='DELETE') return response.status == httplib.NO_CONTENT # Availability groups extension methods def ex_list_servers_availability_groups(self): """ Return which running servers share the same physical compute host. :return: A list of server UUIDs which share the same physical compute host. Servers which share the same host will be stored under the same list index. 
:rtype: ``list`` of ``list`` """ action = '/servers/availability_groups/' response = self.connection.request(action=action, method='GET') return response.object def ex_list_drives_availability_groups(self): """ Return which drives share the same physical storage host. :return: A list of drive UUIDs which share the same physical storage host. Drives which share the same host will be stored under the same list index. :rtype: ``list`` of ``list`` """ action = '/drives/availability_groups/' response = self.connection.request(action=action, method='GET') return response.object # Tag extension methods def ex_list_tags(self): """ List all the available tags. :rtype: ``list`` of :class:`.CloudSigmaTag` objects """ action = '/tags/detail/' response = self.connection.request(action=action, method='GET').object tags = [self._to_tag(data=item) for item in response['objects']] return tags def ex_get_tag(self, tag_id): """ Retrieve a single tag. :param tag_id: ID of the tag to retrieve. :type tag_id: ``str`` :rtype: ``list`` of :class:`.CloudSigmaTag` objects """ action = '/tags/%s/' % (tag_id) response = self.connection.request(action=action, method='GET').object tag = self._to_tag(data=response) return tag def ex_create_tag(self, name, resource_uuids=None): """ Create a tag. :param name: Tag name. :type name: ``str`` :param resource_uuids: Optional list of resource UUIDs to assign this tag go. :type resource_uuids: ``list`` of ``str`` :return: Created tag object. :rtype: :class:`.CloudSigmaTag` """ data = {} data['objects'] = [ { 'name': name } ] if resource_uuids: data['resources'] = resource_uuids action = '/tags/' response = self.connection.request(action=action, method='POST', data=data).object tag = self._to_tag(data=response['objects'][0]) return tag def ex_tag_resource(self, resource, tag): """ Associate tag with the provided resource. :param resource: Resource to associate a tag with. 
:type resource: :class:`libcloud.compute.base.Node` or :class:`.CloudSigmaDrive` :param tag: Tag to associate with the resources. :type tag: :class:`.CloudSigmaTag` :return: Updated tag object. :rtype: :class:`.CloudSigmaTag` """ if not hasattr(resource, 'id'): raise ValueError('Resource doesn\'t have id attribute') return self.ex_tag_resources(resources=[resource], tag=tag) def ex_tag_resources(self, resources, tag): """ Associate tag with the provided resources. :param resources: Resources to associate a tag with. :type resources: ``list`` of :class:`libcloud.compute.base.Node` or :class:`.CloudSigmaDrive` :param tag: Tag to associate with the resources. :type tag: :class:`.CloudSigmaTag` :return: Updated tag object. :rtype: :class:`.CloudSigmaTag` """ resources = tag.resources[:] for resource in resources: if not hasattr(resource, 'id'): raise ValueError('Resource doesn\'t have id attribute') resources.append(resource.id) resources = list(set(resources)) data = { 'name': tag.name, 'resources': resources } action = '/tags/%s/' % (tag.id) response = self.connection.request(action=action, method='PUT', data=data).object tag = self._to_tag(data=response) return tag def ex_delete_tag(self, tag): """ Delete a tag. :param tag: Tag to delete. :type tag: :class:`.CloudSigmaTag` :return: ``True`` on success, ``False`` otherwise. :rtype: ``bool`` """ action = '/tags/%s/' % (tag.id) response = self.connection.request(action=action, method='DELETE') return response.status == httplib.NO_CONTENT # Account extension methods def ex_get_balance(self): """ Retrieve account balance information. :return: Dictionary with two items ("balance" and "currency"). :rtype: ``dict`` """ action = '/balance/' response = self.connection.request(action=action, method='GET') return response.object def ex_get_pricing(self): """ Retrieve pricing information that are applicable to the cloud. :return: Dictionary with pricing information. 
:rtype: ``dict`` """ action = '/pricing/' response = self.connection.request(action=action, method='GET') return response.object def ex_get_usage(self): """ Retrieve account current usage information. :return: Dictionary with two items ("balance" and "usage"). :rtype: ``dict`` """ action = '/currentusage/' response = self.connection.request(action=action, method='GET') return response.object def ex_list_subscriptions(self, status='all', resources=None): """ List subscriptions for this account. :param status: Only return subscriptions with the provided status (optional). :type status: ``str`` :param resources: Only return subscriptions for the provided resources (optional). :type resources: ``list`` :rtype: ``list`` """ params = {} if status: params['status'] = status if resources: params['resource'] = ','.join(resources) response = self.connection.request(action='/subscriptions/', params=params).object subscriptions = self._to_subscriptions(data=response) return subscriptions def ex_toggle_subscription_auto_renew(self, subscription): """ Toggle subscription auto renew status. :param subscription: Subscription to toggle the auto renew flag for. :type subscription: :class:`.CloudSigmaSubscription` :return: ``True`` on success, ``False`` otherwise. :rtype: ``bool`` """ path = '/subscriptions/%s/action/' % (subscription.id) response = self._perform_action(path=path, action='auto_renew', method='POST') return response.status == httplib.OK def ex_create_subscription(self, amount, period, resource, auto_renew=False): """ Create a new subscription. :param amount: Subscription amount. For example, in dssd case this would be disk size in gigabytes. :type amount: ``int`` :param period: Subscription period. For example: 30 days, 1 week, 1 month, ... :type period: ``str`` :param resource: Resource the purchase the subscription for. :type resource: ``str`` :param auto_renew: True to automatically renew the subscription. 
:type auto_renew: ``bool`` """ data = [ { 'amount': amount, 'period': period, 'auto_renew': auto_renew, 'resource': resource } ] response = self.connection.request(action='/subscriptions/', data=data, method='POST') data = response.object['objects'][0] subscription = self._to_subscription(data=data) return subscription # Misc extension methods def ex_list_capabilities(self): """ Retrieve all the basic and sensible limits of the API. :rtype: ``dict`` """ action = '/capabilities/' response = self.connection.request(action=action, method='GET') capabilities = response.object return capabilities def _parse_ips_from_nic(self, nic): """ Parse private and public IP addresses from the provided network interface object. :param nic: NIC object. :type nic: ``dict`` :return: (public_ips, private_ips) tuple. :rtype: ``tuple`` """ public_ips, private_ips = [], [] ipv4_conf = nic['ip_v4_conf'] ipv6_conf = nic['ip_v6_conf'] ip_v4 = ipv4_conf['ip'] if ipv4_conf else None ip_v6 = ipv6_conf['ip'] if ipv6_conf else None ipv4 = ip_v4['uuid'] if ip_v4 else None ipv6 = ip_v4['uuid'] if ip_v6 else None ips = [] if ipv4: ips.append(ipv4) if ipv6: ips.append(ipv6) runtime = nic['runtime'] ip_v4 = runtime['ip_v4'] if nic['runtime'] else None ip_v6 = runtime['ip_v6'] if nic['runtime'] else None ipv4 = ip_v4['uuid'] if ip_v4 else None ipv6 = ip_v4['uuid'] if ip_v6 else None if ipv4: ips.append(ipv4) if ipv6: ips.append(ipv6) ips = set(ips) for ip in ips: if is_private_subnet(ip): private_ips.append(ip) else: public_ips.append(ip) return public_ips, private_ips def _to_node(self, data): extra_keys = ['cpu', 'mem', 'nics', 'vnc_password', 'meta'] id = data['uuid'] name = data['name'] state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN) public_ips = [] private_ips = [] extra = self._extract_values(obj=data, keys=extra_keys) for nic in data['nics']: _public_ips, _private_ips = self._parse_ips_from_nic(nic=nic) public_ips.extend(_public_ips) private_ips.extend(_private_ips) node = 
Node(id=id, name=name, state=state, public_ips=public_ips, private_ips=private_ips, driver=self, extra=extra) return node def _to_image(self, data): extra_keys = ['description', 'arch', 'image_type', 'os', 'licenses', 'media', 'meta'] id = data['uuid'] name = data['name'] extra = self._extract_values(obj=data, keys=extra_keys) image = NodeImage(id=id, name=name, driver=self, extra=extra) return image def _to_drive(self, data): id = data['uuid'] name = data['name'] size = data['size'] media = data['media'] status = data['status'] extra = {} drive = CloudSigmaDrive(id=id, name=name, size=size, media=media, status=status, driver=self, extra=extra) return drive def _to_tag(self, data): resources = data['resources'] resources = [resource['uuid'] for resource in resources] tag = CloudSigmaTag(id=data['uuid'], name=data['name'], resources=resources) return tag def _to_subscriptions(self, data): subscriptions = [] for item in data['objects']: subscription = self._to_subscription(data=item) subscriptions.append(subscription) return subscriptions def _to_subscription(self, data): start_time = parse_date(data['start_time']) end_time = parse_date(data['end_time']) obj_uuid = data['subscribed_object'] subscription = CloudSigmaSubscription(id=data['id'], resource=data['resource'], amount=int(data['amount']), period=data['period'], status=data['status'], price=data['price'], start_time=start_time, end_time=end_time, auto_renew=data['auto_renew'], subscribed_object=obj_uuid) return subscription def _to_firewall_policy(self, data): rules = [] for item in data.get('rules', []): rule = CloudSigmaFirewallPolicyRule(action=item['action'], direction=item['direction'], ip_proto=item['ip_proto'], src_ip=item['src_ip'], src_port=item['src_port'], dst_ip=item['dst_ip'], dst_port=item['dst_port'], comment=item['comment']) rules.append(rule) policy = CloudSigmaFirewallPolicy(id=data['uuid'], name=data['name'], rules=rules) return policy def _perform_action(self, path, action, method='POST', 
params=None, data=None): """ Perform API action and return response object. """ if params: params = params.copy() else: params = {} params['do'] = action response = self.connection.request(action=path, method=method, params=params, data=data) return response def _is_installation_cd(self, image): """ Detect if the provided image is an installation CD. :rtype: ``bool`` """ if isinstance(image, CloudSigmaDrive) and image.media == 'cdrom': return True return False def _extract_values(self, obj, keys): """ Extract values from a dictionary and return a new dictionary with extracted values. :param obj: Dictionary to extract values from. :type obj: ``dict`` :param keys: Keys to extract. :type keys: ``list`` :return: Dictionary with extracted values. :rtype: ``dict`` """ result = {} for key in keys: result[key] = obj[key] return result def _wait_for_drive_state_transition(self, drive, state, timeout=DRIVE_TRANSITION_TIMEOUT): """ Wait for a drive to transition to the provided state. Note: This function blocks and periodically calls "GET drive" endpoint to check if the drive has already transitioned to the desired state. :param drive: Drive to wait for. :type drive: :class:`.CloudSigmaDrive` :param state: Desired drive state. :type state: ``str`` :param timeout: How long to wait for the transition (in seconds) before timing out. :type timeout: ``int`` :return: Drive object. :rtype: :class:`.CloudSigmaDrive` """ start_time = time.time() while drive.status != state: drive = self.ex_get_drive(drive_id=drive.id) if drive.status == state: break current_time = time.time() delta = (current_time - start_time) if delta >= timeout: msg = ('Timed out while waiting for drive transition ' '(timeout=%s seconds)' % (timeout)) raise Exception(msg) time.sleep(self.DRIVE_TRANSITION_SLEEP_INTERVAL) return drive def _ex_connection_class_kwargs(self): """ Return the host value based on the user supplied region. 
""" kwargs = {} if not self._host_argument_set: kwargs['host'] = API_ENDPOINTS_2_0[self.region]['host'] return kwargs apache-libcloud-2.2.1/libcloud/compute/drivers/rackspace.py0000664000175000017500000002234213153541406023660 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Rackspace driver """ from libcloud.compute.types import Provider, LibcloudError, VolumeSnapshotState from libcloud.compute.base import NodeLocation, VolumeSnapshot from libcloud.compute.drivers.openstack import OpenStack_1_0_Connection,\ OpenStack_1_0_NodeDriver, OpenStack_1_0_Response from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection,\ OpenStack_1_1_NodeDriver from libcloud.common.rackspace import AUTH_URL from libcloud.utils.iso8601 import parse_date SERVICE_TYPE = 'compute' SERVICE_NAME_GEN1 = 'cloudServers' SERVICE_NAME_GEN2 = 'cloudServersOpenStack' ENDPOINT_ARGS_MAP = { 'dfw': {'service_type': SERVICE_TYPE, 'name': SERVICE_NAME_GEN2, 'region': 'DFW'}, 'ord': {'service_type': SERVICE_TYPE, 'name': SERVICE_NAME_GEN2, 'region': 'ORD'}, 'iad': {'service_type': SERVICE_TYPE, 'name': SERVICE_NAME_GEN2, 'region': 'IAD'}, 'lon': {'service_type': SERVICE_TYPE, 'name': SERVICE_NAME_GEN2, 'region': 'LON'}, 'syd': {'service_type': SERVICE_TYPE, 'name': SERVICE_NAME_GEN2, 'region': 'SYD'}, 'hkg': {'service_type': SERVICE_TYPE, 'name': SERVICE_NAME_GEN2, 'region': 'HKG'}, } class RackspaceFirstGenConnection(OpenStack_1_0_Connection): """ Connection class for the Rackspace first-gen driver. """ responseCls = OpenStack_1_0_Response XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0' auth_url = AUTH_URL _auth_version = '2.0' cache_busting = True def __init__(self, *args, **kwargs): self.region = kwargs.pop('region', None) super(RackspaceFirstGenConnection, self).__init__(*args, **kwargs) def get_endpoint(self): if '2.0' in self._auth_version: ep = self.service_catalog.get_endpoint(service_type=SERVICE_TYPE, name=SERVICE_NAME_GEN1) else: raise LibcloudError( 'Auth version "%s" not supported' % (self._auth_version)) public_url = ep.url if not public_url: raise LibcloudError('Could not find specified endpoint') # This is a nasty hack, but it's required because of how the # auth system works. 
class RackspaceFirstGenNodeDriver(OpenStack_1_0_NodeDriver):
    """
    Driver for the legacy (first generation) Rackspace Cloud Servers API.
    """
    name = 'Rackspace Cloud (First Gen)'
    website = 'http://www.rackspace.com'
    connectionCls = RackspaceFirstGenConnection
    type = Provider.RACKSPACE_FIRST_GEN
    api_name = 'rackspace'

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us', **kwargs):
        """
        @inherits: :class:`NodeDriver.__init__`

        :param region: Region ID which should be used
        :type region: ``str``
        """
        # First-gen only ever had the US and UK regions.
        if region not in ('us', 'uk'):
            raise ValueError('Invalid region: %s' % (region))

        super(RackspaceFirstGenNodeDriver, self).__init__(key=key,
                                                          secret=secret,
                                                          secure=secure,
                                                          host=host,
                                                          port=port,
                                                          region=region,
                                                          **kwargs)

    def list_locations(self):
        """
        Lists available locations

        Locations cannot be set or retrieved via the API, but currently
        there are two locations, DFW and ORD.

        @inherits: :class:`OpenStack_1_0_NodeDriver.list_locations`
        """
        if self.region == 'us':
            available = [NodeLocation(0, "Rackspace DFW1/ORD1", 'US', self)]
        elif self.region == 'uk':
            available = [NodeLocation(0, 'Rackspace UK London', 'UK', self)]

        return available

    def _ex_connection_class_kwargs(self):
        # Forward the region so the connection can fix up catalog URLs.
        conn_kwargs = self.openstack_connection_kwargs()
        conn_kwargs['region'] = self.region
        return conn_kwargs
""" auth_url = AUTH_URL _auth_version = '2.0' def __init__(self, *args, **kwargs): self.region = kwargs.pop('region', None) self.get_endpoint_args = kwargs.pop('get_endpoint_args', None) super(RackspaceConnection, self).__init__(*args, **kwargs) def get_service_name(self): if not self.get_endpoint_args: # if they used ex_force_base_url, assume the Rackspace default return SERVICE_NAME_GEN2 return self.get_endpoint_args.get('name', SERVICE_NAME_GEN2) def get_endpoint(self): if not self.get_endpoint_args: raise LibcloudError( 'RackspaceConnection must have get_endpoint_args set') if '2.0' in self._auth_version: ep = self.service_catalog.get_endpoint(**self.get_endpoint_args) else: raise LibcloudError( 'Auth version "%s" not supported' % (self._auth_version)) public_url = ep.url if not public_url: raise LibcloudError('Could not find specified endpoint') return public_url class RackspaceNodeDriver(OpenStack_1_1_NodeDriver): name = 'Rackspace Cloud (Next Gen)' website = 'http://www.rackspace.com' connectionCls = RackspaceConnection type = Provider.RACKSPACE _networks_url_prefix = '/os-networksv2' def __init__(self, key, secret=None, secure=True, host=None, port=None, region='dfw', **kwargs): """ @inherits: :class:`NodeDriver.__init__` :param region: ID of the region which should be used. 
:type region: ``str`` """ valid_regions = ENDPOINT_ARGS_MAP.keys() if region not in valid_regions: raise ValueError('Invalid region: %s' % (region)) if region == 'lon': self.api_name = 'rackspacenovalon' elif region == 'syd': self.api_name = 'rackspacenovasyd' else: self.api_name = 'rackspacenovaus' super(RackspaceNodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, region=region, **kwargs) def _to_snapshot(self, api_node): if 'snapshot' in api_node: api_node = api_node['snapshot'] extra = {'volume_id': api_node['volumeId'], 'name': api_node['displayName'], 'created': api_node['createdAt'], 'description': api_node['displayDescription'], 'status': api_node['status']} state = self.SNAPSHOT_STATE_MAP.get( api_node['status'], VolumeSnapshotState.UNKNOWN ) try: created_td = parse_date(api_node['createdAt']) except ValueError: created_td = None snapshot = VolumeSnapshot(id=api_node['id'], driver=self, size=api_node['size'], extra=extra, created=created_td, state=state, name=api_node['displayName']) return snapshot def _ex_connection_class_kwargs(self): endpoint_args = ENDPOINT_ARGS_MAP[self.region] kwargs = self.openstack_connection_kwargs() kwargs['region'] = self.region kwargs['get_endpoint_args'] = endpoint_args return kwargs apache-libcloud-2.2.1/libcloud/compute/drivers/serverlove.py0000664000175000017500000000556612701023453024124 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ServerLove Driver """ from libcloud.compute.types import Provider from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection # API end-points API_ENDPOINTS = { 'uk-1': { 'name': 'United Kingdom, Manchester', 'country': 'United Kingdom', 'host': 'api.z1-man.serverlove.com' } } # Default API end-point for the base connection class. DEFAULT_ENDPOINT = 'uk-1' # Retrieved from http://www.serverlove.com/cloud-server-faqs/api-questions/ STANDARD_DRIVES = { '679f5f44-0be7-4745-a658-cccd4334c1aa': { 'uuid': '679f5f44-0be7-4745-a658-cccd4334c1aa', 'description': 'CentOS 5.5', 'size_gunzipped': '1GB', 'supports_deployment': True, }, '5f2e0e29-2937-42b9-b362-d2d07eddbdeb': { 'uuid': '5f2e0e29-2937-42b9-b362-d2d07eddbdeb', 'description': 'Ubuntu Linux 10.04', 'size_gunzipped': '1GB', 'supports_deployment': True, }, '5795b68f-ed26-4639-b41d-c93235062b6b': { 'uuid': '5795b68f-ed26-4639-b41d-c93235062b6b', 'description': 'Debian Linux 5', 'size_gunzipped': '1GB', 'supports_deployment': True, }, '41993a02-0b22-4e49-bb47-0aa8975217e4': { 'uuid': '41993a02-0b22-4e49-bb47-0aa8975217e4', 'description': 'Windows Server 2008 R2 Standard', 'size_gunzipped': '15GB', 'supports_deployment': False, }, '85623ca1-9c2a-4398-a771-9a43c347e86b': { 'uuid': '85623ca1-9c2a-4398-a771-9a43c347e86b', 'description': 'Windows Web Server 2008 R2', 'size_gunzipped': '15GB', 'supports_deployment': False, } } class ServerLoveConnection(ElasticStackBaseConnection): host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] 
class ServerLoveNodeDriver(ElasticStackBaseNodeDriver): type = Provider.SERVERLOVE api_name = 'serverlove' website = 'http://www.serverlove.com/' name = 'ServerLove' connectionCls = ServerLoveConnection features = {'create_node': ['generates_password']} _standard_drives = STANDARD_DRIVES apache-libcloud-2.2.1/libcloud/compute/drivers/rimuhosting.py0000664000175000017500000002776312715310131024300 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" RimuHosting Driver """ try: import simplejson as json except ImportError: import json from libcloud.common.base import ConnectionKey, JsonResponse from libcloud.common.types import InvalidCredsError from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation from libcloud.compute.base import NodeImage API_CONTEXT = '/r' API_HOST = 'rimuhosting.com' class RimuHostingException(Exception): """ Exception class for RimuHosting driver """ def __str__(self): return self.args[0] def __repr__(self): return "" % (self.args[0]) class RimuHostingResponse(JsonResponse): """ Response Class for RimuHosting driver """ def success(self): if self.status == 403: raise InvalidCredsError() return True def parse_body(self): try: js = super(RimuHostingResponse, self).parse_body() keys = list(js.keys()) if js[keys[0]]['response_type'] == "ERROR": raise RimuHostingException( js[keys[0]]['human_readable_message'] ) return js[keys[0]] except KeyError: raise RimuHostingException('Could not parse body: %s' % (self.body)) class RimuHostingConnection(ConnectionKey): """ Connection class for the RimuHosting driver """ api_context = API_CONTEXT host = API_HOST port = 443 responseCls = RimuHostingResponse def __init__(self, key, secure=True, retry_delay=None, backoff=None, timeout=None): # override __init__ so that we can set secure of False for testing ConnectionKey.__init__(self, key, secure, timeout=timeout, retry_delay=retry_delay, backoff=backoff) def add_default_headers(self, headers): # We want JSON back from the server. Could be application/xml # (but JSON is better). headers['Accept'] = 'application/json' # Must encode all data as json, or override this header. 
headers['Content-Type'] = 'application/json' headers['Authorization'] = 'rimuhosting apikey=%s' % (self.key) return headers def request(self, action, params=None, data='', headers=None, method='GET'): if not headers: headers = {} if not params: params = {} # Override this method to prepend the api_context return ConnectionKey.request(self, self.api_context + action, params, data, headers, method) class RimuHostingNodeDriver(NodeDriver): """ RimuHosting node driver """ type = Provider.RIMUHOSTING name = 'RimuHosting' website = 'http://rimuhosting.com/' connectionCls = RimuHostingConnection features = {'create_node': ['password']} def __init__(self, key, host=API_HOST, port=443, api_context=API_CONTEXT, secure=True): """ :param key: API key (required) :type key: ``str`` :param host: hostname for connection :type host: ``str`` :param port: Override port used for connections. :type port: ``int`` :param api_context: Optional API context. :type api_context: ``str`` :param secure: Whether to use HTTPS or HTTP. :type secure: ``bool`` :rtype: ``None`` """ # Pass in some extra vars so that self.key = key self.secure = secure self.connection = self.connectionCls(key, secure) self.connection.host = host self.connection.api_context = api_context self.connection.port = port self.connection.driver = self self.connection.connect() def _order_uri(self, node, resource): # Returns the order uri with its resourse appended. return "/orders/%s/%s" % (node.id, resource) # TODO: Get the node state. 
def _to_node(self, order): n = Node(id=order['slug'], name=order['domain_name'], state=NodeState.RUNNING, public_ips=( [order['allocated_ips']['primary_ip']] + order['allocated_ips']['secondary_ips']), private_ips=[], driver=self.connection.driver, extra={ 'order_oid': order['order_oid'], 'monthly_recurring_fee': order.get( 'billing_info').get('monthly_recurring_fee')}) return n def _to_size(self, plan): return NodeSize( id=plan['pricing_plan_code'], name=plan['pricing_plan_description'], ram=plan['minimum_memory_mb'], disk=plan['minimum_disk_gb'], bandwidth=plan['minimum_data_transfer_allowance_gb'], price=plan['monthly_recurring_amt']['amt_usd'], driver=self.connection.driver ) def _to_image(self, image): return NodeImage(id=image['distro_code'], name=image['distro_description'], driver=self.connection.driver) def list_sizes(self, location=None): # Returns a list of sizes (aka plans) # Get plans. Note this is really just for libcloud. # We are happy with any size. if location is None: location = '' else: location = ";dc_location=%s" % (location.id) res = self.connection.request( '/pricing-plans;server-type=VPS%s' % (location)).object return list(map(lambda x: self._to_size(x), res['pricing_plan_infos'])) def list_nodes(self): # Returns a list of Nodes # Will only include active ones. res = self.connection.request('/orders;include_inactive=N').object return list(map(lambda x: self._to_node(x), res['about_orders'])) def list_images(self, location=None): # Get all base images. # TODO: add other image sources. (Such as a backup of a VPS) # All Images are available for use at all locations res = self.connection.request('/distributions').object return list(map(lambda x: self._to_image(x), res['distro_infos'])) def reboot_node(self, node): # Reboot # PUT the state of RESTARTING to restart a VPS. 
# All data is encoded as JSON data = {'reboot_request': {'running_state': 'RESTARTING'}} uri = self._order_uri(node, 'vps/running-state') self.connection.request(uri, data=json.dumps(data), method='PUT') # XXX check that the response was actually successful return True def destroy_node(self, node): # Shutdown a VPS. uri = self._order_uri(node, 'vps') self.connection.request(uri, method='DELETE') # XXX check that the response was actually successful return True def create_node(self, **kwargs): """Creates a RimuHosting instance @inherits: :class:`NodeDriver.create_node` :keyword name: Must be a FQDN. e.g example.com. :type name: ``str`` :keyword ex_billing_oid: If not set, a billing method is automatically picked. :type ex_billing_oid: ``str`` :keyword ex_host_server_oid: The host server to set the VPS up on. :type ex_host_server_oid: ``str`` :keyword ex_vps_order_oid_to_clone: Clone another VPS to use as the image for the new VPS. :type ex_vps_order_oid_to_clone: ``str`` :keyword ex_num_ips: Number of IPs to allocate. Defaults to 1. :type ex_num_ips: ``int`` :keyword ex_extra_ip_reason: Reason for needing the extra IPs. :type ex_extra_ip_reason: ``str`` :keyword ex_memory_mb: Memory to allocate to the VPS. :type ex_memory_mb: ``int`` :keyword ex_disk_space_mb: Diskspace to allocate to the VPS. Defaults to 4096 (4GB). :type ex_disk_space_mb: ``int`` :keyword ex_disk_space_2_mb: Secondary disk size allocation. Disabled by default. :type ex_disk_space_2_mb: ``int`` :keyword ex_control_panel: Control panel to install on the VPS. :type ex_control_panel: ``str`` """ # Note we don't do much error checking in this because we # expect the API to error out if there is a problem. 
name = kwargs['name'] image = kwargs['image'] size = kwargs['size'] data = { 'instantiation_options': { 'domain_name': name, 'distro': image.id }, 'pricing_plan_code': size.id, 'vps_parameters': {} } if 'ex_control_panel' in kwargs: data['instantiation_options']['control_panel'] = \ kwargs['ex_control_panel'] auth = self._get_and_check_auth(kwargs.get('auth')) data['instantiation_options']['password'] = auth.password if 'ex_billing_oid' in kwargs: # TODO check for valid oid. data['billing_oid'] = kwargs['ex_billing_oid'] if 'ex_host_server_oid' in kwargs: data['host_server_oid'] = kwargs['ex_host_server_oid'] if 'ex_vps_order_oid_to_clone' in kwargs: data['vps_order_oid_to_clone'] = \ kwargs['ex_vps_order_oid_to_clone'] if 'ex_num_ips' in kwargs and int(kwargs['ex_num_ips']) > 1: if 'ex_extra_ip_reason' not in kwargs: raise RimuHostingException( 'Need an reason for having an extra IP') else: if 'ip_request' not in data: data['ip_request'] = {} data['ip_request']['num_ips'] = int(kwargs['ex_num_ips']) data['ip_request']['extra_ip_reason'] = \ kwargs['ex_extra_ip_reason'] if 'ex_memory_mb' in kwargs: data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb'] if 'ex_disk_space_mb' in kwargs: data['vps_parameters']['disk_space_mb'] = \ kwargs['ex_disk_space_mb'] if 'ex_disk_space_2_mb' in kwargs: data['vps_parameters']['disk_space_2_mb'] =\ kwargs['ex_disk_space_2_mb'] # Don't send empty 'vps_parameters' attribute if not data['vps_parameters']: del data['vps_parameters'] res = self.connection.request( '/orders/new-vps', method='POST', data=json.dumps({"new-vps": data}) ).object node = self._to_node(res['about_order']) node.extra['password'] = \ res['new_order_request']['instantiation_options']['password'] return node def list_locations(self): return [ NodeLocation('DCAUCKLAND', "RimuHosting Auckland", 'NZ', self), NodeLocation('DCDALLAS', "RimuHosting Dallas", 'US', self), NodeLocation('DCLONDON', "RimuHosting London", 'GB', self), NodeLocation('DCSYDNEY', 
"RimuHosting Sydney", 'AU', self), ] apache-libcloud-2.2.1/libcloud/compute/drivers/auroracompute.py0000664000175000017500000000377013153541406024616 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.compute.providers import Provider from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver __all__ = [ 'AuroraComputeRegion', 'AuroraComputeNodeDriver' ] class AuroraComputeRegion(object): AMS = 'Amsterdam' RTD = 'Rotterdam' MIA = 'Miami' LAX = 'Los Angeles' TYO = 'Tokyo' BCN = 'Barcelona' REGION_ENDPOINT_MAP = { AuroraComputeRegion.AMS: '/ams', AuroraComputeRegion.RTD: '/rtd', AuroraComputeRegion.MIA: '/mia', AuroraComputeRegion.LAX: '/lax', AuroraComputeRegion.TYO: '/tyo', AuroraComputeRegion.BCN: '/bcn' } class AuroraComputeNodeDriver(CloudStackNodeDriver): type = Provider.AURORACOMPUTE name = 'PCextreme AuroraCompute' website = 'https://www.pcextreme.com/aurora/compute' def __init__(self, key, secret, path=None, host=None, url=None, region=None): if host is None: host = 'api.auroracompute.eu' if path is None: path = REGION_ENDPOINT_MAP.get(region, '/ams') super(AuroraComputeNodeDriver, self).__init__(key=key, secret=secret, host=host, path=path, secure=True) 
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
VMware vSphere driver supporting vSphere v5.5.

Note: This driver requires pysphere package
(https://pypi.python.org/pypi/pysphere) which can be installed using pip. For
more information, please refer to the official documentation.
"""

import os
import sys
import atexit

try:
    import pysphere
    pysphere  # silence "imported but unused" linters
except ImportError:
    raise ImportError('Missing "pysphere" dependency. You can install it '
                      'using pip - pip install pysphere')

from pysphere import VIServer
from pysphere.vi_task import VITask
from pysphere.vi_mor import VIMor, MORTypes
from pysphere.resources import VimService_services as VI
from pysphere.vi_virtual_machine import VIVirtualMachine

from libcloud.utils.decorators import wrap_non_libcloud_exceptions
from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.types import LibcloudError
from libcloud.common.types import InvalidCredsError
from libcloud.compute.base import NodeDriver
from libcloud.compute.base import NodeLocation
from libcloud.compute.base import NodeImage
from libcloud.compute.base import Node
from libcloud.compute.types import NodeState, Provider
from libcloud.utils.networking import is_public_subnet

__all__ = [
    'VSphereNodeDriver',
    'VSphere_5_5_NodeDriver'
]

DEFAULT_API_VERSION = '5.5'
DEFAULT_CONNECTION_TIMEOUT = 5  # default connection timeout in seconds


class VSphereConnection(ConnectionUserAndKey):
    def __init__(self, user_id, key, secure=True,
                 host=None, port=None, url=None, timeout=None, **kwargs):
        """
        :param host: Hostname of the vSphere server. Mutually exclusive
                     with ``url``.
        :param url: Full URL of the vSphere server. Mutually exclusive
                    with ``host``.
        :raises ValueError: When both or neither of ``host`` / ``url`` are
                            provided.
        """
        if host and url:
            raise ValueError('host and url arguments are mutually exclusive')

        if host:
            host_or_url = host
        elif url:
            host_or_url = url
        else:
            raise ValueError('Either "host" or "url" argument must be '
                             'provided')

        self.host_or_url = host_or_url
        self.client = None
        super(VSphereConnection, self).__init__(user_id=user_id,
                                                key=key, secure=secure,
                                                host=host, port=port,
                                                url=url, timeout=timeout,
                                                **kwargs)

    def connect(self):
        """
        Establish a pysphere session to the server, translating pysphere
        login failures to libcloud exception types.
        """
        self.client = VIServer()

        trace_file = os.environ.get('LIBCLOUD_DEBUG', None)

        try:
            self.client.connect(host=self.host_or_url, user=self.user_id,
                                password=self.key,
                                sock_timeout=DEFAULT_CONNECTION_TIMEOUT,
                                trace_file=trace_file)
        except Exception:
            e = sys.exc_info()[1]
            # Fixed: "e.message" does not exist on Python 3
            # (BaseException.message was removed); prefer strerror when
            # available and fall back to str(e).
            if hasattr(e, 'strerror'):
                message = e.strerror
            else:
                message = str(e)

            fault = getattr(e, 'fault', None)

            if fault == 'InvalidLoginFault':
                raise InvalidCredsError(message)

            raise LibcloudError(value=message,
                                driver=self.driver)

        # Make sure the session is torn down on interpreter exit.
        atexit.register(self.disconnect)

    def disconnect(self):
        if not self.client:
            return

        try:
            self.client.disconnect()
        except Exception:
            # Ignore all the disconnect errors
            pass

    def run_client_method(self, method_name, **method_kwargs):
        # Thin dispatch helper over the underlying pysphere client.
        method = getattr(self.client, method_name, None)
        return method(**method_kwargs)


class VSphereNodeDriver(NodeDriver):
    name = 'VMware vSphere'
    website = 'http://www.vmware.com/products/vsphere/'
    type = Provider.VSPHERE

    connectionCls = VSphereConnection

    # Maps vSphere power states to libcloud NodeState values.
    NODE_STATE_MAP = {
        'POWERED ON': NodeState.RUNNING,
        'POWERED OFF': NodeState.STOPPED,
        'SUSPENDED': NodeState.SUSPENDED,
        'POWERING ON': NodeState.PENDING,
        'POWERING OFF': NodeState.PENDING,
        'SUSPENDING': NodeState.PENDING,
        'RESETTING': NodeState.PENDING,
        'BLOCKED ON MSG': NodeState.ERROR,
        'REVERTING TO SNAPSHOT': NodeState.PENDING
    }

    def __new__(cls, username, password, secure=True, host=None, port=None,
                url=None, api_version=DEFAULT_API_VERSION, **kwargs):
        # Dispatch to the concrete driver class for the requested API
        # version; only 5.5 is implemented.
        if cls is VSphereNodeDriver:
            if api_version == '5.5':
                cls = VSphere_5_5_NodeDriver
            else:
                raise NotImplementedError('Unsupported API version: %s' %
                                          (api_version))
        return super(VSphereNodeDriver, cls).__new__(cls)

    def __init__(self, username, password, secure=True, host=None, port=None,
                 url=None, timeout=None):
        self.url = url
        super(VSphereNodeDriver, self).__init__(key=username, secret=password,
                                                secure=secure, host=host,
                                                port=port, url=url)

    @wrap_non_libcloud_exceptions
    def list_locations(self):
        """
        List available locations.

        In vSphere case, a location represents a datacenter.
        """
        datacenters = self.connection.client.get_datacenters()

        locations = []
        for id, name in datacenters.items():
            location = NodeLocation(id=id, name=name, country=None,
                                    driver=self)
            locations.append(location)

        return locations

    @wrap_non_libcloud_exceptions
    def list_images(self):
        """
        List available images (templates).
        """
        server = self.connection.client

        names = ['name', 'config.uuid', 'config.template']
        properties = server._retrieve_properties_traversal(
            property_names=names,
            from_node=None,
            obj_type=MORTypes.VirtualMachine)

        images = []
        for prop in properties:
            id = None
            name = None
            is_template = False

            for item in prop.PropSet:
                if item.Name == 'config.uuid':
                    id = item.Val
                if item.Name == 'name':
                    name = item.Val
                elif item.Name == 'config.template':
                    is_template = item.Val

            # Only VMs flagged as templates are exposed as images.
            if is_template:
                image = NodeImage(id=id, name=name, driver=self)
                images.append(image)

        return images

    @wrap_non_libcloud_exceptions
    def list_nodes(self):
        vm_paths = self.connection.client.get_registered_vms()
        nodes = self._to_nodes(vm_paths=vm_paths)
        return nodes

    # Fixed: the decorator was applied twice; once is sufficient.
    @wrap_non_libcloud_exceptions
    def ex_clone_node(self, node, name, power_on=True, template=False):
        """
        Clone the provided node.

        :param node: Node to clone.
        :type node: :class:`libcloud.compute.base.Node`

        :param name: Name of the new node.
        :type name: ``str``

        :param power_on: Power the new node on after being created.
        :type power_on: ``bool``

        :param template: Specifies whether or not the new virtual machine
                         should be marked as a template.
        :type template: ``bool``

        :return: New node.
        :rtype: :class:`libcloud.compute.base.Node`
        """
        vm = self._get_vm_for_node(node=node)
        new_vm = vm.clone(name=name, power_on=power_on, template=template)
        new_node = self._to_node(vm=new_vm)

        return new_node

    @wrap_non_libcloud_exceptions
    def ex_migrate_node(self, node, resource_pool=None, host=None,
                        priority='default'):
        """
        Migrate provided node to a new host or resource pool.

        :param node: Node to clone.
        :type node: :class:`libcloud.compute.base.Node`

        :param resource_pool: ID of the target resource pool to migrate the
                              node into.
        :type resource_pool: ``str``

        :param host: Target host to migrate the host to.
        :type host: ``str``

        :param priority: Migration task priority. Possible values: default,
                         high, low.
        :type priority: ``str``

        :return: True on success.
        :rtype: ``bool``
        """
        vm = self._get_vm_for_node(node=node)
        vm.migrate(priority=priority, resource_pool=resource_pool,
                   host=host)

        return True

    @wrap_non_libcloud_exceptions
    def reboot_node(self, node):
        vm = self._get_vm_for_node(node=node)
        vm.reset()

        return True

    @wrap_non_libcloud_exceptions
    def destroy_node(self, node, ex_remove_files=True):
        """
        :param ex_remove_files: Remove all the files from the datastore.
        :type ex_remove_files: ``bool``
        """
        # Fixed: the parameter used to be unconditionally overwritten with
        # "ex_remove_files = False", which dead-coded the documented option.
        vm = self._get_vm_for_node(node=node)

        server = self.connection.client

        # Based on code from
        # https://pypi.python.org/pypi/pyxenter
        if ex_remove_files:
            # Destroy_Task deletes the VM *and* its files on the datastore.
            request = VI.Destroy_TaskRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.Destroy_Task(request)._returnval
            task = VITask(ret, server)

            # Wait for the task to finish
            status = task.wait_for_state([task.STATE_SUCCESS,
                                          task.STATE_ERROR])

            if status == task.STATE_ERROR:
                raise LibcloudError('Error destroying node: %s' %
                                    (task.get_error_message()))
        else:
            # UnregisterVM only removes the VM from inventory; files stay.
            request = VI.UnregisterVMRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.UnregisterVM(request)
            task = VITask(ret, server)

        return True

    @wrap_non_libcloud_exceptions
    def ex_stop_node(self, node):
        vm = self._get_vm_for_node(node=node)
        vm.power_off()

        return True

    @wrap_non_libcloud_exceptions
    def ex_start_node(self, node):
        vm = self._get_vm_for_node(node=node)
        vm.power_on()

        return True

    @wrap_non_libcloud_exceptions
    def ex_suspend_node(self, node):
        vm = self._get_vm_for_node(node=node)
        vm.suspend()

        return True

    @wrap_non_libcloud_exceptions
    def ex_get_resource_pools(self):
        """
        Return all the available resource pools.

        :rtype: ``dict``
        """
        result = self.connection.client.get_resource_pools()
        return result

    @wrap_non_libcloud_exceptions
    def ex_get_resource_pool_name(self, node):
        """
        Retrieve resource pool name for the provided node.

        :rtype: ``str``
        """
        vm = self._get_vm_for_node(node=node)
        return vm.get_resource_pool_name()

    @wrap_non_libcloud_exceptions
    def ex_get_hosts(self):
        """
        Return all the available hosts.

        :rtype: ``dict``
        """
        result = self.connection.client.get_hosts()
        return result

    @wrap_non_libcloud_exceptions
    def ex_get_datastores(self):
        """
        Return all the available datastores.

        :rtype: ``dict``
        """
        result = self.connection.client.get_datastores()
        return result

    @wrap_non_libcloud_exceptions
    def ex_get_node_by_path(self, path):
        """
        Retrieve Node object for a VM with a provided path.

        :type path: ``str``
        :rtype: :class:`libcloud.compute.base.Node`
        """
        vm = self.connection.client.get_vm_by_path(path)
        node = self._to_node(vm=vm)
        return node

    # Consistency: decorated like the sibling ex_get_* methods so pysphere
    # exceptions are translated the same way.
    @wrap_non_libcloud_exceptions
    def ex_get_node_by_uuid(self, uuid):
        """
        Retrieve Node object for a VM with a provided uuid.

        :type uuid: ``str``
        """
        vm = self._get_vm_for_uuid(uuid=uuid)
        node = self._to_node(vm=vm)
        return node

    @wrap_non_libcloud_exceptions
    def ex_get_server_type(self):
        """
        Return VMware installation type.

        :rtype: ``str``
        """
        return self.connection.client.get_server_type()

    @wrap_non_libcloud_exceptions
    def ex_get_api_version(self):
        """
        Return API version of the vmware provider.

        :rtype: ``str``
        """
        return self.connection.client.get_api_version()

    def _get_vm_for_uuid(self, uuid, datacenter=None):
        """
        Retrieve VM for the provided UUID.

        :type uuid: ``str``
        """
        server = self.connection.client

        dc_list = []
        if datacenter and VIMor.is_mor(datacenter):
            dc_list.append(datacenter)
        else:
            dc = server.get_datacenters()
            if datacenter:
                # Fixed: dict.iteritems() / iterkeys() were removed in
                # Python 3; items() / keys() work on both versions.
                dc_list = [k for k, v in dc.items() if v == datacenter]
            else:
                dc_list = list(dc.keys())

        for mor_dc in dc_list:
            request = VI.FindByUuidRequestMsg()
            search_index = server._do_service_content.SearchIndex
            mor_search_index = request.new__this(search_index)
            mor_search_index.set_attribute_type(MORTypes.SearchIndex)
            request.set_element__this(mor_search_index)

            mor_datacenter = request.new_datacenter(mor_dc)
            mor_datacenter.set_attribute_type(MORTypes.Datacenter)
            request.set_element_datacenter(mor_datacenter)

            request.set_element_vmSearch(True)
            request.set_element_uuid(uuid)

            try:
                vm = server._proxy.FindByUuid(request)._returnval
            except VI.ZSI.FaultException:
                # Not found in this datacenter; try the next one.
                pass
            else:
                if vm:
                    return VIVirtualMachine(server, vm)

        return None

    def _to_nodes(self, vm_paths):
        nodes = []
        for vm_path in vm_paths:
            vm = self.connection.client.get_vm_by_path(vm_path)
            node = self._to_node(vm=vm)
            nodes.append(node)

        return nodes

    def _to_node(self, vm):
        """
        Convert a pysphere VIVirtualMachine object to a libcloud Node.
        """
        assert(isinstance(vm, VIVirtualMachine))

        properties = vm.get_properties()
        status = vm.get_status()

        uuid = vm.properties.config.uuid
        instance_uuid = vm.properties.config.instanceUuid

        id = uuid
        name = properties['name']
        public_ips = []
        private_ips = []

        state = self.NODE_STATE_MAP.get(status, NodeState.UNKNOWN)
        ip_address = properties.get('ip_address', None)
        net = properties.get('net', [])

        resource_pool_id = str(vm.properties.resourcePool._obj)

        try:
            # Fixed: a stray trailing comma used to turn this value into a
            # one-element tuple.
            operating_system = vm.properties.summary.guest.guestFullName
        except Exception:
            operating_system = 'unknown'

        extra = {
            'uuid': uuid,
            'instance_uuid': instance_uuid,
            'path': properties['path'],
            'resource_pool_id': resource_pool_id,
            'hostname': properties.get('hostname', None),
            'guest_id': properties['guest_id'],
            'devices': properties.get('devices', {}),
            'disks': properties.get('disks', []),
            'net': net,

            'overall_status': vm.properties.overallStatus,
            'operating_system': operating_system,
            'cpus': vm.properties.config.hardware.numCPU,
            'memory_mb': vm.properties.config.hardware.memoryMB
        }

        # Add primary IP
        if ip_address:
            if is_public_subnet(ip_address):
                public_ips.append(ip_address)
            else:
                private_ips.append(ip_address)

        # Add other IP addresses
        for nic in net:
            ip_addresses = nic['ip_addresses']
            for ip_address in ip_addresses:
                try:
                    is_public = is_public_subnet(ip_address)
                except Exception:
                    # TODO: Better support for IPv6
                    is_public = False

                if is_public:
                    public_ips.append(ip_address)
                else:
                    private_ips.append(ip_address)

        # Remove duplicate IPs
        public_ips = list(set(public_ips))
        private_ips = list(set(private_ips))

        node = Node(id=id, name=name, state=state, public_ips=public_ips,
                    private_ips=private_ips, driver=self, extra=extra)
        return node

    def _get_vm_for_node(self, node):
        uuid = node.id
        vm = self._get_vm_for_uuid(uuid=uuid)
        return vm

    def _ex_connection_class_kwargs(self):
        kwargs = {
            'url': self.url
        }

        return kwargs


class VSphere_5_5_NodeDriver(VSphereNodeDriver):
    name = 'VMware vSphere v5.5'
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Cloudwatt driver. """ import sys try: import simplejson as json except ImportError: import json from libcloud.utils.py3 import httplib from libcloud.compute.types import Provider from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver from libcloud.common.openstack_identity import OpenStackIdentityConnection from libcloud.utils.iso8601 import parse_date from libcloud.compute.types import InvalidCredsError, MalformedResponseError __all__ = [ 'CloudwattNodeDriver' ] BASE_URL = 'https://identity.fr1.cloudwatt.com/v2.0' AUTH_URL = BASE_URL + '/tokens' class CloudwattAuthConnection(OpenStackIdentityConnection): """ AuthConnection class for the Cloudwatt driver. """ name = 'Cloudwatt Auth' def __init__(self, *args, **kwargs): self._ex_tenant_id = kwargs.pop('ex_tenant_id') super(CloudwattAuthConnection, self).__init__(*args, **kwargs) def authenticate(self, force=False): reqbody = json.dumps({'auth': { 'passwordCredentials': { 'username': self.user_id, 'password': self.key }, 'tenantId': self._ex_tenant_id }}) resp = self.request('/tokens', data=reqbody, headers={}, method='POST') if resp.status == httplib.UNAUTHORIZED: # HTTP UNAUTHORIZED (401): auth failed raise InvalidCredsError() elif resp.status != httplib.OK: body = 'code: %s body:%s' % (resp.status, resp.body) raise MalformedResponseError('Malformed response', body=body, driver=self.driver) else: try: body = json.loads(resp.body) except Exception: e = sys.exc_info()[1] raise MalformedResponseError('Failed to parse JSON', e) try: expires = body['access']['token']['expires'] self.auth_token = body['access']['token']['id'] self.auth_token_expires = parse_date(expires) self.urls = body['access']['serviceCatalog'] self.auth_user_info = None except KeyError: e = sys.exc_info()[1] raise MalformedResponseError('Auth JSON response is \ missing required elements', e) return self class CloudwattConnection(OpenStack_1_1_Connection): 
""" Connection class for the Cloudwatt driver. """ auth_url = BASE_URL service_region = 'fr1' service_type = 'compute' def __init__(self, *args, **kwargs): self.ex_tenant_id = kwargs.pop('ex_tenant_id') super(CloudwattConnection, self).__init__(*args, **kwargs) osa = CloudwattAuthConnection( auth_url=AUTH_URL, user_id=self.user_id, key=self.key, tenant_name=self._ex_tenant_name, timeout=self.timeout, ex_tenant_id=self.ex_tenant_id, parent_conn=self ) self._osa = osa self._auth_version = '2.0' class CloudwattNodeDriver(OpenStack_1_1_NodeDriver): """ Implements the :class:`NodeDriver`'s for Cloudwatt. """ name = 'Cloudwatt' website = 'https://www.cloudwatt.com/' connectionCls = CloudwattConnection type = Provider.CLOUDWATT def __init__(self, key, secret, tenant_id, secure=True, tenant_name=None, host=None, port=None, **kwargs): """ @inherits: :class:`NodeDriver.__init__` :param tenant_id: ID of tenant required for Cloudwatt auth :type tenant_id: ``str`` """ self.ex_tenant_id = tenant_id self.extra = {} super(CloudwattNodeDriver, self).__init__( key=key, secret=secret, secure=secure, host=host, port=port, **kwargs ) def attach_volume(self, node, volume, device=None): return super(CloudwattNodeDriver, self)\ .attach_volume(node, volume, device) def _ex_connection_class_kwargs(self): """ Includes ``tenant_id`` in Connection. """ return { 'ex_tenant_id': self.ex_tenant_id } apache-libcloud-2.2.1/libcloud/compute/drivers/bsnl.py0000664000175000017500000000364612701023453022663 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.compute.providers import Provider
from libcloud.common.dimensiondata import (DimensionDataConnection,
                                           API_ENDPOINTS)
from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver

DEFAULT_REGION = 'bsnl-in'


class BSNLNodeDriver(DimensionDataNodeDriver):
    """
    BSNL node driver, based on Dimension Data driver
    """
    selected_region = None
    connectionCls = DimensionDataConnection
    name = 'BSNL'
    website = 'http://www.bsnlcloud.com/'
    type = Provider.BSNL
    features = {'create_node': ['password']}
    api_version = 1.0

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=DEFAULT_REGION, **kwargs):
        """
        :param key: API key / username.
        :param secret: API secret / password.
        :param region: Key into ``API_ENDPOINTS``; defaults to 'bsnl-in'.
        :raises ValueError: When ``region`` is not a known endpoint key.
        """
        # Guard clause: reject unknown regions before touching any state.
        if region not in API_ENDPOINTS:
            raise ValueError('Invalid region: %s' % (region))

        # Remember the resolved endpoint descriptor for this region.
        self.selected_region = API_ENDPOINTS[region]

        super(BSNLNodeDriver, self).__init__(key=key,
                                             secret=secret,
                                             secure=secure,
                                             host=host,
                                             port=port,
                                             api_version=api_version,
                                             region=region,
                                             **kwargs)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Node driver for Aliyun. """ try: import simplejson as json except ImportError: import json import time from libcloud.common.aliyun import AliyunXmlResponse, SignedAliyunConnection from libcloud.common.types import LibcloudError from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize, \ StorageVolume, VolumeSnapshot, NodeLocation from libcloud.compute.types import NodeState, StorageVolumeState, \ VolumeSnapshotState from libcloud.utils.py3 import _real_unicode as u from libcloud.utils.xml import findall, findattr, findtext __all__ = [ 'DiskCategory', 'InternetChargeType', 'ECS_API_VERSION', 'ECSDriver', 'ECSSecurityGroup', 'ECSZone' ] ECS_API_VERSION = '2014-05-26' ECS_API_ENDPOINT = 'ecs.aliyuncs.com' DEFAULT_SIGNATURE_VERSION = '1.0' def _parse_bool(value): if isinstance(value, bool): return value if u(value).lower() == 'true': return True return False """ Define the extra dictionary for specific resources """ RESOURCE_EXTRA_ATTRIBUTES_MAP = { 'node': { 'description': { 'xpath': 'Description', 'transform_func': u }, 'image_id': { 'xpath': 'ImageId', 'transform_func': u }, 'zone_id': { 'xpath': 'ZoneId', 'transform_func': u }, 'instance_type': { 'xpath': 'InstanceType', 'transform_func': u }, 'instance_type_family': { 'xpath': 'InstanceTypeFamily', 'transform_func': u }, 'hostname': { 'xpath': 'HostName', 'transform_func': u }, 'serial_number': { 'xpath': 'SerialNumber', 'transform_func': u }, 'internet_charge_type': { 'xpath': 'InternetChargeType', 'transform_func': u }, 'creation_time': { 'xpath': 'CreationTime', 'transform_func': u }, 
'instance_network_type': { 'xpath': 'InstanceNetworkType', 'transform_func': u }, 'instance_charge_type': { 'xpath': 'InstanceChargeType', 'transform_func': u }, 'device_available': { 'xpath': 'DeviceAvailable', 'transform_func': u }, 'io_optimized': { 'xpath': 'IoOptimized', 'transform_func': u }, 'expired_time': { 'xpath': 'ExpiredTime', 'transform_func': u } }, 'vpc_attributes': { 'vpc_id': { 'xpath': 'VpcId', 'transform_func': u }, 'vswitch_id': { 'xpath': 'VSwitchId', 'transform_func': u }, 'private_ip_address': { 'xpath': 'PrivateIpAddress/IpAddress', 'transform_func': u }, 'nat_ip_address': { 'xpath': 'NatIpAddress', 'transform_func': u } }, 'eip_address_associate': { 'allocation_id': { 'xpath': 'AllocationId', 'transform_func': u }, 'ip_address': { 'xpath': 'IpAddress', 'transform_func': u }, 'bandwidth': { 'xpath': 'Bandwidth', 'transform_func': int }, 'internet_charge_type': { 'xpath': 'InternetChargeType', 'transform_func': u } }, 'operation_locks': { 'lock_reason': { 'xpath': 'LockReason', 'transform_func': u } }, 'volume': { 'region_id': { 'xpath': 'RegionId', 'transform_func': u }, 'zone_id': { 'xpath': 'ZoneId', 'transform_func': u }, 'description': { 'xpath': 'Description', 'transform_func': u }, 'type': { 'xpath': 'Type', 'transform_func': u }, 'category': { 'xpath': 'Category', 'transform_func': u }, 'image_id': { 'xpath': 'ImageId', 'transform_func': u }, 'source_snapshot_id': { 'xpath': 'SourceSnapshotId', 'transform_func': u }, 'product_code': { 'xpath': 'ProductCode', 'transform_func': u }, 'portable': { 'xpath': 'Portable', 'transform_func': _parse_bool }, 'instance_id': { 'xpath': 'InstanceId', 'transform_func': u }, 'device': { 'xpath': 'Device', 'transform_func': u }, 'delete_with_instance': { 'xpath': 'DeleteWithInstance', 'transform_func': _parse_bool }, 'enable_auto_snapshot': { 'xpath': 'EnableAutoSnapshot', 'transform_func': _parse_bool }, 'creation_time': { 'xpath': 'CreationTime', 'transform_func': u }, 'attached_time': { 'xpath': 
'AttachedTime', 'transform_func': u }, 'detached_time': { 'xpath': 'DetachedTime', 'transform_func': u }, 'disk_charge_type': { 'xpath': 'DiskChargeType', 'transform_func': u } }, 'snapshot': { 'snapshot_name': { 'xpath': 'SnapshotName', 'transform_func': u }, 'description': { 'xpath': 'Description', 'transform_func': u }, 'progress': { 'xpath': 'Progress', 'transform_func': u }, 'source_disk_id': { 'xpath': 'SourceDiskId', 'transform_func': u }, 'source_disk_size': { 'xpath': 'SourceDiskSize', 'transform_func': int }, 'source_disk_type': { 'xpath': 'SourceDiskType', 'transform_func': u }, 'product_code': { 'xpath': 'ProductCode', 'transform_func': u }, 'usage': { 'xpath': 'Usage', 'transform_func': u } }, 'image': { 'image_version': { 'xpath': 'ImageVersion', 'transform_func': u }, 'os_type': { 'xpath': 'OSType', 'transform_func': u }, 'platform': { 'xpath': 'Platform', 'transform_func': u }, 'architecture': { 'xpath': 'Architecture', 'transform_func': u }, 'description': { 'xpath': 'Description', 'transform_func': u }, 'size': { 'xpath': 'Size', 'transform_func': int }, 'image_owner_alias': { 'xpath': 'ImageOwnerAlias', 'transform_func': u }, 'os_name': { 'xpath': 'OSName', 'transform_func': u }, 'product_code': { 'xpath': 'ProductCode', 'transform_func': u }, 'is_subscribed': { 'xpath': 'IsSubscribed', 'transform_func': _parse_bool }, 'progress': { 'xpath': 'Progress', 'transform_func': u }, 'creation_time': { 'xpath': 'CreationTime', 'transform_func': u }, 'usage': { 'xpath': 'Usage', 'transform_func': u }, 'is_copied': { 'xpath': 'IsCopied', 'transform_func': _parse_bool } }, 'disk_device_mapping': { 'snapshot_id': { 'xpath': 'SnapshotId', 'transform_func': u }, 'size': { 'xpath': 'Size', 'transform_func': int }, 'device': { 'xpath': 'Device', 'transform_func': u }, 'format': { 'xpath': 'Format', 'transform_func': u }, 'import_oss_bucket': { 'xpath': 'ImportOSSBucket', 'transform_func': u }, 'import_oss_object': { 'xpath': 'ImportOSSObject', 'transform_func': 
u } } } class ECSConnection(SignedAliyunConnection): """ Represents a single connection to the Aliyun ECS Endpoint. """ api_version = ECS_API_VERSION host = ECS_API_ENDPOINT responseCls = AliyunXmlResponse service_name = 'ecs' class ECSSecurityGroup(object): """ Security group used to control nodes internet and intranet accessibility. """ def __init__(self, id, name, description=None, driver=None, vpc_id=None, creation_time=None): self.id = id self.name = name self.description = description self.driver = driver self.vpc_id = vpc_id self.creation_time = creation_time def __repr__(self): return ('' % (self.id, self.name, self.driver.name)) class ECSSecurityGroupAttribute(object): """ Security group attribute. """ def __init__(self, ip_protocol=None, port_range=None, source_group_id=None, policy=None, nic_type=None): self.ip_protocol = ip_protocol self.port_range = port_range self.source_group_id = source_group_id self.policy = policy self.nic_type = nic_type def __repr__(self): return ('' % (self.ip_protocol)) class ECSZone(object): """ ECSZone used to represent an availability zone in a region. """ def __init__(self, id, name, driver=None, available_resource_types=None, available_instance_types=None, available_disk_categories=None): self.id = id self.name = name self.driver = driver self.available_resource_types = available_resource_types self.available_instance_types = available_instance_types self.available_disk_categories = available_disk_categories def __repr__(self): return ('' % (self.id, self.name, self.driver)) class InternetChargeType(object): """ Internet connection billing types for Aliyun Nodes. """ BY_BANDWIDTH = 'PayByBandwidth' BY_TRAFFIC = 'PayByTraffic' class DiskCategory(object): """ Enum defined disk types supported by Aliyun system and data disks. """ CLOUD = 'cloud' CLOUD_EFFICIENCY = 'cloud_efficiency' CLOUD_SSD = 'cloud_ssd' EPHEMERAL_SSD = 'ephemeral_ssd' class Pagination(object): """ Pagination used to describe the multiple pages results. 
""" def __init__(self, total, size, current): """ Create a pagination. :param total: the total count of the results :param size: the page size of each page :param current: the current page number, 1-based """ self.total = total self.size = size self.current = current def next(self): """ Switch to the next page. :return: the new pagination or None when no more page :rtype: ``Pagination`` """ if self.total is None or (self.size * self.current >= self.total): return None self.current += 1 return self def to_dict(self): return {'PageNumber': self.current, 'PageSize': self.size} def __repr__(self): return ('' % (self.total, self.size, self.current)) class ECSDriver(NodeDriver): """ Aliyun ECS node driver. Used for Aliyun ECS service. TODO: Get guest OS root password Adjust internet bandwidth settings Manage security groups and rules """ name = 'Aliyun ECS' website = 'https://www.aliyun.com/product/ecs' connectionCls = ECSConnection features = {'create_node': ['password']} namespace = None path = '/' internet_charge_types = InternetChargeType disk_categories = DiskCategory NODE_STATE_MAPPING = { 'Starting': NodeState.PENDING, 'Running': NodeState.RUNNING, 'Stopping': NodeState.PENDING, 'Stopped': NodeState.STOPPED } VOLUME_STATE_MAPPING = { 'In_use': StorageVolumeState.INUSE, 'Available': StorageVolumeState.AVAILABLE, 'Attaching': StorageVolumeState.ATTACHING, 'Detaching': StorageVolumeState.INUSE, 'Creating': StorageVolumeState.CREATING, 'ReIniting': StorageVolumeState.CREATING} SNAPSHOT_STATE_MAPPING = { 'progressing': VolumeSnapshotState.CREATING, 'accomplished': VolumeSnapshotState.AVAILABLE, 'failed': VolumeSnapshotState.ERROR} def list_nodes(self, ex_node_ids=None, ex_filters=None): """ List all nodes. @inherits: :class:`NodeDriver.create_node` :keyword ex_node_ids: a list of node's ids used to filter nodes. Only the nodes which's id in this list will be returned. 
def list_sizes(self, location=None):
    """
    List available ECS instance types as ``NodeSize`` objects.

    :keyword location: ignored; sizes are region-independent here.
    :rtype: ``list`` of :class:`NodeSize`
    """
    params = {'Action': 'DescribeInstanceTypes'}

    resp_body = self.connection.request(self.path, params).object
    size_elements = findall(resp_body, 'InstanceTypes/InstanceType',
                            namespace=self.namespace)
    sizes = [self._to_size(each) for each in size_elements]
    return sizes

def list_locations(self):
    """
    List all ECS regions as ``NodeLocation`` objects.

    :rtype: ``list`` of :class:`NodeLocation`
    """
    params = {'Action': 'DescribeRegions'}

    resp_body = self.connection.request(self.path, params).object
    location_elements = findall(resp_body, 'Regions/Region',
                                namespace=self.namespace)
    locations = [self._to_location(each) for each in location_elements]
    return locations

def create_node(self, name, size, image, auth=None,
                ex_security_group_id=None, ex_description=None,
                ex_internet_charge_type=None,
                ex_internet_max_bandwidth_out=None,
                ex_internet_max_bandwidth_in=None,
                ex_hostname=None, ex_io_optimized=None,
                ex_system_disk=None, ex_data_disks=None,
                ex_vswitch_id=None, ex_private_ip_address=None,
                ex_client_token=None, **kwargs):
    """
    @inherits: :class:`NodeDriver.create_node`

    :param name: The name for this new node (required)
    :type name: ``str``

    :param image: The image to use when creating this node (required)
    :type image: `NodeImage`

    :param size: The size of the node to create (required)
    :type size: `NodeSize`

    :keyword auth: Initial authentication information for the node
                   (optional)
    :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`

    :keyword ex_security_group_id: The id of the security group the
                                   new created node is attached to.
                                   (required)
    :type ex_security_group_id: ``str``

    :keyword ex_description: A description string for this node (optional)
    :type ex_description: ``str``

    :keyword ex_internet_charge_type: The internet charge type (optional)
    :type ex_internet_charge_type: a ``str`` of 'PayByTraffic'
                                   or 'PayByBandwidth'

    :keyword ex_internet_max_bandwidth_out: The max output bandwidth,
                                            in Mbps (optional); required
                                            for 'PayByTraffic'
    :type ex_internet_max_bandwidth_out: a ``int`` in range [0, 100],
                                         [1, 100] for 'PayByTraffic'

    :keyword ex_internet_max_bandwidth_in: The max input bandwidth,
                                           in Mbps (optional)
    :type ex_internet_max_bandwidth_in: a ``int`` in range [1, 200],
                                        default to 200 in server side

    :keyword ex_hostname: The hostname for the node (optional)
    :type ex_hostname: ``str``

    :keyword ex_io_optimized: Whether the node is IO optimized (optional)
    :type ex_io_optimized: ``bool``

    :keyword ex_system_disk: The system disk for the node (optional)
    :type ex_system_disk: ``dict``

    :keyword ex_data_disks: The data disks for the node (optional)
    :type ex_data_disks: a `list` of `dict`

    :keyword ex_vswitch_id: The id of vswitch for a VPC type node
                            (optional)
    :type ex_vswitch_id: ``str``

    :keyword ex_private_ip_address: The IP address in private network
                                    (optional)
    :type ex_private_ip_address: ``str``

    :keyword ex_client_token: A token generated by client to keep
                              requests idempotency (optional)
    :type ex_client_token: ``str``
    """
    params = {'Action': 'CreateInstance',
              'RegionId': self.region,
              'ImageId': image.id,
              'InstanceType': size.id,
              'InstanceName': name}

    if not ex_security_group_id:
        raise AttributeError('ex_security_group_id is mandatory')
    params['SecurityGroupId'] = ex_security_group_id

    if ex_description:
        params['Description'] = ex_description

    inet_params = self._get_internet_related_params(
        ex_internet_charge_type,
        ex_internet_max_bandwidth_in,
        ex_internet_max_bandwidth_out)
    if inet_params:
        params.update(inet_params)

    if ex_hostname:
        params['HostName'] = ex_hostname

    if auth:
        auth = self._get_and_check_auth(auth)
        params['Password'] = auth.password

    if ex_io_optimized is not None:
        optimized = ex_io_optimized
        if isinstance(optimized, bool):
            # API expects the strings 'optimized'/'none', not booleans
            optimized = 'optimized' if optimized else 'none'
        params['IoOptimized'] = optimized

    if ex_system_disk:
        system_disk = self._get_system_disk(ex_system_disk)
        if system_disk:
            params.update(system_disk)

    if ex_data_disks:
        data_disks = self._get_data_disks(ex_data_disks)
        if data_disks:
            params.update(data_disks)

    if ex_vswitch_id:
        params['VSwitchId'] = ex_vswitch_id

    if ex_private_ip_address:
        if not ex_vswitch_id:
            raise AttributeError('must provide ex_private_ip_address '
                                 'and ex_vswitch_id at the same time')
        else:
            params['PrivateIpAddress'] = ex_private_ip_address

    if ex_client_token:
        params['ClientToken'] = ex_client_token

    resp = self.connection.request(self.path, params=params)
    node_id = findtext(resp.object, xpath='InstanceId',
                       namespace=self.namespace)
    nodes = self.list_nodes(ex_node_ids=[node_id])
    if len(nodes) != 1:
        raise LibcloudError('could not find the new created node '
                            'with id %s. ' % node_id,
                            driver=self)
    node = nodes[0]
    # Instances are created stopped; boot and wait for RUNNING.
    self.ex_start_node(node)
    self._wait_until_state(nodes, NodeState.RUNNING)
    return node

def reboot_node(self, node, ex_force_stop=False):
    """
    Reboot the given node

    @inherits :class:`NodeDriver.reboot_node`

    :keyword ex_force_stop: if ``True``, stop node force (maybe lose data)
                            otherwise, stop node normally,
                            default to ``False``
    :type ex_force_stop: ``bool``
    """
    params = {'Action': 'RebootInstance',
              'InstanceId': node.id,
              'ForceStop': u(ex_force_stop).lower()}
    resp = self.connection.request(self.path, params=params)
    return resp.success() and \
        self._wait_until_state([node], NodeState.RUNNING)

def destroy_node(self, node):
    """
    Destroy the given node, stopping it first if it is still running.

    :param node: the node to destroy
    :type node: :class:`Node`
    :rtype: ``bool``
    """
    nodes = self.list_nodes(ex_node_ids=[node.id])
    # BUGFIX: the original condition used "and", which raised IndexError
    # for an empty result and never fired on an id mismatch; "or"
    # short-circuits and raises the intended error instead.
    if len(nodes) != 1 or node.id != nodes[0].id:
        raise LibcloudError('could not find the node with id %s.'
                            % node.id)
    current = nodes[0]
    if current.state == NodeState.RUNNING:
        # stop node first
        self.ex_stop_node(node)
        self._wait_until_state(nodes, NodeState.STOPPED)
    params = {'Action': 'DeleteInstance',
              'InstanceId': node.id}
    resp = self.connection.request(self.path, params)
    return resp.success()

def ex_start_node(self, node):
    """
    Start node to running state.

    :param node: the ``Node`` object to start
    :type node: ``Node``

    :return: starting operation result.
    :rtype: ``bool``
    """
    params = {'Action': 'StartInstance',
              'InstanceId': node.id}
    resp = self.connection.request(self.path, params)
    return resp.success() and \
        self._wait_until_state([node], NodeState.RUNNING)

def ex_stop_node(self, node, ex_force_stop=False):
    """
    Stop a running node.

    :param node: The node to stop
    :type node: :class:`Node`

    :keyword ex_force_stop: if ``True``, stop node force (maybe lose data)
                            otherwise, stop node normally,
                            default to ``False``
    :type ex_force_stop: ``bool``

    :return: stopping operation result.
    :rtype: ``bool``
    """
    params = {'Action': 'StopInstance',
              'InstanceId': node.id,
              'ForceStop': u(ex_force_stop).lower()}
    resp = self.connection.request(self.path, params)
    return resp.success() and \
        self._wait_until_state([node], NodeState.STOPPED)
def ex_create_security_group(self, description=None, client_token=None):
    """
    Create a new security group.

    :keyword description: security group description
    :type description: ``unicode``

    :keyword client_token: a token generated by client to identify
                           each request.
    :type client_token: ``str``

    :return: the new security group id
    :rtype: ``str``
    """
    params = {'Action': 'CreateSecurityGroup',
              'RegionId': self.region}

    if description:
        params['Description'] = description
    if client_token:
        params['ClientToken'] = client_token
    resp = self.connection.request(self.path, params)
    return findtext(resp.object, 'SecurityGroupId',
                    namespace=self.namespace)

def ex_delete_security_group_by_id(self, group_id=None):
    """
    Delete a security group by its id.

    :keyword group_id: security group id
    :type group_id: ``str``

    :rtype: ``bool``
    """
    params = {'Action': 'DeleteSecurityGroup',
              'RegionId': self.region,
              'SecurityGroupId': group_id}
    resp = self.connection.request(self.path, params)
    return resp.success()

def ex_list_security_groups(self, ex_filters=None):
    """
    List security groups in the current region.

    :keyword ex_filters: security group attributes to filter results.
    :type ex_filters: ``dict``

    :return: a list of defined security groups
    :rtype: ``list`` of ``ECSSecurityGroup``
    """
    params = {'Action': 'DescribeSecurityGroups',
              'RegionId': self.region}

    if ex_filters and isinstance(ex_filters, dict):
        # BUGFIX: merge into a copy so the caller's ex_filters dict is
        # not mutated; request params still take precedence, as before.
        merged = ex_filters.copy()
        merged.update(params)
        params = merged

    def _parse_response(resp_object):
        sg_elements = findall(resp_object, 'SecurityGroups/SecurityGroup',
                              namespace=self.namespace)
        sgs = [self._to_security_group(el) for el in sg_elements]
        return sgs
    return self._request_multiple_pages(self.path, params,
                                        _parse_response)

def ex_list_security_group_attributes(self, group_id=None,
                                      nic_type='internet'):
    """
    List security group attributes in the current region.

    :keyword group_id: security group id.
    :type group_id: ``str``

    :keyword nic_type: internet|intranet.
    :type nic_type: ``str``

    :return: a list of defined security group Attributes
    :rtype: ``list`` of ``ECSSecurityGroupAttribute``
    """
    params = {'Action': 'DescribeSecurityGroupAttribute',
              'RegionId': self.region,
              'NicType': nic_type}

    if group_id is None:
        raise AttributeError('group_id is required')
    params['SecurityGroupId'] = group_id

    resp_object = self.connection.request(self.path, params).object
    sga_elements = findall(resp_object, 'Permissions/Permission',
                           namespace=self.namespace)
    return [self._to_security_group_attribute(el) for el in sga_elements]

def ex_join_security_group(self, node, group_id=None):
    """
    Join a node into security group.

    :param node: The node to join security group
    :type node: :class:`Node`

    :param group_id: security group id.
    :type group_id: ``str``

    :return: join operation result.
    :rtype: ``bool``
    """
    if group_id is None:
        raise AttributeError('group_id is required')

    if node.state != NodeState.RUNNING and \
       node.state != NodeState.STOPPED:
        # BUGFIX: the original message contained a broken '% s'
        # conversion and an in-string line continuation that embedded a
        # run of indentation spaces in the error text.
        raise LibcloudError('The node with id %s need be running '
                            'or stopped.' % node.id)

    params = {'Action': 'JoinSecurityGroup',
              'InstanceId': node.id,
              'SecurityGroupId': group_id}
    resp = self.connection.request(self.path, params)
    return resp.success()
def ex_leave_security_group(self, node, group_id=None):
    """
    Leave a node from security group.

    :param node: The node to leave security group
    :type node: :class:`Node`

    :param group_id: security group id.
    :type group_id: ``str``

    :return: leave operation result.
    :rtype: ``bool``
    """
    if group_id is None:
        raise AttributeError('group_id is required')

    if node.state != NodeState.RUNNING and \
       node.state != NodeState.STOPPED:
        # BUGFIX: repaired the broken '% s' conversion and in-string
        # line continuation of the original error message.
        raise LibcloudError('The node with id %s need be running '
                            'or stopped.' % node.id)

    params = {'Action': 'LeaveSecurityGroup',
              'InstanceId': node.id,
              'SecurityGroupId': group_id}
    resp = self.connection.request(self.path, params)
    return resp.success()

def ex_list_zones(self, region_id=None):
    """
    List availability zones in the given region or the current region.

    :keyword region_id: the id of the region to query zones from
    :type region_id: ``str``

    :return: list of zones
    :rtype: ``list`` of ``ECSZone``
    """
    params = {'Action': 'DescribeZones'}
    if region_id:
        params['RegionId'] = region_id
    else:
        params['RegionId'] = self.region
    resp_body = self.connection.request(self.path, params).object
    zone_elements = findall(resp_body, 'Zones/Zone',
                            namespace=self.namespace)
    zones = [self._to_zone(el) for el in zone_elements]
    return zones

##
# Volume and snapshot management methods
##

def list_volumes(self, ex_volume_ids=None, ex_filters=None):
    """
    List all volumes.

    @inherits: :class:`NodeDriver.list_volumes`

    :keyword ex_volume_ids: a list of volume's ids used to filter
                            volumes. Only the volumes which's id in
                            this list will be returned.
    :type ex_volume_ids: ``list`` of ``str``

    :keyword ex_filters: volume attribute and value pairs to filter
                         volumes. Only the volumes which matchs all
                         will be returned. If the filter attribute
                         need a json array value, use ``list`` object,
                         the driver will convert it.
    :type ex_filters: ``dict``
    """
    params = {'Action': 'DescribeDisks',
              'RegionId': self.region}

    if ex_volume_ids:
        if isinstance(ex_volume_ids, list):
            params['DiskIds'] = self._list_to_json_array(ex_volume_ids)
        else:
            raise AttributeError('ex_volume_ids should be a list of '
                                 'volume ids.')

    if ex_filters:
        if not isinstance(ex_filters, dict):
            raise AttributeError('ex_filters should be a dict of '
                                 'volume attributes.')
        else:
            params.update(ex_filters)

    def _parse_response(resp_object):
        disk_elements = findall(resp_object, 'Disks/Disk',
                                namespace=self.namespace)
        volumes = [self._to_volume(each) for each in disk_elements]
        return volumes
    return self._request_multiple_pages(self.path, params,
                                        _parse_response)

def list_volume_snapshots(self, volume, ex_snapshot_ids=None,
                          ex_filters=None):
    """
    List snapshots for a storage volume.

    @inherites :class:`NodeDriver.list_volume_snapshots`

    :keyword ex_snapshot_ids: a list of snapshot ids to filter the
                              snapshots returned.
    :type ex_snapshot_ids: ``list`` of ``str``

    :keyword ex_filters: snapshot attribute and value pairs to filter
                         snapshots. Only the snapshot which matchs all
                         the pairs will be returned. If the filter
                         attribute need a json array value, use
                         ``list`` object, the driver will convert it.
    :type ex_filters: ``dict``
    """
    # BUGFIX: the default for ex_snapshot_ids was a mutable [] literal
    # (shared across calls); None behaves identically and is safe.
    params = {'Action': 'DescribeSnapshots',
              'RegionId': self.region}

    if volume:
        params['DiskId'] = volume.id
    if ex_snapshot_ids and isinstance(ex_snapshot_ids, list):
        params['SnapshotIds'] = self._list_to_json_array(ex_snapshot_ids)
    if ex_filters and isinstance(ex_filters, dict):
        params.update(ex_filters)

    def _parse_response(resp_body):
        snapshot_elements = findall(resp_body, 'Snapshots/Snapshot',
                                    namespace=self.namespace)
        snapshots = [self._to_snapshot(each)
                     for each in snapshot_elements]
        return snapshots

    return self._request_multiple_pages(self.path, params,
                                        _parse_response)
def create_volume(self, size, name, location=None, snapshot=None,
                  ex_zone_id=None, ex_description=None,
                  ex_disk_category=None, ex_client_token=None):
    """
    Create a new data disk and return it as a ``StorageVolume``.

    @inherites :class:`NodeDriver.create_volume`

    :keyword ex_zone_id: the availability zone id (required)
    :type ex_zone_id: ``str``

    :keyword ex_description: volume description
    :type ex_description: ``unicode``

    :keyword ex_disk_category: disk category for data disk
    :type ex_disk_category: ``str``

    :keyword ex_client_token: a token generated by client to identify
                              each request.
    :type ex_client_token: ``str``
    """
    if ex_zone_id is None:
        raise AttributeError('ex_zone_id is required')

    request_params = {'Action': 'CreateDisk',
                      'RegionId': self.region,
                      'DiskName': name,
                      'Size': size,
                      'ZoneId': ex_zone_id}

    if isinstance(snapshot, VolumeSnapshot):
        request_params['SnapshotId'] = snapshot.id
    for api_name, value in (('Description', ex_description),
                            ('DiskCategory', ex_disk_category),
                            ('ClientToken', ex_client_token)):
        if value:
            request_params[api_name] = value

    resp_body = self.connection.request(self.path, request_params).object
    new_volume_id = findtext(resp_body, 'DiskId',
                             namespace=self.namespace)
    # Re-read the disk so the caller gets a fully populated object.
    created = self.list_volumes(ex_volume_ids=[new_volume_id])
    if len(created) != 1:
        raise LibcloudError('could not find the new create volume '
                            'with id %s.' % new_volume_id,
                            driver=self)
    return created[0]

def create_volume_snapshot(self, volume, name=None, ex_description=None,
                           ex_client_token=None):
    """
    Creates a snapshot of the storage volume.

    @inherits :class:`NodeDriver.create_volume_snapshot`

    :keyword ex_description: description of the snapshot.
    :type ex_description: ``unicode``

    :keyword ex_client_token: a token generated by client to identify
                              each request.
    :type ex_client_token: ``str``
    """
    request_params = {'Action': 'CreateSnapshot',
                      'DiskId': volume.id}
    for api_name, value in (('SnapshotName', name),
                            ('Description', ex_description),
                            ('ClientToken', ex_client_token)):
        if value:
            request_params[api_name] = value

    resp_body = self.connection.request(self.path, request_params).object
    new_snapshot_id = findtext(resp_body, 'SnapshotId',
                               namespace=self.namespace)
    # Fetch the snapshot back so all extra attributes are populated.
    found = self.list_volume_snapshots(volume=None,
                                       ex_snapshot_ids=[new_snapshot_id])
    if len(found) != 1:
        raise LibcloudError('could not find new created snapshot with '
                            'id %s.' % new_snapshot_id,
                            driver=self)
    return found[0]

def attach_volume(self, node, volume, device=None,
                  ex_delete_with_instance=None):
    """
    Attaches volume to node.

    @inherits :class:`NodeDriver.attach_volume`

    :keyword device: device path allocated for this attached volume
    :type device: ``str`` between /dev/xvdb to xvdz,
                  if empty, allocated by the system
    :keyword ex_delete_with_instance: if to delete this volume when
                                      the instance is deleted.
    :type ex_delete_with_instance: ``bool``
    """
    request_params = {'Action': 'AttachDisk',
                      'InstanceId': node.id,
                      'DiskId': volume.id}

    if device:
        request_params['Device'] = device
    if ex_delete_with_instance:
        # API expects the lowercase string 'true'/'false'
        request_params['DeleteWithInstance'] = \
            str(bool(ex_delete_with_instance)).lower()
    return self.connection.request(self.path, request_params).success()

def detach_volume(self, volume, ex_instance_id=None):
    """
    Detaches a volume from a node.

    @inherits :class:`NodeDriver.detach_volume`

    :keyword ex_instance_id: the id of the instance from which the
                             volume is detached; looked up from the
                             volume itself when omitted.
    :type ex_instance_id: ``str``
    """
    request_params = {'Action': 'DetachDisk',
                      'DiskId': volume.id}

    if ex_instance_id:
        request_params['InstanceId'] = ex_instance_id
    else:
        # Fall back to the attachment recorded on the volume.
        candidates = self.list_volumes(ex_volume_ids=[volume.id])
        if len(candidates) != 1:
            raise AttributeError('could not find the instance id '
                                 'the volume %s attached to, '
                                 'ex_instance_id is required.' %
                                 volume.id)
        request_params['InstanceId'] = \
            candidates[0].extra['instance_id']

    return self.connection.request(self.path, request_params).success()
def destroy_volume(self, volume):
    """Delete the given volume; it must be in the AVAILABLE state."""
    found = self.list_volumes(ex_volume_ids=[volume.id])
    if len(found) != 1:
        raise LibcloudError('could not find the volume with id %s.'
                            % volume.id,
                            driver=self)
    if found[0].state != StorageVolumeState.AVAILABLE:
        raise LibcloudError('only volume in AVAILABLE state could be '
                            'destroyed.', driver=self)

    request_params = {'Action': 'DeleteDisk',
                      'DiskId': volume.id}
    return self.connection.request(self.path, request_params).success()

def destroy_volume_snapshot(self, snapshot):
    """Delete the given ``VolumeSnapshot``."""
    if not isinstance(snapshot, VolumeSnapshot):
        raise AttributeError('snapshot is required and must be a '
                             'VolumeSnapshot')
    request_params = {'Action': 'DeleteSnapshot',
                      'SnapshotId': snapshot.id}
    return self.connection.request(self.path, request_params).success()

##
# Image management methods
##

def list_images(self, location=None, ex_image_ids=None, ex_filters=None):
    """
    List images on a provider.

    @inherits :class:`NodeDriver.list_images`

    :keyword ex_image_ids: a list of image ids to filter the images to
                           be returned.
    :type ex_image_ids: ``list`` of ``str``

    :keyword ex_filters: image attribute and value pairs to filter
                         images. Only the image which matchs all the
                         pairs will be returned. If the filter
                         attribute need a json array value, use
                         ``list`` object, the driver will convert it.
    :type ex_filters: ``dict``
    """
    # A NodeLocation overrides the driver's region for this query.
    region = location.id if isinstance(location, NodeLocation) and \
        location else self.region
    if not (location and isinstance(location, NodeLocation)):
        region = self.region

    request_params = {'Action': 'DescribeImages',
                      'RegionId': region}
    if ex_image_ids:
        if not isinstance(ex_image_ids, list):
            raise AttributeError('ex_image_ids should be a list of '
                                 'image ids')
        request_params['ImageId'] = ','.join(ex_image_ids)
    if ex_filters and isinstance(ex_filters, dict):
        request_params.update(ex_filters)

    def _parse_response(resp_body):
        image_elements = findall(resp_body, 'Images/Image',
                                 namespace=self.namespace)
        return [self._to_image(each) for each in image_elements]

    return self._request_multiple_pages(self.path, request_params,
                                        _parse_response)

def create_image(self, node, name, description=None, ex_snapshot_id=None,
                 ex_image_version=None, ex_client_token=None):
    """
    Creates an image from a system disk snapshot.

    @inherits :class:`NodeDriver.create_image`

    :keyword ex_snapshot_id: the id of the snapshot to create the image.
                             (required)
    :type ex_snapshot_id: ``str``

    :keyword ex_image_version: the version number of the image
    :type ex_image_version: ``str``

    :keyword ex_client_token: a token generated by client to identify
                              each request.
    :type ex_client_token: ``str``
    """
    if not ex_snapshot_id:
        raise AttributeError('ex_snapshot_id is required')

    request_params = {'Action': 'CreateImage',
                      'RegionId': self.region,
                      'SnapshotId': ex_snapshot_id}
    for api_name, value in (('ImageName', name),
                            ('Description', description),
                            ('ImageVersion', ex_image_version),
                            ('ClientToken', ex_client_token)):
        if value:
            request_params[api_name] = value

    resp = self.connection.request(self.path, request_params)
    new_image_id = findtext(resp.object, 'ImageId',
                            namespace=self.namespace)
    return self.get_image(image_id=new_image_id)
def delete_image(self, node_image):
    """Delete the given ``NodeImage`` in the current region."""
    request_params = {'Action': 'DeleteImage',
                      'RegionId': self.region,
                      'ImageId': node_image.id}
    return self.connection.request(self.path, request_params).success()

def get_image(self, image_id, ex_region_id=None):
    """Fetch a single image by id, optionally from another region."""
    region = ex_region_id or self.region
    location = NodeLocation(id=region, name=None, country=None,
                            driver=self)
    matches = self.list_images(location, ex_image_ids=[image_id])
    if len(matches) != 1:
        raise LibcloudError('could not find the image with id %s'
                            % image_id,
                            driver=self)
    return matches[0]

def copy_image(self, source_region, node_image, name, description=None,
               ex_destination_region_id=None, ex_client_token=None):
    """
    Copies an image from a source region to the destination region.
    If not provide a destination region, default to the current region.

    @inherits :class:`NodeDriver.copy_image`

    :keyword ex_destination_region_id: id of the destination region
    :type ex_destination_region_id: ``str``

    :keyword ex_client_token: a token generated by client to identify
                              each request.
    :type ex_client_token: ``str``
    """
    destination = ex_destination_region_id \
        if ex_destination_region_id is not None else self.region
    request_params = {'Action': 'CopyImage',
                      'RegionId': source_region,
                      'ImageId': node_image.id,
                      'DestinationRegionId': destination}
    for api_name, value in (('DestinationImageName', name),
                            ('DestinationDescription', description),
                            ('ClientToken', ex_client_token)):
        if value:
            request_params[api_name] = value

    resp = self.connection.request(self.path, request_params)
    new_image_id = findtext(resp.object, 'ImageId',
                            namespace=self.namespace)
    return self.get_image(image_id=new_image_id)

def create_public_ip(self, instance_id):
    """
    Create public ip.

    :keyword instance_id: instance id for allocating public ip.
    :type instance_id: ``str``
    :return public ip
    :rtype ``str``
    """
    request_params = {'Action': 'AllocatePublicIpAddress',
                      'InstanceId': instance_id}
    resp = self.connection.request(self.path, params=request_params)
    return findtext(resp.object, 'IpAddress', namespace=self.namespace)

def _to_nodes(self, object):
    """
    Convert response to Node object list

    :param object: parsed response object
    :return: a list of ``Node``
    :rtype: ``list``
    """
    instance_elements = findall(object, 'Instances/Instance',
                                self.namespace)
    return [self._to_node(el) for el in instance_elements]

def _to_node(self, instance):
    """
    Convert an InstanceAttributesType object to ``Node`` object

    :param instance: a xml element represents an instance
    :return: a ``Node`` object
    :rtype: ``Node``
    """
    def _text(xpath):
        return findtext(element=instance, xpath=xpath,
                        namespace=self.namespace)

    def _ips(xpath):
        return [each.text for each in
                findall(element=instance, xpath=xpath,
                        namespace=self.namespace)]

    instance_id = _text('InstanceId')
    instance_name = _text('InstanceName')
    state = self.NODE_STATE_MAPPING.get(_text('Status'),
                                        NodeState.UNKNOWN)

    public_ips = _ips('PublicIpAddress/IpAddress')
    private_ips = _ips('InnerIpAddress/IpAddress')

    # Extra properties
    extra = self._get_extra_dict(instance,
                                 RESOURCE_EXTRA_ATTRIBUTES_MAP['node'])
    extra['vpc_attributes'] = self._get_vpc_attributes(instance)
    extra['eip_address'] = self._get_eip_address(instance)
    extra['operation_locks'] = self._get_operation_locks(instance)

    return Node(id=instance_id, name=instance_name, state=state,
                public_ips=public_ips, private_ips=private_ips,
                driver=self.connection.driver, extra=extra)
private_ip_els = findall(element=instance, xpath='InnerIpAddress/IpAddress', namespace=self.namespace) private_ips = _get_ips(private_ip_els) # Extra properties extra = self._get_extra_dict(instance, RESOURCE_EXTRA_ATTRIBUTES_MAP['node']) extra['vpc_attributes'] = self._get_vpc_attributes(instance) extra['eip_address'] = self._get_eip_address(instance) extra['operation_locks'] = self._get_operation_locks(instance) node = Node(id=_id, name=name, state=state, public_ips=public_ips, private_ips=private_ips, driver=self.connection.driver, extra=extra) return node def _get_extra_dict(self, element, mapping): """ Extract attributes from the element based on rules provided in the mapping dictionary. :param element: Element to parse the values from. :type element: xml.etree.ElementTree.Element. :param mapping: Dictionary with the extra layout :type node: :class:`Node` :rtype: ``dict`` """ extra = {} for attribute, values in mapping.items(): transform_func = values['transform_func'] value = findattr(element=element, xpath=values['xpath'], namespace=self.namespace) if value: try: extra[attribute] = transform_func(value) except Exception: extra[attribute] = None else: extra[attribute] = value return extra def _get_internet_related_params(self, ex_internet_charge_type, ex_internet_max_bandwidth_in, ex_internet_max_bandwidth_out): params = {} if ex_internet_charge_type: params['InternetChargeType'] = ex_internet_charge_type if ex_internet_charge_type.lower() == 'paybytraffic': if ex_internet_max_bandwidth_out: params['InternetMaxBandwidthOut'] = \ ex_internet_max_bandwidth_out else: raise AttributeError('ex_internet_max_bandwidth_out is ' 'mandatory for PayByTraffic internet' ' charge type.') elif ex_internet_max_bandwidth_out: params['InternetMaxBandwidthOut'] = \ ex_internet_max_bandwidth_out if ex_internet_max_bandwidth_in: params['InternetMaxBandwidthIn'] = \ ex_internet_max_bandwidth_in return params def _get_system_disk(self, ex_system_disk): if not 
isinstance(ex_system_disk, dict): raise AttributeError('ex_system_disk is not a dict') sys_disk_dict = ex_system_disk key_base = 'SystemDisk.' # TODO(samsong8610): Use a type instead of dict mappings = {'category': 'Category', 'disk_name': 'DiskName', 'description': 'Description'} params = {} for attr in mappings.keys(): if attr in sys_disk_dict: params[key_base + mappings[attr]] = sys_disk_dict[attr] return params def _get_data_disks(self, ex_data_disks): if isinstance(ex_data_disks, dict): data_disks = [ex_data_disks] elif isinstance(ex_data_disks, list): data_disks = ex_data_disks else: raise AttributeError('ex_data_disks should be a list of dict') # TODO(samsong8610): Use a type instead of dict mappings = {'size': 'Size', 'category': 'Category', 'snapshot_id': 'SnapshotId', 'disk_name': 'DiskName', 'description': 'Description', 'device': 'Device', 'delete_with_instance': 'DeleteWithInstance'} params = {} for idx, disk in enumerate(data_disks): key_base = 'DataDisk.{0}.'.format(idx + 1) for attr in mappings.keys(): if attr in disk: if attr == 'delete_with_instance': # Convert bool value to str value = str(disk[attr]).lower() else: value = disk[attr] params[key_base + mappings[attr]] = value return params def _get_vpc_attributes(self, instance): vpcs = findall(instance, xpath='VpcAttributes', namespace=self.namespace) if len(vpcs) <= 0: return None return self._get_extra_dict( vpcs[0], RESOURCE_EXTRA_ATTRIBUTES_MAP['vpc_attributes']) def _get_eip_address(self, instance): eips = findall(instance, xpath='EipAddress', namespace=self.namespace) if len(eips) <= 0: return None return self._get_extra_dict( eips[0], RESOURCE_EXTRA_ATTRIBUTES_MAP['eip_address_associate']) def _get_operation_locks(self, instance): locks = findall(instance, xpath='OperationLocks', namespace=self.namespace) if len(locks) <= 0: return None return self._get_extra_dict( locks[0], RESOURCE_EXTRA_ATTRIBUTES_MAP['operation_locks']) def _wait_until_state(self, nodes, state, wait_period=3, 
timeout=600): """ Block until the provided nodes are in the desired state. :param nodes: List of nodes to wait for :type nodes: ``list`` of :class:`.Node` :param state: desired state :type state: ``NodeState`` :param wait_period: How many seconds to wait between each loop iteration. (default is 3) :type wait_period: ``int`` :param timeout: How many seconds to wait before giving up. (default is 600) :type timeout: ``int`` :return: if the nodes are in the desired state. :rtype: ``bool`` """ start = time.time() end = start + timeout node_ids = [node.id for node in nodes] while(time.time() < end): matched_nodes = self.list_nodes(ex_node_ids=node_ids) if len(matched_nodes) > len(node_ids): found_ids = [node.id for node in matched_nodes] msg = ('found multiple nodes with same ids, ' 'desired ids: %(ids)s, found ids: %(found_ids)s' % {'ids': node_ids, 'found_ids': found_ids}) raise LibcloudError(value=msg, driver=self) desired_nodes = [node for node in matched_nodes if node.state == state] if len(desired_nodes) == len(node_ids): return True else: time.sleep(wait_period) continue raise LibcloudError(value='Timed out after %s seconds' % (timeout), driver=self) def _to_volume(self, element): _id = findtext(element, 'DiskId', namespace=self.namespace) name = findtext(element, 'DiskName', namespace=self.namespace) size = int(findtext(element, 'Size', namespace=self.namespace)) status_str = findtext(element, 'Status', namespace=self.namespace) status = self.VOLUME_STATE_MAPPING.get(status_str, StorageVolumeState.UNKNOWN) extra = self._get_extra_dict(element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume']) extra['operation_locks'] = self._get_operation_locks(element) return StorageVolume(_id, name, size, self, state=status, extra=extra) def _list_to_json_array(self, value): try: return json.dumps(value) except Exception: raise AttributeError('could not convert list to json array') def _to_snapshot(self, element): _id = findtext(element, 'SnapshotId', namespace=self.namespace) created = 
findtext(element, 'CreationTime', namespace=self.namespace) status_str = findtext(element, 'Status', namespace=self.namespace) state = self.SNAPSHOT_STATE_MAPPING.get(status_str, VolumeSnapshotState.UNKNOWN) extra = self._get_extra_dict(element, RESOURCE_EXTRA_ATTRIBUTES_MAP['snapshot']) return VolumeSnapshot(id=_id, driver=self, extra=extra, created=created, state=state) def _to_size(self, element): _id = findtext(element, 'InstanceTypeId', namespace=self.namespace) ram = float(findtext(element, 'MemorySize', namespace=self.namespace)) extra = {} extra['cpu_core_count'] = int(findtext(element, 'CpuCoreCount', namespace=self.namespace)) extra['instance_type_family'] = findtext(element, 'InstanceTypeFamily', namespace=self.namespace) return NodeSize(id=_id, name=_id, ram=ram, disk=None, bandwidth=None, price=None, driver=self, extra=extra) def _to_location(self, element): _id = findtext(element, 'RegionId', namespace=self.namespace) localname = findtext(element, 'LocalName', namespace=self.namespace) return NodeLocation(id=_id, name=localname, country=None, driver=self) def _to_image(self, element): _id = findtext(element, 'ImageId', namespace=self.namespace) name = findtext(element, 'ImageName', namespace=self.namespace) extra = self._get_extra_dict(element, RESOURCE_EXTRA_ATTRIBUTES_MAP['image']) extra['disk_device_mappings'] = self._get_disk_device_mappings( element.find('DiskDeviceMappings')) return NodeImage(id=_id, name=name, driver=self, extra=extra) def _get_disk_device_mappings(self, element): if element is None: return None mapping_element = element.find('DiskDeviceMapping') if mapping_element is not None: return self._get_extra_dict( mapping_element, RESOURCE_EXTRA_ATTRIBUTES_MAP['disk_device_mapping']) return None def _to_security_group(self, element): _id = findtext(element, 'SecurityGroupId', namespace=self.namespace) name = findtext(element, 'SecurityGroupName', namespace=self.namespace) description = findtext(element, 'Description', 
def _to_security_group_attribute(self, element):
    """Build an ``ECSSecurityGroupAttribute`` from a Permission element."""
    def _text(xpath):
        return findtext(element, xpath, namespace=self.namespace)

    return ECSSecurityGroupAttribute(
        ip_protocol=_text('IpProtocol'),
        port_range=_text('PortRange'),
        source_group_id=_text('SourceGroupId'),
        policy=_text('Policy'),
        nic_type=_text('NicType'))

def _to_zone(self, element):
    """Build an ``ECSZone`` from a Zones/Zone element."""
    def _texts(xpath):
        return [each.text for each in
                findall(element, xpath, namespace=self.namespace)]

    return ECSZone(
        id=findtext(element, 'ZoneId', namespace=self.namespace),
        name=findtext(element, 'LocalName', namespace=self.namespace),
        driver=self,
        available_resource_types=_texts(
            'AvailableResourceCreation/ResourceTypes'),
        available_instance_types=_texts(
            'AvailableInstanceTypes/InstanceTypes'),
        available_disk_categories=_texts(
            'AvailableDiskCategories/DiskCategories'))

def _get_pagination(self, element):
    """Parse paging fields from a response body into a ``Pagination``."""
    # NOTE(review): pagination fields are looked up without a
    # namespace, unlike the other parsers — preserved as-is.
    return Pagination(
        total=int(findtext(element, 'TotalCount')),
        size=int(findtext(element, 'PageSize')),
        current=int(findtext(element, 'PageNumber')))

def _request_multiple_pages(self, path, params, parse_func):
    """
    Request all resources by multiple pages.

    :param path: the resource path
    :type path: ``str``
    :param params: the query parameters
    :type params: ``dict``
    :param parse_func: the function object to parse the response body
    :param type: ``function``
    :return: list of resource object, if not found any, return []
    :rtype: ``list``
    """
    results = []
    while True:
        page = self.connection.request(path, params).object
        results.extend(parse_func(page))
        pagination = self._get_pagination(page)
        if pagination.next() is None:
            # no further pages
            break
        params.update(pagination.to_dict())
    return results
""" import os try: import simplejson as json except ImportError: import json import itertools import binascii from copy import copy from libcloud.utils.py3 import PY3 from libcloud.common.linode import (API_ROOT, LinodeException, LinodeConnection, LINODE_PLAN_IDS, LINODE_DISK_FILESYSTEMS) from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey from libcloud.compute.base import NodeImage, StorageVolume class LinodeNodeDriver(NodeDriver): """libcloud driver for the Linode API Rough mapping of which is which: - list_nodes linode.list - reboot_node linode.reboot - destroy_node linode.delete - create_node linode.create, linode.update, linode.disk.createfromdistribution, linode.disk.create, linode.config.create, linode.ip.addprivate, linode.boot - list_sizes avail.linodeplans - list_images avail.distributions - list_locations avail.datacenters - list_volumes linode.disk.list - destroy_volume linode.disk.delete For more information on the Linode API, be sure to read the reference: http://www.linode.com/api/ """ type = Provider.LINODE name = "Linode" website = 'http://www.linode.com/' connectionCls = LinodeConnection _linode_plan_ids = LINODE_PLAN_IDS _linode_disk_filesystems = LINODE_DISK_FILESYSTEMS features = {'create_node': ['ssh_key', 'password']} def __init__(self, key): """Instantiate the driver with the given API key :param key: the API key to use (required) :type key: ``str`` :rtype: ``None`` """ self.datacenter = None NodeDriver.__init__(self, key) # Converts Linode's state from DB to a NodeState constant. 
LINODE_STATES = { (-2): NodeState.UNKNOWN, # Boot Failed (-1): NodeState.PENDING, # Being Created 0: NodeState.PENDING, # Brand New 1: NodeState.RUNNING, # Running 2: NodeState.STOPPED, # Powered Off 3: NodeState.REBOOTING, # Shutting Down 4: NodeState.UNKNOWN # Reserved } def list_nodes(self): """ List all Linodes that the API key can access This call will return all Linodes that the API key in use has access to. If a node is in this list, rebooting will work; however, creation and destruction are a separate grant. :return: List of node objects that the API key can access :rtype: ``list`` of :class:`Node` """ params = {"api_action": "linode.list"} data = self.connection.request(API_ROOT, params=params).objects[0] return self._to_nodes(data) def ex_start_node(self, node): """ Boot the given Linode """ params = {"api_action": "linode.boot", "LinodeID": node.id} self.connection.request(API_ROOT, params=params) return True def ex_stop_node(self, node): """ Shutdown the given Linode """ params = {"api_action": "linode.shutdown", "LinodeID": node.id} self.connection.request(API_ROOT, params=params) return True def reboot_node(self, node): """ Reboot the given Linode Will issue a shutdown job followed by a boot job, using the last booted configuration. In most cases, this will be the only configuration. :param node: the Linode to reboot :type node: :class:`Node` :rtype: ``bool`` """ params = {"api_action": "linode.reboot", "LinodeID": node.id} self.connection.request(API_ROOT, params=params) return True def destroy_node(self, node): """Destroy the given Linode Will remove the Linode from the account and issue a prorated credit. A grant for removing Linodes from the account is required, otherwise this method will fail. In most cases, all disk images must be removed from a Linode before the Linode can be removed; however, this call explicitly skips those safeguards. There is no going back from this method. 
:param node: the Linode to destroy :type node: :class:`Node` :rtype: ``bool`` """ params = {"api_action": "linode.delete", "LinodeID": node.id, "skipChecks": True} self.connection.request(API_ROOT, params=params) return True def create_node(self, **kwargs): """Create a new Linode, deploy a Linux distribution, and boot This call abstracts much of the functionality of provisioning a Linode and getting it booted. A global grant to add Linodes to the account is required, as this call will result in a billing charge. Note that there is a safety valve of 5 Linodes per hour, in order to prevent a runaway script from ruining your day. :keyword name: the name to assign the Linode (mandatory) :type name: ``str`` :keyword image: which distribution to deploy on the Linode (mandatory) :type image: :class:`NodeImage` :keyword size: the plan size to create (mandatory) :type size: :class:`NodeSize` :keyword auth: an SSH key or root password (mandatory) :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword` :keyword location: which datacenter to create the Linode in :type location: :class:`NodeLocation` :keyword ex_swap: size of the swap partition in MB (128) :type ex_swap: ``int`` :keyword ex_rsize: size of the root partition in MB (plan size - swap). :type ex_rsize: ``int`` :keyword ex_kernel: a kernel ID from avail.kernels (Latest 2.6 Stable). 
:type ex_kernel: ``str`` :keyword ex_payment: one of 1, 12, or 24; subscription length (1) :type ex_payment: ``int`` :keyword ex_comment: a small comment for the configuration (libcloud) :type ex_comment: ``str`` :keyword ex_private: whether or not to request a private IP (False) :type ex_private: ``bool`` :keyword lconfig: what to call the configuration (generated) :type lconfig: ``str`` :keyword lroot: what to call the root image (generated) :type lroot: ``str`` :keyword lswap: what to call the swap space (generated) :type lswap: ``str`` :return: Node representing the newly-created Linode :rtype: :class:`Node` """ name = kwargs["name"] image = kwargs["image"] size = kwargs["size"] auth = self._get_and_check_auth(kwargs["auth"]) # Pick a location (resolves LIBCLOUD-41 in JIRA) if "location" in kwargs: chosen = kwargs["location"].id elif self.datacenter: chosen = self.datacenter else: raise LinodeException(0xFB, "Need to select a datacenter first") # Step 0: Parameter validation before we purchase # We're especially careful here so we don't fail after purchase, rather # than getting halfway through the process and having the API fail. 
# Plan ID plans = self.list_sizes() if size.id not in [p.id for p in plans]: raise LinodeException(0xFB, "Invalid plan ID -- avail.plans") # Payment schedule payment = "1" if "ex_payment" not in kwargs else \ str(kwargs["ex_payment"]) if payment not in ["1", "12", "24"]: raise LinodeException(0xFB, "Invalid subscription (1, 12, 24)") ssh = None root = None # SSH key and/or root password if isinstance(auth, NodeAuthSSHKey): ssh = auth.pubkey elif isinstance(auth, NodeAuthPassword): root = auth.password if not ssh and not root: raise LinodeException(0xFB, "Need SSH key or root password") if root is not None and len(root) < 6: raise LinodeException(0xFB, "Root password is too short") # Swap size try: swap = 128 if "ex_swap" not in kwargs else int(kwargs["ex_swap"]) except: raise LinodeException(0xFB, "Need an integer swap size") # Root partition size imagesize = (size.disk - swap) if "ex_rsize" not in kwargs else\ int(kwargs["ex_rsize"]) if (imagesize + swap) > size.disk: raise LinodeException(0xFB, "Total disk images are too big") # Distribution ID distros = self.list_images() if image.id not in [d.id for d in distros]: raise LinodeException(0xFB, "Invalid distro -- avail.distributions") # Kernel if "ex_kernel" in kwargs: kernel = kwargs["ex_kernel"] else: if image.extra['64bit']: # For a list of available kernel ids, see # https://www.linode.com/kernels/ kernel = 138 else: kernel = 137 params = {"api_action": "avail.kernels"} kernels = self.connection.request(API_ROOT, params=params).objects[0] if kernel not in [z["KERNELID"] for z in kernels]: raise LinodeException(0xFB, "Invalid kernel -- avail.kernels") # Comments comments = "Created by Apache libcloud " if\ "ex_comment" not in kwargs else kwargs["ex_comment"] # Step 1: linode.create params = { "api_action": "linode.create", "DatacenterID": chosen, "PlanID": size.id, "PaymentTerm": payment } data = self.connection.request(API_ROOT, params=params).objects[0] linode = {"id": data["LinodeID"]} # Step 1b. 
linode.update to rename the Linode params = { "api_action": "linode.update", "LinodeID": linode["id"], "Label": name } self.connection.request(API_ROOT, params=params) # Step 1c. linode.ip.addprivate if it was requested if "ex_private" in kwargs and kwargs["ex_private"]: params = { "api_action": "linode.ip.addprivate", "LinodeID": linode["id"] } self.connection.request(API_ROOT, params=params) # Step 1d. Labels # use the linode id as the name can be up to 63 chars and the labels # are limited to 48 chars label = { "lconfig": "[%s] Configuration Profile" % linode["id"], "lroot": "[%s] %s Disk Image" % (linode["id"], image.name), "lswap": "[%s] Swap Space" % linode["id"] } for what in ["lconfig", "lroot", "lswap"]: if what in kwargs: label[what] = kwargs[what] # Step 2: linode.disk.createfromdistribution if not root: root = binascii.b2a_base64(os.urandom(8)).decode('ascii').strip() params = { "api_action": "linode.disk.createfromdistribution", "LinodeID": linode["id"], "DistributionID": image.id, "Label": label["lroot"], "Size": imagesize, "rootPass": root, } if ssh: params["rootSSHKey"] = ssh data = self.connection.request(API_ROOT, params=params).objects[0] linode["rootimage"] = data["DiskID"] # Step 3: linode.disk.create for swap params = { "api_action": "linode.disk.create", "LinodeID": linode["id"], "Label": label["lswap"], "Type": "swap", "Size": swap } data = self.connection.request(API_ROOT, params=params).objects[0] linode["swapimage"] = data["DiskID"] # Step 4: linode.config.create for main profile disks = "%s,%s,,,,,,," % (linode["rootimage"], linode["swapimage"]) params = { "api_action": "linode.config.create", "LinodeID": linode["id"], "KernelID": kernel, "Label": label["lconfig"], "Comments": comments, "DiskList": disks } if "ex_private" in kwargs and kwargs["ex_private"]: params['helper_network'] = True params['helper_distro'] = True data = self.connection.request(API_ROOT, params=params).objects[0] linode["config"] = data["ConfigID"] # Step 5: 
linode.boot params = { "api_action": "linode.boot", "LinodeID": linode["id"], "ConfigID": linode["config"] } self.connection.request(API_ROOT, params=params) # Make a node out of it and hand it back params = {"api_action": "linode.list", "LinodeID": linode["id"]} data = self.connection.request(API_ROOT, params=params).objects[0] nodes = self._to_nodes(data) if len(nodes) == 1: node = nodes[0] if getattr(auth, "generated", False): node.extra['password'] = auth.password return node return None def ex_resize_node(self, node, size): """Resizes a Linode from one plan to another Immediately shuts the Linode down, charges/credits the account, and issue a migration to another host server. Requires a size (numeric), which is the desired PlanID available from avail.LinodePlans() After resize is complete the node needs to be booted """ params = {"api_action": "linode.resize", "LinodeID": node.id, "PlanID": size} self.connection.request(API_ROOT, params=params) return True def ex_rename_node(self, node, name): """Renames a node""" params = { "api_action": "linode.update", "LinodeID": node.id, "Label": name } self.connection.request(API_ROOT, params=params) return True def list_sizes(self, location=None): """ List available Linode plans Gets the sizes that can be used for creating a Linode. Since available Linode plans vary per-location, this method can also be passed a location to filter the availability. 
:keyword location: the facility to retrieve plans in :type location: :class:`NodeLocation` :rtype: ``list`` of :class:`NodeSize` """ params = {"api_action": "avail.linodeplans"} data = self.connection.request(API_ROOT, params=params).objects[0] sizes = [] for obj in data: n = NodeSize(id=obj["PLANID"], name=obj["LABEL"], ram=obj["RAM"], disk=(obj["DISK"] * 1024), bandwidth=obj["XFER"], price=obj["PRICE"], driver=self.connection.driver) sizes.append(n) return sizes def list_images(self): """ List available Linux distributions Retrieve all Linux distributions that can be deployed to a Linode. :rtype: ``list`` of :class:`NodeImage` """ params = {"api_action": "avail.distributions"} data = self.connection.request(API_ROOT, params=params).objects[0] distros = [] for obj in data: i = NodeImage(id=obj["DISTRIBUTIONID"], name=obj["LABEL"], driver=self.connection.driver, extra={'pvops': obj['REQUIRESPVOPSKERNEL'], '64bit': obj['IS64BIT']}) distros.append(i) return distros def list_locations(self): """ List available facilities for deployment Retrieve all facilities that a Linode can be deployed in. :rtype: ``list`` of :class:`NodeLocation` """ params = {"api_action": "avail.datacenters"} data = self.connection.request(API_ROOT, params=params).objects[0] nl = [] for dc in data: country = None if "USA" in dc["LOCATION"]: country = "US" elif "UK" in dc["LOCATION"]: country = "GB" elif "JP" in dc["LOCATION"]: country = "JP" else: country = "??" nl.append(NodeLocation(dc["DATACENTERID"], dc["LOCATION"], country, self)) return nl def linode_set_datacenter(self, dc): """ Set the default datacenter for Linode creation Since Linodes must be created in a facility, this function sets the default that :class:`create_node` will use. If a location keyword is not passed to :class:`create_node`, this method must have already been used. 
:keyword dc: the datacenter to create Linodes in unless specified :type dc: :class:`NodeLocation` :rtype: ``bool`` """ did = dc.id params = {"api_action": "avail.datacenters"} data = self.connection.request(API_ROOT, params=params).objects[0] for datacenter in data: if did == dc["DATACENTERID"]: self.datacenter = did return dcs = ", ".join([d["DATACENTERID"] for d in data]) self.datacenter = None raise LinodeException(0xFD, "Invalid datacenter (use one of %s)" % dcs) def destroy_volume(self, volume): """ Destroys disk volume for the Linode. Linode id is to be provided as extra["LinodeId"] whithin :class:`StorageVolume`. It can be retrieved by :meth:`libcloud.compute.drivers.linode.LinodeNodeDriver\ .ex_list_volumes`. :param volume: Volume to be destroyed :type volume: :class:`StorageVolume` :rtype: ``bool`` """ if not isinstance(volume, StorageVolume): raise LinodeException(0xFD, "Invalid volume instance") if volume.extra["LINODEID"] is None: raise LinodeException(0xFD, "Missing LinodeID") params = { "api_action": "linode.disk.delete", "LinodeID": volume.extra["LINODEID"], "DiskID": volume.id, } self.connection.request(API_ROOT, params=params) return True def ex_create_volume(self, size, name, node, fs_type): """ Create disk for the Linode. :keyword size: Size of volume in megabytes (required) :type size: ``int`` :keyword name: Name of the volume to be created :type name: ``str`` :keyword node: Node to attach volume to. :type node: :class:`Node` :keyword fs_type: The formatted type of this disk. 
Valid types are: ext3, ext4, swap, raw :type fs_type: ``str`` :return: StorageVolume representing the newly-created volume :rtype: :class:`StorageVolume` """ # check node if not isinstance(node, Node): raise LinodeException(0xFD, "Invalid node instance") # check space available total_space = node.extra['TOTALHD'] existing_volumes = self.ex_list_volumes(node) used_space = 0 for volume in existing_volumes: used_space = used_space + volume.size available_space = total_space - used_space if available_space < size: raise LinodeException(0xFD, "Volume size too big. Available space\ %d" % available_space) # check filesystem type if fs_type not in self._linode_disk_filesystems: raise LinodeException(0xFD, "Not valid filesystem type") params = { "api_action": "linode.disk.create", "LinodeID": node.id, "Label": name, "Type": fs_type, "Size": size } data = self.connection.request(API_ROOT, params=params).objects[0] volume = data["DiskID"] # Make a volume out of it and hand it back params = { "api_action": "linode.disk.list", "LinodeID": node.id, "DiskID": volume } data = self.connection.request(API_ROOT, params=params).objects[0] return self._to_volumes(data)[0] def ex_list_volumes(self, node, disk_id=None): """ List existing disk volumes for for given Linode. :keyword node: Node to list disk volumes for. (required) :type node: :class:`Node` :keyword disk_id: Id for specific disk volume. 
(optional) :type disk_id: ``int`` :rtype: ``list`` of :class:`StorageVolume` """ if not isinstance(node, Node): raise LinodeException(0xFD, "Invalid node instance") params = { "api_action": "linode.disk.list", "LinodeID": node.id } # Add param if disk_id was specified if disk_id is not None: params["DiskID"] = disk_id data = self.connection.request(API_ROOT, params=params).objects[0] return self._to_volumes(data) def _to_volumes(self, objs): """ Covert returned JSON volumes into StorageVolume instances :keyword objs: ``list`` of JSON dictionaries representing the StorageVolumes :type objs: ``list`` :return: ``list`` of :class:`StorageVolume`s """ volumes = {} for o in objs: vid = o["DISKID"] volumes[vid] = vol = StorageVolume(id=vid, name=o["LABEL"], size=int(o["SIZE"]), driver=self.connection.driver) vol.extra = copy(o) return list(volumes.values()) def _to_nodes(self, objs): """Convert returned JSON Linodes into Node instances :keyword objs: ``list`` of JSON dictionaries representing the Linodes :type objs: ``list`` :return: ``list`` of :class:`Node`s""" # Get the IP addresses for the Linodes nodes = {} batch = [] for o in objs: lid = o["LINODEID"] nodes[lid] = n = Node(id=lid, name=o["LABEL"], public_ips=[], private_ips=[], state=self.LINODE_STATES[o["STATUS"]], driver=self.connection.driver) n.extra = copy(o) n.extra["PLANID"] = self._linode_plan_ids.get(o.get("TOTALRAM")) batch.append({"api_action": "linode.ip.list", "LinodeID": lid}) # Avoid batch limitation ip_answers = [] args = [iter(batch)] * 25 if PY3: izip_longest = itertools.zip_longest else: izip_longest = getattr(itertools, 'izip_longest', _izip_longest) for twenty_five in izip_longest(*args): twenty_five = [q for q in twenty_five if q] params = {"api_action": "batch", "api_requestArray": json.dumps(twenty_five)} req = self.connection.request(API_ROOT, params=params) if not req.success() or len(req.objects) == 0: return None ip_answers.extend(req.objects) # Add the returned IPs to the nodes and 
return them for ip_list in ip_answers: for ip in ip_list: lid = ip["LINODEID"] which = nodes[lid].public_ips if ip["ISPUBLIC"] == 1 else\ nodes[lid].private_ips which.append(ip["IPADDRESS"]) return list(nodes.values()) def _izip_longest(*args, **kwds): """Taken from Python docs http://docs.python.org/library/itertools.html#itertools.izip """ fillvalue = kwds.get('fillvalue') def sentinel(counter=([fillvalue] * (len(args) - 1)).pop): yield counter() # yields the fillvalue, or raises IndexError fillers = itertools.repeat(fillvalue) iters = [itertools.chain(it, sentinel(), fillers) for it in args] try: for tup in itertools.izip(*iters): yield tup except IndexError: pass apache-libcloud-2.2.1/libcloud/compute/drivers/vcl.py0000664000175000017500000002107012705460761022513 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" VCL driver """ import time from libcloud.common.base import ConnectionUserAndKey from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeDriver, Node from libcloud.compute.base import NodeSize, NodeImage class VCLResponse(XMLRPCResponse): exceptions = { 'VCL_Account': InvalidCredsError, } class VCLConnection(XMLRPCConnection, ConnectionUserAndKey): endpoint = '/index.php?mode=xmlrpccall' def add_default_headers(self, headers): headers['X-APIVERSION'] = '2' headers['X-User'] = self.user_id headers['X-Pass'] = self.key return headers class VCLNodeDriver(NodeDriver): """ VCL node driver :keyword host: The VCL host to which you make requests(required) :type host: ``str`` """ NODE_STATE_MAP = { 'ready': NodeState.RUNNING, 'failed': NodeState.TERMINATED, 'timedout': NodeState.TERMINATED, 'loading': NodeState.PENDING, 'time': NodeState.PENDING, 'future': NodeState.PENDING, 'error': NodeState.UNKNOWN, 'notready': NodeState.PENDING, 'notavailable': NodeState.TERMINATED, 'success': NodeState.PENDING } connectionCls = VCLConnection name = 'VCL' website = 'http://incubator.apache.org/vcl/' type = Provider.VCL def __init__(self, key, secret, secure=True, host=None, port=None, *args, **kwargs): """ :param key: API key or username to used (required) :type key: ``str`` :param secret: Secret password to be used (required) :type secret: ``str`` :param secure: Whether to use HTTPS or HTTP. :type secure: ``bool`` :param host: Override hostname used for connections. (required) :type host: ``str`` :param port: Override port used for connections. 
:type port: ``int`` :rtype: ``None`` """ if not host: raise Exception('When instantiating VCL driver directly ' + 'you also need to provide host') super(VCLNodeDriver, self).__init__(key, secret, secure=True, host=None, port=None, *args, **kwargs) def _vcl_request(self, method, *args): res = self.connection.request( method, *args ).object if(res['status'] == 'error'): raise LibcloudError(res['errormsg'], driver=self) return res def create_node(self, **kwargs): """Create a new VCL reservation size and name ignored, image is the id from list_image @inherits: :class:`NodeDriver.create_node` :keyword image: image is the id from list_image :type image: ``str`` :keyword start: start time as unix timestamp :type start: ``str`` :keyword length: length of time in minutes :type length: ``str`` """ image = kwargs["image"] start = kwargs.get('start', int(time.time())) length = kwargs.get('length', '60') res = self._vcl_request( "XMLRPCaddRequest", image.id, start, length ) return Node( id=res['requestid'], name=image.name, state=self.NODE_STATE_MAP[res['status']], public_ips=[], private_ips=[], driver=self, image=image.name ) def destroy_node(self, node): """ End VCL reservation for the node passed in. Throws error if request fails. :param node: The node to be destroyed :type node: :class:`Node` :rtype: ``bool`` """ try: self._vcl_request( 'XMLRPCendRequest', node.id ) except LibcloudError: return False return True def _to_image(self, img): return NodeImage( id=img['id'], name=img['name'], driver=self.connection.driver ) def list_images(self, location=None): """ List images available to the user provided credentials @inherits: :class:`NodeDriver.list_images` """ res = self.connection.request( "XMLRPCgetImages" ).object return [self._to_image(i) for i in res] def list_sizes(self, location=None): """ VCL does not choosing sizes for node creation. Size of images are statically set by administrators. 
@inherits: :class:`NodeDriver.list_sizes` """ return [NodeSize( 't1.micro', 'none', '512', 0, 0, 0, self) ] def _to_connect_data(self, request_id, ipaddr): res = self._vcl_request( "XMLRPCgetRequestConnectData", request_id, ipaddr ) return res def _to_status(self, requestid, imagename, ipaddr): res = self._vcl_request( "XMLRPCgetRequestStatus", requestid ) public_ips = [] extra = [] if(res['status'] == 'ready'): cdata = self._to_connect_data(requestid, ipaddr) public_ips = [cdata['serverIP']] extra = { 'user': cdata['user'], 'pass': cdata['password'] } return Node( id=requestid, name=imagename, state=self.NODE_STATE_MAP[res['status']], public_ips=public_ips, private_ips=[], driver=self, image=imagename, extra=extra ) def _to_nodes(self, res, ipaddr): return [self._to_status( h['requestid'], h['imagename'], ipaddr ) for h in res] def list_nodes(self, ipaddr): """ List nodes :param ipaddr: IP address which should be used :type ipaddr: ``str`` :rtype: ``list`` of :class:`Node` """ res = self._vcl_request( "XMLRPCgetRequestIds" ) return self._to_nodes(res['requests'], ipaddr) def ex_update_node_access(self, node, ipaddr): """ Update the remote ip accessing the node. :param node: the reservation node to update :type node: :class:`Node` :param ipaddr: the ipaddr used to access the node :type ipaddr: ``str`` :return: node with updated information :rtype: :class:`Node` """ return self._to_status(node.id, node.image, ipaddr) def ex_extend_request_time(self, node, minutes): """ Time in minutes to extend the requested node's reservation time :param node: the reservation node to update :type node: :class:`Node` :param minutes: the number of mintes to update :type minutes: ``str`` :return: true on success, throws error on failure :rtype: ``bool`` """ return self._vcl_request( "XMLRPCextendRequest", node.id, minutes ) def ex_get_request_end_time(self, node): """ Get the ending time of the node reservation. 
:param node: the reservation node to update :type node: :class:`Node` :return: unix timestamp :rtype: ``int`` """ res = self._vcl_request( "XMLRPCgetRequestIds" ) time = 0 for i in res['requests']: if i['requestid'] == node.id: time = i['end'] return time apache-libcloud-2.2.1/libcloud/compute/drivers/exoscale.py0000664000175000017500000000220512701023453023516 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.compute.providers import Provider from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver __all__ = [ 'ExoscaleNodeDriver' ] class ExoscaleNodeDriver(CloudStackNodeDriver): type = Provider.EXOSCALE name = 'Exoscale' website = 'https://www.exoscale.ch/' # API endpoint info host = 'api.exoscale.ch' path = '/compute' apache-libcloud-2.2.1/libcloud/compute/drivers/cloudscale.py0000664000175000017500000002011413153541406024035 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A driver for cloudscale.ch. """ import json from libcloud.utils.py3 import httplib from libcloud.common.base import ConnectionKey, JsonResponse from libcloud.compute.types import Provider, NodeState from libcloud.common.types import InvalidCredsError from libcloud.compute.base import NodeDriver from libcloud.compute.base import Node, NodeImage, NodeSize class CloudscaleResponse(JsonResponse): valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED, httplib.NO_CONTENT] def parse_error(self): body = self.parse_body() if self.status == httplib.UNAUTHORIZED: raise InvalidCredsError(body['detail']) else: # We are taking the first issue here. There might be multiple ones, # but that doesn't really matter. It's nicer if the error is just # one error (because it's a Python API and there's only one # exception. return next(iter(body.values())) def success(self): return self.status in self.valid_response_codes class CloudscaleConnection(ConnectionKey): """ Connection class for the cloudscale.ch driver. """ host = 'api.cloudscale.ch' responseCls = CloudscaleResponse def add_default_headers(self, headers): """ Add headers that are necessary for every request This method adds ``token`` to the request. """ headers['Authorization'] = 'Bearer %s' % (self.key) headers['Content-Type'] = 'application/json' return headers class CloudscaleNodeDriver(NodeDriver): """ Cloudscale's node driver. 
""" connectionCls = CloudscaleConnection type = Provider.CLOUDSCALE name = 'Cloudscale' website = 'https://www.cloudscale.ch' NODE_STATE_MAP = dict( changing=NodeState.PENDING, running=NodeState.RUNNING, stopped=NodeState.STOPPED, paused=NodeState.PAUSED, ) def __init__(self, key, **kwargs): super(CloudscaleNodeDriver, self).__init__(key, **kwargs) def list_nodes(self): ''' List all your existing compute nodes. ''' return self._list_resources('/v1/servers', self._to_node) def list_sizes(self): ''' Lists all available sizes. On cloudscale these are known as flavors. ''' return self._list_resources('/v1/flavors', self._to_size) def list_images(self): ''' List all images. Images are identified by slugs on cloudscale.ch. This means that minor version upgrades (e.g. Ubuntu 16.04.1 to Ubuntu 16.04.2) will be possible within the same id ``ubuntu-16.04``. ''' return self._list_resources('/v1/images', self._to_image) def create_node(self, name, size, image, location=None, ex_create_attr={}): """ Create a node. The `ex_create_attr` parameter can include the following dictionary key and value pairs: * `ssh_keys`: ``list`` of ``str`` ssh public keys * `volume_size_gb`: ``int`` defaults to 10. * `bulk_volume_size_gb`: defaults to None. * `use_public_network`: ``bool`` defaults to True * `use_private_network`: ``bool`` defaults to False * `use_ipv6`: ``bool`` defaults to True * `anti_affinity_with`: ``uuid`` of a server to create an anti-affinity group with that server or add it to the same group as that server. * `user_data`: ``str`` for optional cloud-config data :keyword ex_create_attr: A dictionary of optional attributes for droplet creation :type ex_create_attr: ``dict`` :return: The newly created node. 
:rtype: :class:`Node` """ attr = dict(ex_create_attr) attr.update( name=name, image=image.id, flavor=size.id, ) result = self.connection.request( '/v1/servers', data=json.dumps(attr), method='POST' ) return self._to_node(result.object) def reboot_node(self, node): ''' Reboot a node. It's also possible to use ``node.reboot()``. ''' return self._action(node, 'reboot') def ex_start_node(self, node): ''' Start a node. This is only possible if the node is stopped. ''' return self._action(node, 'start') def ex_stop_node(self, node): ''' Stop a specific node. Similar to ``shutdown -h now``. This is only possible if the node is running. ''' return self._action(node, 'stop') def ex_node_by_uuid(self, uuid): ''' :param str ex_user_data: A valid uuid that references your exisiting cloudscale.ch server. :type ex_user_data: ``str`` :return: The server node you asked for. :rtype: :class:`Node` ''' res = self.connection.request(self._get_server_url(uuid)) return self._to_node(res.object) def destroy_node(self, node): ''' Delete a node. It's also possible to use ``node.destroy()``. This will irreversibly delete the cloudscale.ch server and all its volumes. So please be cautious. 
''' res = self.connection.request( self._get_server_url(node.id), method='DELETE' ) return res.status == httplib.NO_CONTENT def _get_server_url(self, uuid): return '/v1/servers/%s' % uuid def _action(self, node, action_name): response = self.connection.request( self._get_server_url(node.id) + '/' + action_name, method='POST' ) return response.status == httplib.OK def _list_resources(self, url, tranform_func): data = self.connection.request(url, method='GET').object return [tranform_func(obj) for obj in data] def _to_node(self, data): state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN) extra_keys_exclude = ['uuid', 'name', 'status', 'flavor', 'image'] extra = {} for k, v in data.items(): if k not in extra_keys_exclude: extra[k] = v public_ips = [] private_ips = [] for interface in data['interfaces']: if interface['type'] == 'public': ips = public_ips else: ips = private_ips for address_obj in interface['addresses']: ips.append(address_obj['address']) return Node( id=data['uuid'], name=data['name'], state=state, public_ips=public_ips, private_ips=private_ips, extra=extra, driver=self, image=self._to_image(data['image']), size=self._to_size(data['flavor']), ) def _to_size(self, data): extra = {'vcpu_count': data['vcpu_count']} ram = data['memory_gb'] * 1024 return NodeSize(id=data['slug'], name=data['name'], ram=ram, disk=10, bandwidth=0, price=0, extra=extra, driver=self) def _to_image(self, data): extra = {'operating_system': data['operating_system']} return NodeImage(id=data['slug'], name=data['name'], extra=extra, driver=self) apache-libcloud-2.2.1/libcloud/compute/drivers/ikoula.py0000664000175000017500000000222412701023453023200 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.compute.providers import Provider from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver __all__ = [ 'IkoulaNodeDriver' ] class IkoulaNodeDriver(CloudStackNodeDriver): type = Provider.IKOULA name = 'Ikoula' website = 'http://express.ikoula.co.uk/cloudstack' # API endpoint info host = 'cloudstack.ikoula.com' path = '/client/api' apache-libcloud-2.2.1/libcloud/compute/drivers/ecp.py0000664000175000017500000002667112701023453022477 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Enomaly ECP driver """ import time import base64 import os import socket import binascii from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b # JSON is included in the standard library starting with Python 2.6. For 2.5 # and 2.4, there's a simplejson egg at: http://pypi.python.org/pypi/simplejson try: import simplejson as json except ImportError: import json from libcloud.common.base import Response, ConnectionUserAndKey from libcloud.compute.base import NodeDriver, NodeSize, NodeLocation from libcloud.compute.base import NodeImage, Node from libcloud.compute.types import Provider, NodeState, InvalidCredsError from libcloud.utils.networking import is_private_subnet # Defaults API_HOST = '' API_PORT = (80, 443) class ECPResponse(Response): def success(self): if self.status == httplib.OK or self.status == httplib.CREATED: try: j_body = json.loads(self.body) except ValueError: self.error = "JSON response cannot be decoded." return False if j_body['errno'] == 0: return True else: self.error = "ECP error: %s" % j_body['message'] return False elif self.status == httplib.UNAUTHORIZED: raise InvalidCredsError() else: self.error = "HTTP Error Code: %s" % self.status return False def parse_error(self): return self.error # Interpret the json responses - no error checking required def parse_body(self): return json.loads(self.body) def getheaders(self): return self.headers class ECPConnection(ConnectionUserAndKey): """ Connection class for the Enomaly ECP driver """ responseCls = ECPResponse host = API_HOST port = API_PORT def add_default_headers(self, headers): # Authentication username = self.user_id password = self.key base64string = base64.encodestring( b('%s:%s' % (username, password)))[:-1] authheader = "Basic %s" % base64string headers['Authorization'] = authheader return headers def _encode_multipart_formdata(self, fields): """ Based on Wade Leftwich's function: http://code.activestate.com/recipes/146306/ """ # use a random boundary that does not 
appear in the fields boundary = '' while boundary in ''.join(fields): boundary = binascii.hexlify(os.urandom(16)).decode('utf-8') L = [] for i in fields: L.append('--' + boundary) L.append('Content-Disposition: form-data; name="%s"' % i) L.append('') L.append(fields[i]) L.append('--' + boundary + '--') L.append('') body = '\r\n'.join(L) content_type = 'multipart/form-data; boundary=%s' % boundary header = {'Content-Type': content_type} return header, body class ECPNodeDriver(NodeDriver): """ Enomaly ECP node driver """ name = "Enomaly Elastic Computing Platform" website = 'http://www.enomaly.com/' type = Provider.ECP connectionCls = ECPConnection def list_nodes(self): """ Returns a list of all running Nodes :rtype: ``list`` of :class:`Node` """ # Make the call res = self.connection.request('/rest/hosting/vm/list').parse_body() # Put together a list of node objects nodes = [] for vm in res['vms']: node = self._to_node(vm) if node is not None: nodes.append(node) # And return it return nodes def _to_node(self, vm): """ Turns a (json) dictionary into a Node object. This returns only running VMs. """ # Check state if not vm['state'] == "running": return None # IPs iplist = [interface['ip'] for interface in vm['interfaces'] if interface['ip'] != '127.0.0.1'] public_ips = [] private_ips = [] for ip in iplist: try: socket.inet_aton(ip) except socket.error: # not a valid ip continue if is_private_subnet(ip): private_ips.append(ip) else: public_ips.append(ip) # Create the node object n = Node( id=vm['uuid'], name=vm['name'], state=NodeState.RUNNING, public_ips=public_ips, private_ips=private_ips, driver=self, ) return n def reboot_node(self, node): """ Shuts down a VM and then starts it again. 
@inherits: :class:`NodeDriver.reboot_node` """ # Turn the VM off # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata({'action': 'stop'}) self.connection.request( '/rest/hosting/vm/%s' % node.id, method='POST', headers=d[0], data=d[1] ).parse_body() node.state = NodeState.REBOOTING # Wait for it to turn off and then continue (to turn it on again) while node.state == NodeState.REBOOTING: # Check if it's off. response = self.connection.request( '/rest/hosting/vm/%s' % node.id ).parse_body() if response['vm']['state'] == 'off': node.state = NodeState.TERMINATED else: time.sleep(5) # Turn the VM back on. # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata({'action': 'start'}) self.connection.request( '/rest/hosting/vm/%s' % node.id, method='POST', headers=d[0], data=d[1] ).parse_body() node.state = NodeState.RUNNING return True def destroy_node(self, node): """ Shuts down and deletes a VM. @inherits: :class:`NodeDriver.destroy_node` """ # Shut down first # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata({'action': 'stop'}) self.connection.request( '/rest/hosting/vm/%s' % node.id, method='POST', headers=d[0], data=d[1] ).parse_body() # Ensure there was no application level error node.state = NodeState.PENDING # Wait for the VM to turn off before continuing while node.state == NodeState.PENDING: # Check if it's off. response = self.connection.request( '/rest/hosting/vm/%s' % node.id ).parse_body() if response['vm']['state'] == 'off': node.state = NodeState.TERMINATED else: time.sleep(5) # Delete the VM # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata({'action': 'delete'}) self.connection.request( '/rest/hosting/vm/%s' % (node.id), method='POST', headers=d[0], data=d[1] ).parse_body() return True def list_images(self, location=None): """ Returns a list of all package templates aka appliances aka images. 
@inherits: :class:`NodeDriver.list_images` """ # Make the call response = self.connection.request( '/rest/hosting/ptemplate/list').parse_body() # Turn the response into an array of NodeImage objects images = [] for ptemplate in response['packages']: images.append(NodeImage( id=ptemplate['uuid'], name='%s: %s' % (ptemplate['name'], ptemplate['description']), driver=self,) ) return images def list_sizes(self, location=None): """ Returns a list of all hardware templates @inherits: :class:`NodeDriver.list_sizes` """ # Make the call response = self.connection.request( '/rest/hosting/htemplate/list').parse_body() # Turn the response into an array of NodeSize objects sizes = [] for htemplate in response['templates']: sizes.append(NodeSize( id=htemplate['uuid'], name=htemplate['name'], ram=htemplate['memory'], disk=0, # Disk is independent of hardware template. bandwidth=0, # There is no way to keep track of bandwidth. price=0, # The billing system is external. driver=self,) ) return sizes def list_locations(self): """ This feature does not exist in ECP. Returns hard coded dummy location. :rtype: ``list`` of :class:`NodeLocation` """ return [NodeLocation(id=1, name="Cloud", country='', driver=self), ] def create_node(self, **kwargs): """ Creates a virtual machine. :keyword name: String with a name for this new node (required) :type name: ``str`` :keyword size: The size of resources allocated to this node . (required) :type size: :class:`NodeSize` :keyword image: OS Image to boot on node. (required) :type image: :class:`NodeImage` :rtype: :class:`Node` """ # Find out what network to put the VM on. 
res = self.connection.request( '/rest/hosting/network/list').parse_body() # Use the first / default network because there is no way to specific # which one network = res['networks'][0]['uuid'] # Prepare to make the VM data = { 'name': str(kwargs['name']), 'package': str(kwargs['image'].id), 'hardware': str(kwargs['size'].id), 'network_uuid': str(network), 'disk': '' } # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata(data) response = self.connection.request( '/rest/hosting/vm/', method='PUT', headers=d[0], data=d[1] ).parse_body() # Create a node object and return it. n = Node( id=response['machine_id'], name=data['name'], state=NodeState.PENDING, public_ips=[], private_ips=[], driver=self, ) return n apache-libcloud-2.2.1/libcloud/compute/drivers/digitalocean.py0000664000175000017500000004641313153541406024354 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" DigitalOcean Driver """ import json import warnings from libcloud.utils.iso8601 import parse_date from libcloud.utils.py3 import httplib from libcloud.common.digitalocean import DigitalOcean_v1_Error from libcloud.common.digitalocean import DigitalOcean_v2_BaseDriver from libcloud.common.types import InvalidCredsError from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeImage, NodeSize, NodeLocation, KeyPair from libcloud.compute.base import Node, NodeDriver from libcloud.compute.base import StorageVolume, VolumeSnapshot __all__ = [ 'DigitalOceanNodeDriver', 'DigitalOcean_v2_NodeDriver' ] class DigitalOceanNodeDriver(NodeDriver): """ DigitalOcean NodeDriver defaulting to using APIv2. :keyword key: Personal Access Token required for authentication. :type key: ``str`` :keyword secret: Previously used with API version ``v1``. (deprecated) :type secret: ``str`` :keyword api_version: Specifies the API version to use. Defaults to using ``v2``, currently the only valid option. 
(optional) :type api_version: ``str`` """ type = Provider.DIGITAL_OCEAN name = 'DigitalOcean' website = 'https://www.digitalocean.com' def __new__(cls, key, secret=None, api_version='v2', **kwargs): if cls is DigitalOceanNodeDriver: if api_version == 'v1' or secret is not None: if secret is not None and api_version == 'v2': raise InvalidCredsError( 'secret not accepted for v2 authentication') raise DigitalOcean_v1_Error() elif api_version == 'v2': cls = DigitalOcean_v2_NodeDriver else: raise NotImplementedError('Unsupported API version: %s' % (api_version)) return super(DigitalOceanNodeDriver, cls).__new__(cls, **kwargs) # TODO Implement v1 driver using KeyPair class SSHKey(object): def __init__(self, id, name, pub_key): self.id = id self.name = name self.pub_key = pub_key def __repr__(self): return (('') % (self.id, self.name, self.pub_key)) class DigitalOcean_v2_NodeDriver(DigitalOcean_v2_BaseDriver, DigitalOceanNodeDriver): """ DigitalOcean NodeDriver using v2 of the API. """ NODE_STATE_MAP = {'new': NodeState.PENDING, 'off': NodeState.STOPPED, 'active': NodeState.RUNNING, 'archive': NodeState.TERMINATED} EX_CREATE_ATTRIBUTES = ['backups', 'ipv6', 'private_networking', 'ssh_keys'] def list_images(self): data = self._paginated_request('/v2/images', 'images') return list(map(self._to_image, data)) def list_key_pairs(self): """ List all the available SSH keys. :return: Available SSH keys. 
:rtype: ``list`` of :class:`KeyPair` """ data = self._paginated_request('/v2/account/keys', 'ssh_keys') return list(map(self._to_key_pair, data)) def list_locations(self): data = self._paginated_request('/v2/regions', 'regions') return list(map(self._to_location, data)) def list_nodes(self): data = self._paginated_request('/v2/droplets', 'droplets') return list(map(self._to_node, data)) def list_sizes(self): data = self._paginated_request('/v2/sizes', 'sizes') return list(map(self._to_size, data)) def list_volumes(self): data = self._paginated_request('/v2/volumes', 'volumes') return list(map(self._to_volume, data)) def create_node(self, name, size, image, location, ex_create_attr=None, ex_ssh_key_ids=None, ex_user_data=None): """ Create a node. The `ex_create_attr` parameter can include the following dictionary key and value pairs: * `backups`: ``bool`` defaults to False * `ipv6`: ``bool`` defaults to False * `private_networking`: ``bool`` defaults to False * `user_data`: ``str`` for cloud-config data * `ssh_keys`: ``list`` of ``int`` key ids or ``str`` fingerprints `ex_create_attr['ssh_keys']` will override `ex_ssh_key_ids` assignment. :keyword ex_create_attr: A dictionary of optional attributes for droplet creation :type ex_create_attr: ``dict`` :keyword ex_ssh_key_ids: A list of ssh key ids which will be added to the server. (optional) :type ex_ssh_key_ids: ``list`` of ``int`` key ids or ``str`` key fingerprints :keyword ex_user_data: User data to be added to the node on create. (optional) :type ex_user_data: ``str`` :return: The newly created node. 
:rtype: :class:`Node` """ attr = {'name': name, 'size': size.name, 'image': image.id, 'region': location.id, 'user_data': ex_user_data} if ex_ssh_key_ids: warnings.warn("The ex_ssh_key_ids parameter has been deprecated in" " favor of the ex_create_attr parameter.") attr['ssh_keys'] = ex_ssh_key_ids ex_create_attr = ex_create_attr or {} for key in ex_create_attr.keys(): if key in self.EX_CREATE_ATTRIBUTES: attr[key] = ex_create_attr[key] res = self.connection.request('/v2/droplets', data=json.dumps(attr), method='POST') data = res.object['droplet'] # TODO: Handle this in the response class status = res.object.get('status', 'OK') if status == 'ERROR': message = res.object.get('message', None) error_message = res.object.get('error_message', message) raise ValueError('Failed to create node: %s' % (error_message)) return self._to_node(data=data) def destroy_node(self, node): res = self.connection.request('/v2/droplets/%s' % (node.id), method='DELETE') return res.status == httplib.NO_CONTENT def reboot_node(self, node): attr = {'type': 'reboot'} res = self.connection.request('/v2/droplets/%s/actions' % (node.id), data=json.dumps(attr), method='POST') return res.status == httplib.CREATED def create_image(self, node, name): """ Create an image from a Node. @inherits: :class:`NodeDriver.create_image` :param node: Node to use as base for image :type node: :class:`Node` :param node: Name for image :type node: ``str`` :rtype: ``bool`` """ attr = {'type': 'snapshot', 'name': name} res = self.connection.request('/v2/droplets/%s/actions' % (node.id), data=json.dumps(attr), method='POST') return res.status == httplib.CREATED def delete_image(self, image): """Delete an image for node. 
@inherits: :class:`NodeDriver.delete_image` :param image: the image to be deleted :type image: :class:`NodeImage` :rtype: ``bool`` """ res = self.connection.request('/v2/images/%s' % (image.id), method='DELETE') return res.status == httplib.NO_CONTENT def get_image(self, image_id): """ Get an image based on an image_id @inherits: :class:`NodeDriver.get_image` :param image_id: Image identifier :type image_id: ``int`` :return: A NodeImage object :rtype: :class:`NodeImage` """ data = self._paginated_request('/v2/images/%s' % (image_id), 'image') return self._to_image(data) def ex_change_kernel(self, node, kernel_id): attr = {'type': 'change_kernel', 'kernel': kernel_id} res = self.connection.request('/v2/droplets/%s/actions' % (node.id), data=json.dumps(attr), method='POST') return res.status == httplib.CREATED def ex_rename_node(self, node, name): attr = {'type': 'rename', 'name': name} res = self.connection.request('/v2/droplets/%s/actions' % (node.id), data=json.dumps(attr), method='POST') return res.status == httplib.CREATED def ex_shutdown_node(self, node): attr = {'type': 'shutdown'} res = self.connection.request('/v2/droplets/%s/actions' % (node.id), data=json.dumps(attr), method='POST') return res.status == httplib.CREATED def ex_hard_reboot(self, node): attr = {'type': 'power_cycle'} res = self.connection.request('/v2/droplets/%s/actions' % (node.id), data=json.dumps(attr), method='POST') return res.status == httplib.CREATED def ex_power_on_node(self, node): attr = {'type': 'power_on'} res = self.connection.request('/v2/droplets/%s/actions' % (node.id), data=json.dumps(attr), method='POST') return res.status == httplib.CREATED def create_key_pair(self, name, public_key=''): """ Create a new SSH key. 
:param name: Key name (required) :type name: ``str`` :param public_key: Valid public key string (required) :type public_key: ``str`` """ attr = {'name': name, 'public_key': public_key} res = self.connection.request('/v2/account/keys', method='POST', data=json.dumps(attr)) data = res.object['ssh_key'] return self._to_key_pair(data=data) def delete_key_pair(self, key): """ Delete an existing SSH key. :param key: SSH key (required) :type key: :class:`KeyPair` """ key_id = key.extra['id'] res = self.connection.request('/v2/account/keys/%s' % (key_id), method='DELETE') return res.status == httplib.NO_CONTENT def get_key_pair(self, name): """ Retrieve a single key pair. :param name: Name of the key pair to retrieve. :type name: ``str`` :rtype: :class:`.KeyPair` """ qkey = [k for k in self.list_key_pairs() if k.name == name][0] data = self.connection.request('/v2/account/keys/%s' % qkey.extra['id']).object['ssh_key'] return self._to_key_pair(data=data) def create_volume(self, size, name, location=None, snapshot=None): """ Create a new volume. :param size: Size of volume in gigabytes (required) :type size: ``int`` :param name: Name of the volume to be created :type name: ``str`` :param location: Which data center to create a volume in. If empty, undefined behavior will be selected. (optional) :type location: :class:`.NodeLocation` :param snapshot: Snapshot from which to create the new volume. (optional) :type snapshot: :class:`.VolumeSnapshot` :return: The newly created volume. 
:rtype: :class:`StorageVolume` """ attr = {'name': name, 'size_gigabytes': size, 'region': location.id} res = self.connection.request('/v2/volumes', data=json.dumps(attr), method='POST') data = res.object['volume'] status = res.object.get('status', 'OK') if status == 'ERROR': message = res.object.get('message', None) error_message = res.object.get('error_message', message) raise ValueError('Failed to create volume: %s' % (error_message)) return self._to_volume(data=data) def destroy_volume(self, volume): """ Destroys a storage volume. :param volume: Volume to be destroyed :type volume: :class:`StorageVolume` :rtype: ``bool`` """ res = self.connection.request('/v2/volumes/%s' % volume.id, method='DELETE') return res.status == httplib.NO_CONTENT def attach_volume(self, node, volume, device=None): """ Attaches volume to node. :param node: Node to attach volume to. :type node: :class:`.Node` :param volume: Volume to attach. :type volume: :class:`.StorageVolume` :param device: Where the device is exposed, e.g. '/dev/sdb' :type device: ``str`` :rytpe: ``bool`` """ attr = {'type': 'attach', 'droplet_id': node.id, 'volume_id': volume.id, 'region': volume.extra['region_slug']} res = self.connection.request('/v2/volumes/actions', data=json.dumps(attr), method='POST') return res.status == httplib.ACCEPTED def detach_volume(self, volume): """ Detaches a volume from a node. :param volume: Volume to be detached :type volume: :class:`.StorageVolume` :rtype: ``bool`` """ attr = {'type': 'detach', 'volume_id': volume.id, 'region': volume.extra['region_slug']} responses = [] for droplet_id in volume.extra['droplet_ids']: attr['droplet_id'] = droplet_id res = self.connection.request('/v2/volumes/actions', data=json.dumps(attr), method='POST') responses.append(res) return all([r.status == httplib.ACCEPTED for r in responses]) def create_volume_snapshot(self, volume, name): """ Create a new volume snapshot. 
:param volume: Volume to create a snapshot for :type volume: class:`StorageVolume` :return: The newly created volume snapshot. :rtype: :class:`VolumeSnapshot` """ attr = {'name': name} res = self.connection.request('/v2/volumes/%s/snapshots' % ( volume.id), data=json.dumps(attr), method='POST') data = res.object['snapshot'] return self._to_volume_snapshot(data=data) def list_volume_snapshots(self, volume): """ List snapshots for a volume. :param volume: Volume to list snapshots for :type volume: class:`StorageVolume` :return: List of volume snapshots. :rtype: ``list`` of :class: `StorageVolume` """ data = self._paginated_request('/v2/volumes/%s/snapshots' % (volume.id), 'snapshots') return list(map(self._to_volume_snapshot, data)) def delete_volume_snapshot(self, snapshot): """ Delete a volume snapshot :param snapshot: volume snapshot to delete :type snapshot: class:`VolumeSnapshot` :rtype: ``bool`` """ res = self.connection.request('v2/snapshots/%s' % (snapshot.id), method='DELETE') return res.status == httplib.NO_CONTENT def _to_node(self, data): extra_keys = ['memory', 'vcpus', 'disk', 'region', 'image', 'size_slug', 'locked', 'created_at', 'networks', 'kernel', 'backup_ids', 'snapshot_ids', 'features'] if 'status' in data: state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN) else: state = NodeState.UNKNOWN created = parse_date(data['created_at']) networks = data['networks'] private_ips = [] public_ips = [] if networks: for net in networks['v4']: if net['type'] == 'private': private_ips = [net['ip_address']] if net['type'] == 'public': public_ips = [net['ip_address']] extra = {} for key in extra_keys: if key in data: extra[key] = data[key] node = Node(id=data['id'], name=data['name'], state=state, public_ips=public_ips, private_ips=private_ips, created_at=created, driver=self, extra=extra) return node def _to_image(self, data): extra = {'distribution': data['distribution'], 'public': data['public'], 'slug': data['slug'], 'regions': data['regions'], 
'min_disk_size': data['min_disk_size'], 'created_at': data['created_at']} return NodeImage(id=data['id'], name=data['name'], driver=self, extra=extra) def _to_volume(self, data): extra = {'created_at': data['created_at'], 'droplet_ids': data['droplet_ids'], 'region': data['region'], 'region_slug': data['region']['slug']} return StorageVolume(id=data['id'], name=data['name'], size=data['size_gigabytes'], driver=self, extra=extra) def _to_location(self, data): return NodeLocation(id=data['slug'], name=data['name'], country=None, driver=self) def _to_size(self, data): extra = {'vcpus': data['vcpus'], 'regions': data['regions'], 'price_monthly': data['price_monthly']} return NodeSize(id=data['slug'], name=data['slug'], ram=data['memory'], disk=data['disk'], bandwidth=data['transfer'], price=data['price_hourly'], driver=self, extra=extra) def _to_key_pair(self, data): extra = {'id': data['id']} return KeyPair(name=data['name'], fingerprint=data['fingerprint'], public_key=data['public_key'], private_key=None, driver=self, extra=extra) def _to_volume_snapshot(self, data): extra = {'created_at': data['created_at'], 'resource_id': data['resource_id'], 'regions': data['regions'], 'min_disk_size': data['min_disk_size']} return VolumeSnapshot(id=data['id'], name=data['name'], size=data['size_gigabytes'], driver=self, extra=extra) apache-libcloud-2.2.1/libcloud/compute/drivers/elastichosts.py0000664000175000017500000001623512701023453024430 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ElasticHosts Driver """ from libcloud.compute.types import Provider from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver # API end-points API_ENDPOINTS = { 'lon-p': { 'name': 'London Peer 1', 'country': 'United Kingdom', 'host': 'api-lon-p.elastichosts.com' }, 'lon-b': { 'name': 'London BlueSquare', 'country': 'United Kingdom', 'host': 'api-lon-b.elastichosts.com' }, 'sat-p': { 'name': 'San Antonio Peer 1', 'country': 'United States', 'host': 'api-sat-p.elastichosts.com' }, 'lax-p': { 'name': 'Los Angeles Peer 1', 'country': 'United States', 'host': 'api-lax-p.elastichosts.com' }, 'sjc-c': { 'name': 'San Jose (Silicon Valley)', 'country': 'United States', 'host': 'api-sjc-c.elastichosts.com' }, 'tor-p': { 'name': 'Toronto Peer 1', 'country': 'Canada', 'host': 'api-tor-p.elastichosts.com' }, 'syd-y': { 'name': 'Sydney', 'country': 'Australia', 'host': 'api-syd-v.elastichosts.com' }, 'cn-1': { 'name': 'Hong Kong', 'country': 'China', 'host': 'api-hkg-e.elastichosts.com' } } # Default API end-point for the base connection class. 
DEFAULT_REGION = 'sat-p' # Retrieved from http://www.elastichosts.com/cloud-hosting/api STANDARD_DRIVES = { '38df0986-4d85-4b76-b502-3878ffc80161': { 'uuid': '38df0986-4d85-4b76-b502-3878ffc80161', 'description': 'CentOS Linux 5.5', 'size_gunzipped': '3GB', 'supports_deployment': True, }, '980cf63c-f21e-4382-997b-6541d5809629': { 'uuid': '980cf63c-f21e-4382-997b-6541d5809629', 'description': 'Debian Linux 5.0', 'size_gunzipped': '1GB', 'supports_deployment': True, }, 'aee5589a-88c3-43ef-bb0a-9cab6e64192d': { 'uuid': 'aee5589a-88c3-43ef-bb0a-9cab6e64192d', 'description': 'Ubuntu Linux 10.04', 'size_gunzipped': '1GB', 'supports_deployment': True, }, '62f512cd-82c7-498e-88d8-a09ac2ef20e7': { 'uuid': '62f512cd-82c7-498e-88d8-a09ac2ef20e7', 'description': 'Ubuntu Linux 12.04', 'size_gunzipped': '1GB', 'supports_deployment': True, }, 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0': { 'uuid': 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0', 'description': 'Windows Web Server 2008', 'size_gunzipped': '13GB', 'supports_deployment': False, }, '30824e97-05a4-410c-946e-2ba5a92b07cb': { 'uuid': '30824e97-05a4-410c-946e-2ba5a92b07cb', 'description': 'Windows Web Server 2008 R2', 'size_gunzipped': '13GB', 'supports_deployment': False, }, '9ecf810e-6ad1-40ef-b360-d606f0444671': { 'uuid': '9ecf810e-6ad1-40ef-b360-d606f0444671', 'description': 'Windows Web Server 2008 R2 + SQL Server', 'size_gunzipped': '13GB', 'supports_deployment': False, }, '10a88d1c-6575-46e3-8d2c-7744065ea530': { 'uuid': '10a88d1c-6575-46e3-8d2c-7744065ea530', 'description': 'Windows Server 2008 Standard R2', 'size_gunzipped': '13GB', 'supports_deployment': False, }, '2567f25c-8fb8-45c7-95fc-bfe3c3d84c47': { 'uuid': '2567f25c-8fb8-45c7-95fc-bfe3c3d84c47', 'description': 'Windows Server 2008 Standard R2 + SQL Server', 'size_gunzipped': '13GB', 'supports_deployment': False, }, } class ElasticHostsException(Exception): def __str__(self): return self.args[0] def __repr__(self): return "" % (self.args[0]) class 
ElasticHostsNodeDriver(ElasticStackBaseNodeDriver): """ Node Driver class for ElasticHosts """ type = Provider.ELASTICHOSTS api_name = 'elastichosts' name = 'ElasticHosts' website = 'http://www.elastichosts.com/' features = {"create_node": ["generates_password"]} _standard_drives = STANDARD_DRIVES def __init__(self, key, secret=None, secure=True, host=None, port=None, region=DEFAULT_REGION, **kwargs): if hasattr(self, '_region'): region = self._region if region not in API_ENDPOINTS: raise ValueError('Invalid region: %s' % (region)) self._host_argument_set = host is not None super(ElasticHostsNodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, region=region, **kwargs) def _ex_connection_class_kwargs(self): """ Return the host value based on the user supplied region. """ kwargs = {} if not self._host_argument_set: kwargs['host'] = API_ENDPOINTS[self.region]['host'] return kwargs class ElasticHostsUK1NodeDriver(ElasticHostsNodeDriver): """ ElasticHosts node driver for the London Peer 1 end-point """ name = 'ElasticHosts (lon-p)' _region = 'lon-p' class ElasticHostsUK2NodeDriver(ElasticHostsNodeDriver): """ ElasticHosts node driver for the London Bluesquare end-point """ name = 'ElasticHosts (lon-b)' _region = 'lon-b' class ElasticHostsUS1NodeDriver(ElasticHostsNodeDriver): """ ElasticHosts node driver for the San Antonio Peer 1 end-point """ name = 'ElasticHosts (sat-p)' _region = 'sat-p' class ElasticHostsUS2NodeDriver(ElasticHostsNodeDriver): """ ElasticHosts node driver for the Los Angeles Peer 1 end-point """ name = 'ElasticHosts (lax-p)' _region = 'lax-p' class ElasticHostsUS3NodeDriver(ElasticHostsNodeDriver): """ ElasticHosts node driver for the San Jose (Silicon Valley) end-point """ name = 'ElasticHosts (sjc-c)' _region = 'sjc-c' class ElasticHostsCA1NodeDriver(ElasticHostsNodeDriver): """ ElasticHosts node driver for the Toronto Peer 1 end-point """ name = 'ElasticHosts (tor-p)' _region = 'tor-p' class 
ElasticHostsAU1NodeDriver(ElasticHostsNodeDriver): """ ElasticHosts node driver for the Sydney end-point """ name = 'ElasticHosts (syd-y)' _region = 'syd-y' class ElasticHostsCN1NodeDriver(ElasticHostsNodeDriver): """ ElasticHosts node driver for the Hong Kong end-point """ name = 'ElasticHosts (cn-1)' _region = 'cn-1' apache-libcloud-2.2.1/libcloud/compute/drivers/kili.py0000664000175000017500000000617713153541406022664 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ HP Public cloud driver which is essentially just a small wrapper around OpenStack driver. 
""" from libcloud.compute.types import Provider, LibcloudError from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver __all__ = [ 'KiliCloudNodeDriver' ] ENDPOINT_ARGS = { 'service_type': 'compute', 'name': 'nova', 'region': 'RegionOne' } AUTH_URL = 'https://api.kili.io/keystone/v2.0/tokens' class KiliCloudConnection(OpenStack_1_1_Connection): _auth_version = '2.0_password' def __init__(self, *args, **kwargs): self.region = kwargs.pop('region', None) self.get_endpoint_args = kwargs.pop('get_endpoint_args', None) super(KiliCloudConnection, self).__init__(*args, **kwargs) self._auth_version = KiliCloudConnection._auth_version def get_endpoint(self): if not self.get_endpoint_args: raise LibcloudError( 'KiliCloudConnection must have get_endpoint_args set') if '2.0_password' in self._auth_version: ep = self.service_catalog.get_endpoint(**self.get_endpoint_args) else: raise LibcloudError( 'Auth version "%s" not supported' % (self._auth_version)) public_url = ep.url if not public_url: raise LibcloudError('Could not find specified endpoint') return public_url class KiliCloudNodeDriver(OpenStack_1_1_NodeDriver): name = 'Kili Public Cloud' website = 'http://kili.io/' connectionCls = KiliCloudConnection type = Provider.HPCLOUD def __init__(self, key, secret, tenant_name, secure=True, host=None, port=None, **kwargs): """ Note: tenant_name argument is required for Kili cloud. 
""" self.tenant_name = tenant_name super(KiliCloudNodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, **kwargs) def _ex_connection_class_kwargs(self): kwargs = self.openstack_connection_kwargs() kwargs['get_endpoint_args'] = ENDPOINT_ARGS kwargs['ex_force_auth_url'] = AUTH_URL kwargs['ex_tenant_name'] = self.tenant_name return kwargs apache-libcloud-2.2.1/libcloud/compute/drivers/nephoscale.py0000664000175000017500000004150612701023453024043 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" NephoScale Cloud driver (http://www.nephoscale.com) API documentation: http://docs.nephoscale.com Created by Markos Gogoulos (https://mist.io) """ import base64 import sys import time import os import binascii from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.utils.py3 import urlencode from libcloud.compute.providers import Provider from libcloud.common.base import JsonResponse, ConnectionUserAndKey from libcloud.compute.types import (NodeState, InvalidCredsError, LibcloudError) from libcloud.compute.base import (Node, NodeDriver, NodeImage, NodeSize, NodeLocation) from libcloud.utils.networking import is_private_subnet API_HOST = 'api.nephoscale.com' NODE_STATE_MAP = { 'on': NodeState.RUNNING, 'off': NodeState.UNKNOWN, 'unknown': NodeState.UNKNOWN, } VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED, httplib.NO_CONTENT] # used in create_node and specifies how many times to get the list of nodes and # check if the newly created node is there. This is because when a request is # sent to create a node, NephoScale replies with the job id, and not the node # itself thus we don't have the ip addresses, that are required in deploy_node CONNECT_ATTEMPTS = 10 class NodeKey(object): def __init__(self, id, name, public_key=None, key_group=None, password=None): self.id = id self.name = name self.key_group = key_group self.password = password self.public_key = public_key def __repr__(self): return (('') % (self.id, self.name)) class NephoscaleResponse(JsonResponse): """ Nephoscale API Response """ def parse_error(self): if self.status == httplib.UNAUTHORIZED: raise InvalidCredsError('Authorization Failed') if self.status == httplib.NOT_FOUND: raise Exception("The resource you are looking for is not found.") return self.body def success(self): return self.status in VALID_RESPONSE_CODES class NephoscaleConnection(ConnectionUserAndKey): """ Nephoscale connection class. 
Authenticates to the API through Basic Authentication with username/password """ host = API_HOST responseCls = NephoscaleResponse allow_insecure = False def add_default_headers(self, headers): """ Add parameters that are necessary for every request """ user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8')) return headers class NephoscaleNodeDriver(NodeDriver): """ Nephoscale node driver class. >>> from libcloud.compute.providers import get_driver >>> driver = get_driver('nephoscale') >>> conn = driver('nepho_user','nepho_password') >>> conn.list_nodes() """ type = Provider.NEPHOSCALE api_name = 'nephoscale' name = 'NephoScale' website = 'http://www.nephoscale.com' connectionCls = NephoscaleConnection features = {'create_node': ['ssh_key']} def list_locations(self): """ List available zones for deployment :rtype: ``list`` of :class:`NodeLocation` """ result = self.connection.request('/datacenter/zone/').object locations = [] for value in result.get('data', []): location = NodeLocation(id=value.get('id'), name=value.get('name'), country='US', driver=self) locations.append(location) return locations def list_images(self): """ List available images for deployment :rtype: ``list`` of :class:`NodeImage` """ result = self.connection.request('/image/server/').object images = [] for value in result.get('data', []): extra = {'architecture': value.get('architecture'), 'disks': value.get('disks'), 'billable_type': value.get('billable_type'), 'pcpus': value.get('pcpus'), 'cores': value.get('cores'), 'uri': value.get('uri'), 'storage': value.get('storage'), } image = NodeImage(id=value.get('id'), name=value.get('friendly_name'), driver=self, extra=extra) images.append(image) return images def list_sizes(self): """ List available sizes containing prices :rtype: ``list`` of :class:`NodeSize` """ result = self.connection.request('/server/type/cloud/').object sizes = [] for value in result.get('data', 
[]): value_id = value.get('id') size = NodeSize(id=value_id, name=value.get('friendly_name'), ram=value.get('ram'), disk=value.get('storage'), bandwidth=None, price=self._get_size_price(size_id=str(value_id)), driver=self) sizes.append(size) return sorted(sizes, key=lambda k: k.price) def list_nodes(self): """ List available nodes :rtype: ``list`` of :class:`Node` """ result = self.connection.request('/server/cloud/').object nodes = [self._to_node(value) for value in result.get('data', [])] return nodes def rename_node(self, node, name, hostname=None): """rename a cloud server, optionally specify hostname too""" data = {'name': name} if hostname: data['hostname'] = hostname params = urlencode(data) result = self.connection.request('/server/cloud/%s/' % node.id, data=params, method='PUT').object return result.get('response') in VALID_RESPONSE_CODES def reboot_node(self, node): """reboot a running node""" result = self.connection.request('/server/cloud/%s/initiator/restart/' % node.id, method='POST').object return result.get('response') in VALID_RESPONSE_CODES def ex_start_node(self, node): """start a stopped node""" result = self.connection.request('/server/cloud/%s/initiator/start/' % node.id, method='POST').object return result.get('response') in VALID_RESPONSE_CODES def ex_stop_node(self, node): """stop a running node""" result = self.connection.request('/server/cloud/%s/initiator/stop/' % node.id, method='POST').object return result.get('response') in VALID_RESPONSE_CODES def destroy_node(self, node): """destroy a node""" result = self.connection.request('/server/cloud/%s/' % node.id, method='DELETE').object return result.get('response') in VALID_RESPONSE_CODES def ex_list_keypairs(self, ssh=False, password=False, key_group=None): """ List available console and server keys There are two types of keys for NephoScale, ssh and password keys. If run without arguments, lists all keys. Otherwise list only ssh keys, or only password keys. 
Password keys with key_group 4 are console keys. When a server is created, it has two keys, one password or ssh key, and one password console key. :keyword ssh: if specified, show ssh keys only (optional) :type ssh: ``bool`` :keyword password: if specified, show password keys only (optional) :type password: ``bool`` :keyword key_group: if specified, show keys with this key_group only eg key_group=4 for console password keys (optional) :type key_group: ``int`` :rtype: ``list`` of :class:`NodeKey` """ if (ssh and password): raise LibcloudError('You can only supply ssh or password. To \ get all keys call with no arguments') if ssh: result = self.connection.request('/key/sshrsa/').object elif password: result = self.connection.request('/key/password/').object else: result = self.connection.request('/key/').object keys = [self._to_key(value) for value in result.get('data', [])] if key_group: keys = [key for key in keys if key.key_group == key_group] return keys def ex_create_keypair(self, name, public_key=None, password=None, key_group=None): """Creates a key, ssh or password, for server or console The group for the key (key_group) is 1 for Server and 4 for Console Returns the id of the created key """ if public_key: if not key_group: key_group = 1 data = { 'name': name, 'public_key': public_key, 'key_group': key_group } params = urlencode(data) result = self.connection.request('/key/sshrsa/', data=params, method='POST').object else: if not key_group: key_group = 4 if not password: password = self.random_password() data = { 'name': name, 'password': password, 'key_group': key_group } params = urlencode(data) result = self.connection.request('/key/password/', data=params, method='POST').object return result.get('data', {}).get('id', '') def ex_delete_keypair(self, key_id, ssh=False): """Delete an ssh key or password given it's id """ if ssh: result = self.connection.request('/key/sshrsa/%s/' % key_id, method='DELETE').object else: result = 
self.connection.request('/key/password/%s/' % key_id, method='DELETE').object return result.get('response') in VALID_RESPONSE_CODES def create_node(self, name, size, image, server_key=None, console_key=None, zone=None, **kwargs): """Creates the node, and sets the ssh key, console key NephoScale will respond with a 200-200 response after sending a valid request. If nowait=True is specified in the args, we then ask a few times until the server is created and assigned a public IP address, so that deploy_node can be run >>> from libcloud.compute.providers import get_driver >>> driver = get_driver('nephoscale') >>> conn = driver('nepho_user','nepho_password') >>> conn.list_nodes() >>> name = 'staging-server' >>> size = conn.list_sizes()[0] >>> image = conn.list_images()[9] >>> server_keys = conn.ex_list_keypairs(key_group=1)[0] >>> server_key = conn.ex_list_keypairs(key_group=1)[0].id 70867 >>> console_keys = conn.ex_list_keypairs(key_group=4)[0] >>> console_key = conn.ex_list_keypairs(key_group=4)[0].id 70907 >>> node = conn.create_node(name=name, size=size, image=image, \ console_key=console_key, server_key=server_key) We can also create an ssh key, plus a console key and deploy node with them >>> server_key = conn.ex_create_keypair(name, public_key='123') 71211 >>> console_key = conn.ex_create_keypair(name, key_group=4) 71213 We can increase the number of connect attempts to wait until the node is created, so that deploy_node has ip address to deploy the script We can also specify the location >>> location = conn.list_locations()[0] >>> node = conn.create_node(name=name, >>> ... size=size, >>> ... image=image, >>> ... console_key=console_key, >>> ... server_key=server_key, >>> ... connect_attempts=10, >>> ... nowait=True, >>> ... 
zone=location.id) """ hostname = kwargs.get('hostname', name) service_type = size.id image = image.id connect_attempts = int(kwargs.get('connect_attempts', CONNECT_ATTEMPTS)) data = {'name': name, 'hostname': hostname, 'service_type': service_type, 'image': image, 'server_key': server_key, 'console_key': console_key, 'zone': zone } params = urlencode(data) try: node = self.connection.request('/server/cloud/', data=params, method='POST') except Exception: e = sys.exc_info()[1] raise Exception("Failed to create node %s" % e) node = Node(id='', name=name, state=NodeState.UNKNOWN, public_ips=[], private_ips=[], driver=self) nowait = kwargs.get('ex_wait', False) if not nowait: return node else: # try to get the created node public ips, for use in deploy_node # At this point we don't have the id of the newly created Node, # so search name in nodes created_node = False while connect_attempts > 0: nodes = self.list_nodes() created_node = [c_node for c_node in nodes if c_node.name == name] if created_node: return created_node[0] else: time.sleep(60) connect_attempts = connect_attempts - 1 return node def _to_node(self, data): """Convert node in Node instances """ state = NODE_STATE_MAP.get(data.get('power_status'), '4') public_ips = [] private_ips = [] ip_addresses = data.get('ipaddresses', '') # E.g. 
"ipaddresses": "198.120.14.6, 10.132.60.1" if ip_addresses: for ip in ip_addresses.split(','): ip = ip.replace(' ', '') if is_private_subnet(ip): private_ips.append(ip) else: public_ips.append(ip) extra = { 'zone_data': data.get('zone'), 'zone': data.get('zone', {}).get('name'), 'image': data.get('image', {}).get('friendly_name'), 'create_time': data.get('create_time'), 'network_ports': data.get('network_ports'), 'is_console_enabled': data.get('is_console_enabled'), 'service_type': data.get('service_type', {}).get('friendly_name'), 'hostname': data.get('hostname') } node = Node(id=data.get('id'), name=data.get('name'), state=state, public_ips=public_ips, private_ips=private_ips, driver=self, extra=extra) return node def _to_key(self, data): return NodeKey(id=data.get('id'), name=data.get('name'), password=data.get('password'), key_group=data.get('key_group'), public_key=data.get('public_key')) def random_password(self, size=8): value = os.urandom(size) password = binascii.hexlify(value).decode('ascii') return password[:size] apache-libcloud-2.2.1/libcloud/compute/drivers/abiquo.py0000664000175000017500000007535113153541406023214 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Abiquo Compute Driver The driver implements the compute Abiquo functionality for the Abiquo API. This version is compatible with the following versions of Abiquo: * Abiquo 3.4 (http://wiki.abiquo.com/display/ABI34/The+Abiquo+API) """ from libcloud.utils.py3 import ET from libcloud.compute.base import NodeDriver, NodeSize from libcloud.compute.types import Provider, LibcloudError from libcloud.common.abiquo import (AbiquoConnection, get_href, AbiquoResponse) from libcloud.compute.base import NodeLocation, NodeImage, Node from libcloud.utils.py3 import tostring class AbiquoNodeDriver(NodeDriver): """ Implements the :class:`NodeDriver`'s for the Abiquo Compute Provider """ type = Provider.ABIQUO name = 'Abiquo' website = 'http://www.abiquo.com/' connectionCls = AbiquoConnection timeout = 2000 # some images take a lot of time! # Media Types NODES_MIME_TYPE = 'application/vnd.abiquo.virtualmachines+xml' NODE_MIME_TYPE = 'application/vnd.abiquo.virtualmachine+xml' VAPPS_MIME_TYPE = 'application/vnd.abiquo.virtualappliances+xml' VAPP_MIME_TYPE = 'application/vnd.abiquo.virtualappliance+xml' VM_TASK_MIME_TYPE = 'application/vnd.abiquo.virtualmachinetask+xml' USER_MIME_TYPE = 'application/vnd.abiquo.user+xml' ENT_MIME_TYPE = 'application/vnd.abiquo.enterprise+xml' VDCS_MIME_TYPE = 'application/vnd.abiquo.virtualdatacenters+xml' VDC_MIME_TYPE = 'application/vnd.abiquo.virtualdatacenter+xml' DCS_MIME_TYPE = 'application/vnd.abiquo.datacenters+xml' VMTPLS_MIME_TYPE = 'application/vnd.abiquo.virtualmachinetemplates+xml' VMTPL_MIME_TYPE = 'application/vnd.abiquo.virtualmachinetemplate+xml' NICS_MIME_TYPE = 'application/vnd.abiquo.nics+xml' DCRS_MIME_TYPE = 'application/vnd.abiquo.datacenterrepositories+xml' DCR_MIME_TYPE = 'application/vnd.abiquo.datacenterrepository+xml' AR_MIME_TYPE = 'application/vnd.abiquo.acceptedrequest+xml' # Others constants GIGABYTE = 1073741824 def __init__(self, user_id, secret, endpoint, **kwargs): """ Initializes Abiquo Driver Initializes the 
:class:`NodeDriver` object and populate the cache. :param user_id: identifier of Abiquo user (required) :type user_id: ``str`` :param secret: password of the Abiquo user (required) :type secret: ``str`` :param endpoint: Abiquo API endpoint (required) :type endpoint: ``str`` that can be parsed as URL """ self.endpoint = endpoint super(AbiquoNodeDriver, self).__init__(key=user_id, secret=secret, secure=False, host=None, port=None, **kwargs) self.ex_populate_cache() def create_node(self, **kwargs): """ Create a new node instance in Abiquo All the :class:`Node`s need to be defined inside a VirtualAppliance (called :class:`NodeGroup` here). If there is no group name defined, 'libcloud' name will be used instead. This method wraps these Abiquo actions: 1. Create a group if it does not exist. 2. Register a new node in the group. 3. Deploy the node and boot it. 4. Retrieves it again to get schedule-time attributes (such as ips and vnc ports). The rest of the driver methods has been created in a way that, if any of these actions fail, the user can not reach an inconsistent state :keyword name: The name for this new node (required) :type name: ``str`` :keyword size: The size of resources allocated to this node. :type size: :class:`NodeSize` :keyword image: OS Image to boot on node. (required) :type image: :class:`NodeImage` :keyword location: Which data center to create a node in. If empty, undefined behavior will be selected. (optional) :type location: :class:`NodeLocation` :keyword group_name: Which group this node belongs to. If empty, it will be created into 'libcloud' group. If it does not found any group in the target location (random location if you have not set the parameter), then it will create a new group with this name. :type group_name: c{str} :return: The newly created node. 
:rtype: :class:`Node` """ # Define the location # To be clear: # 'xml_loc' is the xml element we navigate into (we need links) # 'loc' is the :class:`NodeLocation` entity xml_loc, loc = self._define_create_node_location(**kwargs) # Define the Group group = self._define_create_node_group(xml_loc, loc, **kwargs) # Register the Node vm = self._define_create_node_node(group, **kwargs) # Execute the 'create' in hypervisor action self._deploy_remote(vm) # Retrieve it again, to get some schedule-time defined values edit_vm = get_href(vm, 'edit') headers = {'Accept': self.NODE_MIME_TYPE} vm = self.connection.request(edit_vm, headers=headers).object return self._to_node(vm, self) def destroy_node(self, node): """ Destroy a node Depending on the provider, this may destroy all data associated with the node, including backups. :param node: The node to be destroyed :type node: :class:`Node` :return: True if the destroy was successful, otherwise False :rtype: ``bool`` """ # Refresh node state headers = {'Accept': self.NODE_MIME_TYPE} e_vm = self.connection.request(node.extra['uri_id'], headers=headers).object state = e_vm.findtext('state') if state in ['ALLOCATED', 'CONFIGURED', 'LOCKED', 'UNKNOWN']: raise LibcloudError('Invalid Node state', self) if state != 'NOT_ALLOCATED': # prepare the element that forces the undeploy vm_task = ET.Element('virtualmachinetask') force_undeploy = ET.SubElement(vm_task, 'forceUndeploy') force_undeploy.text = 'True' # Set the URI destroy_uri = node.extra['uri_id'] + '/action/undeploy' # Prepare the headers headers = {'Accept': self.AR_MIME_TYPE, 'Content-type': self.VM_TASK_MIME_TYPE} res = self.connection.async_request(action=destroy_uri, method='POST', data=tostring(vm_task), headers=headers) if state == 'NOT_ALLOCATED' or res.async_success(): self.connection.request(action=node.extra['uri_id'], method='DELETE') return True else: return False def ex_run_node(self, node): """ Runs a node Here there is a bit difference between Abiquo states and 
libcloud states, so this method is created to have better compatibility. In libcloud, if the node is not running, then it does not exist (avoiding UNKNOWN and temporal states). In Abiquo, you can define a node, and then deploy it. If the node is in :class:`NodeState.TERMINATED` libcloud's state and in 'NOT_DEPLOYED' Abiquo state, there is a way to run and recover it for libcloud using this method. There is no way to reach this state if you are using only libcloud, but you may have used another Abiquo client and now you want to recover your node to be used by libcloud. :param node: The node to run :type node: :class:`Node` :return: The node itself, but with the new state :rtype: :class:`Node` """ # Refresh node state e_vm = self.connection.request(node.extra['uri_id']).object state = e_vm.findtext('state') if state != 'NOT_ALLOCATED': raise LibcloudError('Invalid Node state', self) # -------------------------------------------------------- # Deploy the Node # -------------------------------------------------------- self._deploy_remote(e_vm) # -------------------------------------------------------- # Retrieve it again, to get some schedule-defined # values. # -------------------------------------------------------- edit_vm = get_href(e_vm, 'edit') headers = {'Accept': self.NODE_MIME_TYPE} e_vm = self.connection.request(edit_vm, headers=headers).object return self._to_node(e_vm, self) def ex_populate_cache(self): """ Populate the cache. For each connection, it is good to store some objects that will be useful for further requests, such as the 'user' and the 'enterprise' objects. Executes the 'login' resource after setting the connection parameters and, if the execution is successful, it sets the 'user' object into cache. After that, it also requests for the 'enterprise' and 'locations' data. List of locations should remain the same for a single libcloud connection. However, this method is public and you are able to refresh the list of locations any time. 
""" user_headers = {'Accept': self.USER_MIME_TYPE} user = self.connection.request('/login', headers=user_headers).object self.connection.cache['user'] = user e_ent = get_href(self.connection.cache['user'], 'enterprise') ent_headers = {'Accept': self.ENT_MIME_TYPE} ent = self.connection.request(e_ent, headers=ent_headers).object self.connection.cache['enterprise'] = ent vdcs_headers = {'Accept': self.VDCS_MIME_TYPE} uri_vdcs = '/cloud/virtualdatacenters' e_vdcs = self.connection.request(uri_vdcs, headers=vdcs_headers).object params = {"idEnterprise": self._get_enterprise_id()} dcs_headers = {'Accept': self.DCS_MIME_TYPE} e_dcs = self.connection.request('/admin/datacenters', headers=dcs_headers, params=params).object dc_dict = {} for dc in e_dcs.findall('datacenter'): key = get_href(dc, 'self') dc_dict[key] = dc # Populate locations name cache self.connection.cache['locations'] = {} for e_vdc in e_vdcs.findall('virtualDatacenter'): loc = get_href(e_vdc, 'location') if loc is not None: self.connection.cache['locations'][loc] = get_href(e_vdc, 'edit') def ex_create_group(self, name, location=None): """ Create an empty group. You can specify the location as well. 
:param group: name of the group (required) :type group: ``str`` :param location: location were to create the group :type location: :class:`NodeLocation` :returns: the created group :rtype: :class:`NodeGroup` """ # prepare the element vapp = ET.Element('virtualAppliance') vapp_name = ET.SubElement(vapp, 'name') vapp_name.text = name if location is None: location = self.list_locations()[0] elif location not in self.list_locations(): raise LibcloudError('Location does not exist') link_vdc = self.connection.cache['locations'][location] hdr_vdc = {'Accept': self.VDC_MIME_TYPE} e_vdc = self.connection.request(link_vdc, headers=hdr_vdc).object creation_link = get_href(e_vdc, 'virtualappliances') headers = {'Accept': self.VAPP_MIME_TYPE, 'Content-type': self.VAPP_MIME_TYPE} vapp = self.connection.request(creation_link, data=tostring(vapp), headers=headers, method='POST').object uri_vapp = get_href(vapp, 'edit') return NodeGroup(self, vapp.findtext('name'), uri=uri_vapp) def ex_destroy_group(self, group): """ Destroy a group. Be careful! Destroying a group means destroying all the :class:`Node` instances there and the group itself! If there is currently any action over any :class:`Node` of the :class:`NodeGroup`, then the method will raise an exception. 
:param name: The group (required) :type name: :class:`NodeGroup` :return: If the group was destroyed successfully :rtype: ``bool`` """ # Refresh group state e_group = self.connection.request(group.uri).object state = e_group.findtext('state') if state not in ['NOT_DEPLOYED', 'DEPLOYED']: error = 'Can not destroy group because of current state' raise LibcloudError(error, self) if state == 'DEPLOYED': # prepare the element that forces the undeploy vm_task = ET.Element('virtualmachinetask') force_undeploy = ET.SubElement(vm_task, 'forceUndeploy') force_undeploy.text = 'True' # Set the URI undeploy_uri = group.uri + '/action/undeploy' # Prepare the headers headers = {'Accept': self.AR_MIME_TYPE, 'Content-type': self.VM_TASK_MIME_TYPE} res = self.connection.async_request(action=undeploy_uri, method='POST', data=tostring(vm_task), headers=headers) if state == 'NOT_DEPLOYED' or res.async_success(): # The node is no longer deployed. Unregister it. self.connection.request(action=group.uri, method='DELETE') return True else: return False def ex_list_groups(self, location=None): """ List all groups. :param location: filter the groups by location (optional) :type location: a :class:`NodeLocation` instance. 
:return: the list of :class:`NodeGroup` """ groups = [] for vdc in self._get_locations(location): link_vdc = self.connection.cache['locations'][vdc] hdr_vdc = {'Accept': self.VDC_MIME_TYPE} e_vdc = self.connection.request(link_vdc, headers=hdr_vdc).object apps_link = get_href(e_vdc, 'virtualappliances') hdr_vapps = {'Accept': self.VAPPS_MIME_TYPE} vapps = self.connection.request(apps_link, headers=hdr_vapps).object for vapp in vapps.findall('virtualAppliance'): nodes = [] vms_link = get_href(vapp, 'virtualmachines') headers = {'Accept': self.NODES_MIME_TYPE} vms = self.connection.request(vms_link, headers=headers).object for vm in vms.findall('virtualMachine'): nodes.append(self._to_node(vm, self)) group = NodeGroup(self, vapp.findtext('name'), nodes, get_href(vapp, 'edit')) groups.append(group) return groups def list_images(self, location=None): """ List images on Abiquo Repositories :keyword location: The location to list images for. :type location: :class:`NodeLocation` :return: list of node image objects :rtype: ``list`` of :class:`NodeImage` """ enterprise_id = self._get_enterprise_id() uri = '/admin/enterprises/%s/datacenterrepositories/' % (enterprise_id) repos_hdr = {'Accept': self.DCRS_MIME_TYPE} repos = self.connection.request(uri, headers=repos_hdr).object images = [] for repo in repos.findall('datacenterRepository'): # filter by location. 
Skips when the name of the location # is different from the 'datacenterRepository' element for vdc in self._get_locations(location): # Check if the virtual datacenter belongs to this repo link_vdc = self.connection.cache['locations'][vdc] hdr_vdc = {'Accept': self.VDC_MIME_TYPE} e_vdc = self.connection.request(link_vdc, headers=hdr_vdc).object dc_link_vdc = get_href(e_vdc, 'location') dc_link_repo = get_href(repo, 'datacenter') if dc_link_vdc.split("/")[-1] == dc_link_repo.split("/")[-1]: # Filter the template in case we don't have it yet url_templates = get_href(repo, 'virtualmachinetemplates') hypervisor_type = e_vdc.findtext('hypervisorType') params = {'hypervisorTypeName': hypervisor_type} headers = {'Accept': self.VMTPLS_MIME_TYPE} templates = self.connection.request(url_templates, params, headers=headers).object for templ in templates.findall('virtualMachineTemplate'): # Avoid duplicated templates id_template = templ.findtext('id') ids = [image.id for image in images] if id_template not in ids: images.append(self._to_nodeimage(templ, self, get_href(repo, 'edit'))) return images def list_locations(self): """ Return list of locations where the user has access to. :return: the list of :class:`NodeLocation` available for the current user :rtype: ``list`` of :class:`NodeLocation` """ return list(self.connection.cache['locations'].keys()) def list_nodes(self, location=None): """ List all nodes. :param location: Filter the groups by location (optional) :type location: a :class:`NodeLocation` instance. :return: List of node objects :rtype: ``list`` of :class:`Node` """ nodes = [] for group in self.ex_list_groups(location): nodes.extend(group.nodes) return nodes def list_sizes(self, location=None): """ List sizes on a provider. Abiquo does not work with sizes. However, this method returns a list of predefined ones (copied from :class:`DummyNodeDriver` but without price neither bandwidth) to help the users to create their own. 
If you call the method :class:`AbiquoNodeDriver.create_node` with the size informed, it will just override the 'ram' value of the 'image' template. So it is no too much usefull work with sizes... :return: The list of sizes :rtype: ``list`` of :class:`NodeSizes` """ return [ NodeSize(id=1, name='Small', ram=128, disk=4, bandwidth=None, price=None, driver=self), NodeSize(id=2, name='Medium', ram=512, disk=16, bandwidth=None, price=None, driver=self), NodeSize(id=3, name='Big', ram=4096, disk=32, bandwidth=None, price=None, driver=self), NodeSize(id=4, name="XXL Big", ram=4096 * 2, disk=32 * 4, bandwidth=None, price=None, driver=self) ] def reboot_node(self, node): """ Reboot a node. :param node: The node to be rebooted :type node: :class:`Node` :return: True if the reboot was successful, otherwise False :rtype: ``bool`` """ reboot_uri = node.extra['uri_id'] + '/action/reset' reboot_hdr = {'Accept': self.AR_MIME_TYPE} res = self.connection.async_request(action=reboot_uri, method='POST', headers=reboot_hdr) return res.async_success() # ------------------------- # Extenstion methods # ------------------------- def _ex_connection_class_kwargs(self): """ Set the endpoint as an extra :class:`AbiquoConnection` argument. According to Connection code, the "url" argument should be parsed properly to connection. :return: ``dict`` of :class:`AbiquoConnection` input arguments """ return {'url': self.endpoint} def _deploy_remote(self, e_vm): """ Asynchronous call to create the node. 
""" # -------------------------------------------------------- # Deploy the Node # -------------------------------------------------------- # prepare the element that forces the deploy vm_task = ET.Element('virtualmachinetask') force_deploy = ET.SubElement(vm_task, 'forceEnterpriseSoftLimits') force_deploy.text = 'True' # Prepare the headers headers = {'Accept': self.AR_MIME_TYPE, 'Content-type': self.VM_TASK_MIME_TYPE} link_deploy = get_href(e_vm, 'deploy') res = self.connection.async_request(action=link_deploy, method='POST', data=tostring(vm_task), headers=headers) if not res.async_success(): raise LibcloudError('Could not run the node', self) def _to_location(self, vdc, dc, driver): """ Generates the :class:`NodeLocation` class. """ identifier = vdc.findtext('id') name = vdc.findtext('name') country = dc.findtext('name') return NodeLocation(identifier, name, country, driver) def _to_node(self, vm, driver): """ Generates the :class:`Node` class. """ identifier = vm.findtext('id') name = vm.findtext('label') state = AbiquoResponse.NODE_STATE_MAP[vm.findtext('state')] link_image = get_href(vm, 'virtualmachinetemplate') link_hdr = {'Accept': self.VMTPL_MIME_TYPE} image_element = self.connection.request(link_image, headers=link_hdr).object repo_link = get_href(image_element, 'datacenterrepository') image = self._to_nodeimage(image_element, self, repo_link) # Fill the 'ips' data private_ips = [] public_ips = [] nics_hdr = {'Accept': self.NICS_MIME_TYPE} nics_element = self.connection.request(get_href(vm, 'nics'), headers=nics_hdr).object for nic in nics_element.findall('nic'): ip = nic.findtext('ip') for link in nic.findall('link'): rel = link.attrib['rel'] if rel == 'privatenetwork': private_ips.append(ip) elif rel in ['publicnetwork', 'externalnetwork', 'unmanagednetwork']: public_ips.append(ip) extra = {'uri_id': get_href(vm, 'edit')} if vm.find('vdrpIp') is not None: extra['vdrp_ip'] = vm.findtext('vdrpIP') extra['vdrp_port'] = vm.findtext('vdrpPort') return 
Node(identifier, name, state, public_ips, private_ips, driver, image=image, extra=extra) def _to_nodeimage(self, template, driver, repo): """ Generates the :class:`NodeImage` class. """ identifier = template.findtext('id') name = template.findtext('name') url = get_href(template, 'edit') hdreqd = template.findtext('hdRequired') extra = {'repo': repo, 'url': url, 'hdrequired': hdreqd} return NodeImage(identifier, name, driver, extra) def _get_locations(self, location=None): """ Returns the locations as a generator. """ if location is not None: yield location else: for loc in self.list_locations(): yield loc def _get_enterprise_id(self): """ Returns the identifier of the logged user's enterprise. """ return self.connection.cache['enterprise'].findtext('id') def _define_create_node_location(self, **kwargs): """ Search for a location where to create the node. Based on 'create_node' **kwargs argument, decide in which location will be created. """ # First, get image location if 'image' not in kwargs: error = "'image' parameter is mandatory" raise LibcloudError(error, self) image = kwargs['image'] # Get the location argument location = None if 'location' in kwargs: location = kwargs['location'] if location not in self.list_locations(): raise LibcloudError('Location does not exist') # Check if the image is compatible with any of the locations or # the input location loc = None target_loc = None for candidate_loc in self._get_locations(location): link_vdc = self.connection.cache['locations'][candidate_loc] hdr_vdc = {'Accept': self.VDC_MIME_TYPE} e_vdc = self.connection.request(link_vdc, headers=hdr_vdc).object for img in self.list_images(candidate_loc): if img.id == image.id: loc = e_vdc target_loc = candidate_loc break if loc is None: error = 'The image can not be used in any location' raise LibcloudError(error, self) return loc, target_loc def _define_create_node_group(self, xml_loc, loc, **kwargs): """ Search for a group where to create the node. 
def _define_create_node_node(self, group, **kwargs):
    """
    Define (register) the node in the API before creating it.

    In Abiquo, you first need to 'register' or 'define' the node in
    the API before creating it on the target hypervisor.  This builds
    the ``virtualMachine`` XML payload, POSTs it to the group's
    virtualmachines collection, then re-reads the created resource via
    its 'edit' link and returns that element.
    """
    vm = ET.Element('virtualMachine')
    # Optional display name for the VM.
    if 'name' in kwargs:
        vmname = ET.SubElement(vm, 'label')
        vmname.text = kwargs['name']
    # Link the VM definition to its template (the libcloud 'image').
    attrib = {'type': self.VMTPL_MIME_TYPE,
              'rel': 'virtualmachinetemplate',
              'href': kwargs['image'].extra['url']}
    ET.SubElement(vm, 'link', attrib=attrib)
    headers = {'Accept': self.NODE_MIME_TYPE,
               'Content-type': self.NODE_MIME_TYPE}
    if 'size' in kwargs:
        # Override the 'NodeSize' data: only the RAM value from the
        # size is applied on top of the template defaults.
        ram = ET.SubElement(vm, 'ram')
        ram.text = str(kwargs['size'].ram)
    # Create the virtual machine
    nodes_link = group.uri + '/virtualmachines'
    # NOTE: 'vm' is rebound from the request payload to the response
    # element here.
    vm = self.connection.request(nodes_link, data=tostring(vm),
                                 headers=headers, method='POST').object
    edit_vm = get_href(vm, 'edit')
    headers = {'Accept': self.NODE_MIME_TYPE}
    # Re-fetch the freshly created resource so the caller gets the
    # server-side canonical representation.
    return self.connection.request(edit_vm, headers=headers).object
We offer a way to handle virtual appliances (called NodeGroup to maintain some kind of name conventions here) inside the :class:`AbiquoNodeDriver` without breaking compatibility of the rest of libcloud API. If the user does not want to handle groups, all the virtual machines will be created inside a group named 'libcloud' """ DEFAULT_GROUP_NAME = 'libcloud' def __init__(self, driver, name=DEFAULT_GROUP_NAME, nodes=[], uri=''): """ Initialize a new group object. """ self.driver = driver self.name = name self.nodes = nodes self.uri = uri def __repr__(self): return (('') % (self.name, ",".join(map(str, self.nodes)))) def destroy(self): """ Destroys the group delegating the execution to :class:`AbiquoNodeDriver`. """ return self.driver.ex_destroy_group(self) apache-libcloud-2.2.1/libcloud/compute/drivers/joyent.py0000664000175000017500000001714312701023453023232 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Joyent Cloud (http://www.joyentcloud.com) driver. 
""" import base64 try: import simplejson as json except: import json from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.common.types import LibcloudError from libcloud.compute.providers import Provider from libcloud.common.base import JsonResponse, ConnectionUserAndKey from libcloud.compute.types import NodeState, InvalidCredsError from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize from libcloud.utils.networking import is_private_subnet API_HOST_SUFFIX = '.api.joyentcloud.com' API_VERSION = '~6.5' NODE_STATE_MAP = { 'provisioning': NodeState.PENDING, 'running': NodeState.RUNNING, 'stopping': NodeState.TERMINATED, 'stopped': NodeState.TERMINATED, 'deleted': NodeState.TERMINATED } VALID_REGIONS = [ 'us-east-1', 'us-east-2', 'us-east-3', 'us-west-1', 'us-sw-1', 'eu-ams-1' ] DEFAULT_REGION = 'us-east-1' class JoyentResponse(JsonResponse): """ Joyent response class. """ valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED, httplib.NO_CONTENT] def parse_error(self): if self.status == httplib.UNAUTHORIZED: data = self.parse_body() raise InvalidCredsError(data['code'] + ': ' + data['message']) return self.body def success(self): return self.status in self.valid_response_codes class JoyentConnection(ConnectionUserAndKey): """ Joyent connection class. """ responseCls = JoyentResponse allow_insecure = False def add_default_headers(self, headers): headers['Accept'] = 'application/json' headers['Content-Type'] = 'application/json; charset=UTF-8' headers['X-Api-Version'] = API_VERSION user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8')) return headers class JoyentNodeDriver(NodeDriver): """ Joyent node driver class. 
""" type = Provider.JOYENT name = 'Joyent' website = 'http://www.joyentcloud.com' connectionCls = JoyentConnection features = {'create_node': ['generates_password']} def __init__(self, key, secret=None, secure=True, host=None, port=None, region=DEFAULT_REGION, **kwargs): # Location is here for backward compatibility reasons if 'location' in kwargs: region = kwargs['location'] if region not in VALID_REGIONS: msg = 'Invalid region: "%s". Valid region: %s' raise LibcloudError(msg % (region, ', '.join(VALID_REGIONS)), driver=self) super(JoyentNodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, region=region, **kwargs) self.connection.host = region + API_HOST_SUFFIX def list_images(self): result = self.connection.request('/my/datasets').object images = [] for value in result: extra = {'type': value['type'], 'urn': value['urn'], 'os': value['os'], 'default': value['default']} image = NodeImage(id=value['id'], name=value['name'], driver=self.connection.driver, extra=extra) images.append(image) return images def list_sizes(self): result = self.connection.request('/my/packages').object sizes = [] for value in result: size = NodeSize(id=value['name'], name=value['name'], ram=value['memory'], disk=value['disk'], bandwidth=None, price=0.0, driver=self.connection.driver) sizes.append(size) return sizes def list_nodes(self): result = self.connection.request('/my/machines').object nodes = [] for value in result: node = self._to_node(value) nodes.append(node) return nodes def reboot_node(self, node): data = json.dumps({'action': 'reboot'}) result = self.connection.request('/my/machines/%s' % (node.id), data=data, method='POST') return result.status == httplib.ACCEPTED def destroy_node(self, node): result = self.connection.request('/my/machines/%s' % (node.id), method='DELETE') return result.status == httplib.NO_CONTENT def create_node(self, **kwargs): name = kwargs['name'] size = kwargs['size'] image = kwargs['image'] data = json.dumps({'name': 
def _to_node(self, data):
    """
    Convert a machine dict from the Joyent API into a :class:`Node`.

    IPs are split into public/private using RFC1918 subnet membership;
    the root password is exposed via ``extra['password']`` when the API
    returns credentials metadata.
    """
    state = NODE_STATE_MAP[data['state']]
    public_ips = []
    private_ips = []
    extra = {}
    # Classify each address: RFC1918 ranges are private, the rest
    # public.
    for ip in data['ips']:
        if is_private_subnet(ip):
            private_ips.append(ip)
        else:
            public_ips.append(ip)
    # Root credentials are only present on freshly provisioned
    # machines.
    if 'credentials' in data['metadata']:
        extra['password'] = data['metadata']['credentials']['root']
    node = Node(id=data['id'], name=data['name'], state=state,
                public_ips=public_ips, private_ips=private_ips,
                driver=self.connection.driver, extra=extra)
    return node
def list_images(self, location=None):
    """
    List images (KT uCloud "product types").

    KT uCloud exposes templates through the non-stock
    ``listAvailableProductTypes`` call instead of CloudStack's
    ``listTemplates``.

    :param location: currently ignored -- the request sent to the API
        carries no zone filter.  NOTE(review): the previous code built
        a ``{'templatefilter': 'executable', 'zoneid': ...}`` dict but
        never passed it to the request; that dead code is removed here.
        Confirm whether ``zoneid`` should actually be forwarded.
    :type location: :class:`NodeLocation`

    :return: one :class:`NodeImage` per product type
    :rtype: ``list`` of :class:`NodeImage`
    """
    imgs = self._sync_request(command='listAvailableProductTypes',
                              method='GET')
    images = []
    for img in imgs['producttypes']:
        extra = {'hypervisor': '',
                 'format': '',
                 'os': img['templatedesc'],
                 'templateid': img['templateid'],
                 'zoneid': img['zoneid']}
        images.append(NodeImage(img['serviceofferingid'],
                                img['serviceofferingdesc'],
                                self,
                                extra))
    return images
None: params['usageplantype'] = 'hourly' else: params['usageplantype'] = usageplantype if size.id != self.EMPTY_DISKOFFERINGID: params['diskofferingid'] = size.id result = self._async_request( command='deployVirtualMachine', params=params, method='GET') node = result['virtualmachine'] return Node( id=node['id'], name=node['displayname'], state=self.NODE_STATE_MAP[node['state']], public_ips=[], private_ips=[], driver=self, extra={ 'zoneid': image.extra['zoneid'], 'ip_addresses': [], 'forwarding_rules': [], } ) apache-libcloud-2.2.1/libcloud/compute/drivers/bluebox.py0000664000175000017500000001650012705460761023371 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ libcloud driver for the Blue Box Blocks API This driver implements all libcloud functionality for the Blue Box Blocks API. 
class BlueboxResponse(JsonResponse):
    """
    Response wrapper for the Blue Box Blocks API.

    A 401 raises :class:`InvalidCredsError`; any other status is
    returned to the caller as the raw body.
    """
    def parse_error(self):
        if int(self.status) == 401:
            if not self.body:
                # No body to report -- fall back to "status: error".
                raise InvalidCredsError(str(self.status) + ': ' +
                                        self.error)
            else:
                raise InvalidCredsError(self.body)
        return self.body
def list_images(self, location=None):
    """
    List the block templates available on the account.

    :param location: ignored; Blue Box exposes a single location.
    :type location: :class:`NodeLocation`

    :rtype: ``list`` of :class:`NodeImage`
    """
    result = self.connection.request('/api/block_templates.json')
    # The previous implementation wrapped each image in a one-element
    # list and called ``images.extend([...])``; a comprehension states
    # the intent directly.
    return [self._to_image(image) for image in result.object]
Exception("SSH public key or password required.") params = urlencode(data) result = self.connection.request('/api/blocks.json', headers=headers, data=params, method='POST') node = self._to_node(result.object) if getattr(auth, "generated", False): node.extra['password'] = auth.password return node def destroy_node(self, node): url = '/api/blocks/%s.json' % (node.id) result = self.connection.request(url, method='DELETE') return result.status == 200 def list_locations(self): return [NodeLocation(0, "Blue Box Seattle US", 'US', self)] def reboot_node(self, node): url = '/api/blocks/%s/reboot.json' % (node.id) result = self.connection.request(url, method="PUT") return result.status == 200 def _to_node(self, vm): state = NODE_STATE_MAP[vm.get('status', NodeState.UNKNOWN)] n = Node(id=vm['id'], name=vm['hostname'], state=state, public_ips=[ip['address'] for ip in vm['ips']], private_ips=[], extra={'storage': vm['storage'], 'cpu': vm['cpu']}, driver=self.connection.driver) return n def _to_image(self, image): image = NodeImage(id=image['id'], name=image['description'], driver=self.connection.driver) return image apache-libcloud-2.2.1/libcloud/compute/drivers/gogrid.py0000664000175000017500000003536212701023453023200 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ GoGrid driver """ import time import hashlib import copy from libcloud.utils.py3 import b from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.common.gogrid import GoGridConnection, BaseGoGridDriver from libcloud.compute.providers import Provider from libcloud.compute.types import NodeState from libcloud.compute.base import Node, NodeDriver from libcloud.compute.base import NodeSize, NodeImage, NodeLocation STATE = { "Starting": NodeState.PENDING, "On": NodeState.RUNNING, "On/Saving": NodeState.RUNNING, "Off": NodeState.PENDING, "Restarting": NodeState.REBOOTING, "Saving": NodeState.PENDING, "Restoring": NodeState.PENDING, } GOGRID_INSTANCE_TYPES = { '512MB': {'id': '512MB', 'name': '512MB', 'ram': 512, 'disk': 30, 'bandwidth': None}, '1GB': {'id': '1GB', 'name': '1GB', 'ram': 1024, 'disk': 60, 'bandwidth': None}, '2GB': {'id': '2GB', 'name': '2GB', 'ram': 2048, 'disk': 120, 'bandwidth': None}, '4GB': {'id': '4GB', 'name': '4GB', 'ram': 4096, 'disk': 240, 'bandwidth': None}, '8GB': {'id': '8GB', 'name': '8GB', 'ram': 8192, 'disk': 480, 'bandwidth': None}, '16GB': {'id': '16GB', 'name': '16GB', 'ram': 16384, 'disk': 960, 'bandwidth': None}, '24GB': {'id': '24GB', 'name': '24GB', 'ram': 24576, 'disk': 960, 'bandwidth': None}, } class GoGridNode(Node): # Generating uuid based on public ip to get around missing id on # create_node in gogrid api # # Used public ip since it is not mutable and specified at create time, # so uuid of node should not change after add is completed def get_uuid(self): return hashlib.sha1( b("%s:%s" % (self.public_ips, self.driver.type)) ).hexdigest() class GoGridNodeDriver(BaseGoGridDriver, NodeDriver): """ GoGrid node driver """ connectionCls = GoGridConnection type = Provider.GOGRID api_name = 'gogrid' name = 'GoGrid' website = 'http://www.gogrid.com/' features = {"create_node": ["generates_password"]} 
def _get_state(self, element):
    """
    Map a GoGrid server state name onto a libcloud ``NodeState``.

    Unrecognised or missing state names yield ``NodeState.UNKNOWN``.
    """
    try:
        return STATE[element['state']['name']]
    except KeyError:
        # Only swallow lookup failures.  The previous bare ``except:``
        # also hid genuine programming errors (TypeError etc.).
        pass
    return NodeState.UNKNOWN
def list_sizes(self, location=None):
    """
    Return the GoGrid RAM-based instance sizes, with pricing attached
    from the local pricing table.
    """
    sizes = []
    for size_id, spec in self._instance_types.items():
        attrs = copy.deepcopy(spec)
        attrs['price'] = self._get_size_price(size_id=size_id)
        sizes.append(NodeSize(driver=self.connection.driver, **attrs))
    return sizes
def create_node(self, **kwargs):
    """Create a new GoGrid node

    @inherits: :class:`NodeDriver.create_node`

    :keyword    ex_description: Description of a Node
    :type       ex_description: ``str``

    :keyword    ex_ip: Public IP address to use for a Node. If not
                specified, first available IP address will be picked
    :type       ex_ip: ``str``

    :rtype: :class:`GoGridNode`
    """
    node = self.ex_create_node_nowait(**kwargs)

    # GoGrid assigns the node id only a few minutes after creation, so
    # poll the node list (matched by public IP, which is fixed at
    # create time) until the id shows up or we time out.
    timeout = 60 * 20
    waittime = 0
    interval = 2 * 60

    while node.id is None and waittime < timeout:
        nodes = self.list_nodes()
        for i in nodes:
            if i.public_ips[0] == node.public_ips[0] and i.id is not None:
                return i
        waittime += interval
        time.sleep(interval)

    # BUG FIX: this previously tested ``if id is None`` -- the *builtin*
    # ``id`` function, which is never None -- so the timeout error could
    # never be raised.
    if node.id is None:
        raise Exception(
            "Wasn't able to wait for id allocation for the node %s"
            % str(node))

    return node
def ex_list_ips(self, **kwargs):
    """Return list of IP addresses assigned to the account.

    :keyword    public: set to True to list only public IPs or False
                to list only private IPs. Set to None or not specify
                at all not to filter by type
    :type       public: ``bool``

    :keyword    assigned: set to True to list only addresses assigned
                to servers, False to list unassigned addresses and set
                to None or don't set at all not no filter by state
    :type       assigned: ``bool``

    :keyword    location: filter IP addresses by location
    :type       location: :class:`NodeLocation`

    :rtype: ``list`` of :class:`GoGridIpAddress`
    """
    params = {}

    # Each filter is only added when the caller supplied a non-None
    # value; a missing or None keyword means "don't filter".
    public = kwargs.get('public')
    if public is not None:
        params['ip.type'] = 'Public' if public else 'Private'

    assigned = kwargs.get('assigned')
    if assigned is not None:
        params['ip.state'] = 'Assigned' if assigned else 'Unassigned'

    location = kwargs.get('location')
    if location is not None:
        params['datacenter'] = location.id

    response = self.connection.request('/api/grid/ip/list',
                                       params=params)
    return self._to_ips(response.object)
from __future__ import with_statement import sys import base64 import warnings from libcloud.utils.py3 import b from libcloud.utils.py3 import urlparse from libcloud.compute.providers import Provider from libcloud.common.cloudstack import CloudStackDriverMixIn from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation from libcloud.compute.base import NodeSize, StorageVolume, VolumeSnapshot from libcloud.compute.base import KeyPair from libcloud.compute.types import NodeState, LibcloudError from libcloud.compute.types import KeyPairDoesNotExistError, StorageVolumeState from libcloud.utils.networking import is_private_subnet # Utility functions def transform_int_or_unlimited(value): try: return int(value) except ValueError: e = sys.exc_info()[1] if str(value).lower() == 'unlimited': return -1 raise e """ Define the extra dictionary for specific resources """ RESOURCE_EXTRA_ATTRIBUTES_MAP = { 'network': { 'broadcast_domain_type': { 'key_name': 'broadcastdomaintype', 'transform_func': str }, 'traffic_type': { 'key_name': 'traffictype', 'transform_func': str }, 'zone_name': { 'key_name': 'zonename', 'transform_func': str }, 'network_offering_name': { 'key_name': 'networkofferingname', 'transform_func': str }, 'network_offeringdisplay_text': { 'key_name': 'networkofferingdisplaytext', 'transform_func': str }, 'network_offering_availability': { 'key_name': 'networkofferingavailability', 'transform_func': str }, 'is_system': { 'key_name': 'issystem', 'transform_func': str }, 'state': { 'key_name': 'state', 'transform_func': str }, 'dns1': { 'key_name': 'dns1', 'transform_func': str }, 'dns2': { 'key_name': 'dns2', 'transform_func': str }, 'type': { 'key_name': 'type', 'transform_func': str }, 'acl_type': { 'key_name': 'acltype', 'transform_func': str }, 'subdomain_access': { 'key_name': 'subdomainaccess', 'transform_func': str }, 'network_domain': { 'key_name': 'networkdomain', 'transform_func': str }, 'physical_network_id': { 'key_name': 
'physicalnetworkid', 'transform_func': str }, 'can_use_for_deploy': { 'key_name': 'canusefordeploy', 'transform_func': str }, 'gateway': { 'key_name': 'gateway', 'transform_func': str }, 'netmask': { 'key_name': 'netmask', 'transform_func': str }, 'vpc_id': { 'key_name': 'vpcid', 'transform_func': str }, 'project_id': { 'key_name': 'projectid', 'transform_func': str } }, 'node': { 'haenable': { 'key_name': 'haenable', 'transform_func': str }, 'zone_id': { 'key_name': 'zoneid', 'transform_func': str }, 'zone_name': { 'key_name': 'zonename', 'transform_func': str }, 'key_name': { 'key_name': 'keypair', 'transform_func': str }, 'password': { 'key_name': 'password', 'transform_func': str }, 'image_id': { 'key_name': 'templateid', 'transform_func': str }, 'image_name': { 'key_name': 'templatename', 'transform_func': str }, 'template_display_text': { 'key_name': 'templatdisplaytext', 'transform_func': str }, 'password_enabled': { 'key_name': 'passwordenabled', 'transform_func': str }, 'size_id': { 'key_name': 'serviceofferingid', 'transform_func': str }, 'size_name': { 'key_name': 'serviceofferingname', 'transform_func': str }, 'root_device_id': { 'key_name': 'rootdeviceid', 'transform_func': str }, 'root_device_type': { 'key_name': 'rootdevicetype', 'transform_func': str }, 'hypervisor': { 'key_name': 'hypervisor', 'transform_func': str }, 'project': { 'key_name': 'project', 'transform_func': str }, 'project_id': { 'key_name': 'projectid', 'transform_func': str }, 'nics:': { 'key_name': 'nic', 'transform_func': list } }, 'volume': { 'created': { 'key_name': 'created', 'transform_func': str }, 'device_id': { 'key_name': 'deviceid', 'transform_func': transform_int_or_unlimited }, 'instance_id': { 'key_name': 'virtualmachineid', 'transform_func': str }, 'serviceoffering_id': { 'key_name': 'serviceofferingid', 'transform_func': str }, 'state': { 'key_name': 'state', 'transform_func': str }, 'volume_type': { 'key_name': 'type', 'transform_func': str }, 'zone_id': { 
'key_name': 'zoneid', 'transform_func': str }, 'zone_name': { 'key_name': 'zonename', 'transform_func': str } }, 'vpc': { 'created': { 'key_name': 'created', 'transform_func': str }, 'domain': { 'key_name': 'domain', 'transform_func': str }, 'domain_id': { 'key_name': 'domainid', 'transform_func': transform_int_or_unlimited }, 'network_domain': { 'key_name': 'networkdomain', 'transform_func': str }, 'state': { 'key_name': 'state', 'transform_func': str }, 'vpc_offering_id': { 'key_name': 'vpcofferingid', 'transform_func': str }, 'zone_name': { 'key_name': 'zonename', 'transform_func': str }, 'zone_id': { 'key_name': 'zoneid', 'transform_func': str } }, 'project': { 'account': {'key_name': 'account', 'transform_func': str}, 'cpuavailable': {'key_name': 'cpuavailable', 'transform_func': transform_int_or_unlimited}, 'cpulimit': {'key_name': 'cpulimit', 'transform_func': transform_int_or_unlimited}, 'cputotal': {'key_name': 'cputotal', 'transform_func': transform_int_or_unlimited}, 'domain': {'key_name': 'domain', 'transform_func': str}, 'domainid': {'key_name': 'domainid', 'transform_func': str}, 'ipavailable': {'key_name': 'ipavailable', 'transform_func': transform_int_or_unlimited}, 'iplimit': {'key_name': 'iplimit', 'transform_func': transform_int_or_unlimited}, 'iptotal': {'key_name': 'iptotal', 'transform_func': transform_int_or_unlimited}, 'memoryavailable': {'key_name': 'memoryavailable', 'transform_func': transform_int_or_unlimited}, 'memorylimit': {'key_name': 'memorylimit', 'transform_func': transform_int_or_unlimited}, 'memorytotal': {'key_name': 'memorytotal', 'transform_func': transform_int_or_unlimited}, 'networkavailable': {'key_name': 'networkavailable', 'transform_func': transform_int_or_unlimited}, 'networklimit': {'key_name': 'networklimit', 'transform_func': transform_int_or_unlimited}, 'networktotal': {'key_name': 'networktotal', 'transform_func': transform_int_or_unlimited}, 'primarystorageavailable': { 'key_name': 'primarystorageavailable', 
'transform_func': transform_int_or_unlimited}, 'primarystoragelimit': {'key_name': 'primarystoragelimit', 'transform_func': transform_int_or_unlimited}, 'primarystoragetotal': {'key_name': 'primarystoragetotal', 'transform_func': transform_int_or_unlimited}, 'secondarystorageavailable': { 'key_name': 'secondarystorageavailable', 'transform_func': transform_int_or_unlimited}, 'secondarystoragelimit': { 'key_name': 'secondarystoragelimit', 'transform_func': transform_int_or_unlimited}, 'secondarystoragetotal': { 'key_name': 'secondarystoragetotal', 'transform_func': transform_int_or_unlimited}, 'snapshotavailable': {'key_name': 'snapshotavailable', 'transform_func': transform_int_or_unlimited}, 'snapshotlimit': {'key_name': 'snapshotlimit', 'transform_func': transform_int_or_unlimited}, 'snapshottotal': {'key_name': 'snapshottotal', 'transform_func': transform_int_or_unlimited}, 'state': {'key_name': 'state', 'transform_func': str}, 'tags': {'key_name': 'tags', 'transform_func': str}, 'templateavailable': {'key_name': 'templateavailable', 'transform_func': transform_int_or_unlimited}, 'templatelimit': {'key_name': 'templatelimit', 'transform_func': transform_int_or_unlimited}, 'templatetotal': {'key_name': 'templatetotal', 'transform_func': transform_int_or_unlimited}, 'vmavailable': {'key_name': 'vmavailable', 'transform_func': transform_int_or_unlimited}, 'vmlimit': {'key_name': 'vmlimit', 'transform_func': transform_int_or_unlimited}, 'vmrunning': {'key_name': 'vmrunning', 'transform_func': transform_int_or_unlimited}, 'vmtotal': {'key_name': 'vmtotal', 'transform_func': transform_int_or_unlimited}, 'volumeavailable': {'key_name': 'volumeavailable', 'transform_func': transform_int_or_unlimited}, 'volumelimit': {'key_name': 'volumelimit', 'transform_func': transform_int_or_unlimited}, 'volumetotal': {'key_name': 'volumetotal', 'transform_func': transform_int_or_unlimited}, 'vpcavailable': {'key_name': 'vpcavailable', 'transform_func': transform_int_or_unlimited}, 
'vpclimit': {'key_name': 'vpclimit', 'transform_func': transform_int_or_unlimited}, 'vpctotal': {'key_name': 'vpctotal', 'transform_func': transform_int_or_unlimited} }, 'nic': { 'secondary_ip': { 'key_name': 'secondaryip', 'transform_func': list } }, 'vpngateway': { 'for_display': { 'key_name': 'fordisplay', 'transform_func': str }, 'project': { 'key_name': 'project', 'transform_func': str }, 'project_id': { 'key_name': 'projectid', 'transform_func': str }, 'removed': { 'key_name': 'removed', 'transform_func': str } }, 'vpncustomergateway': { 'account': { 'key_name': 'account', 'transform_func': str }, 'domain': { 'key_name': 'domain', 'transform_func': str }, 'domain_id': { 'key_name': 'domainid', 'transform_func': str }, 'dpd': { 'key_name': 'dpd', 'transform_func': bool }, 'esp_lifetime': { 'key_name': 'esplifetime', 'transform_func': transform_int_or_unlimited }, 'ike_lifetime': { 'key_name': 'ikelifetime', 'transform_func': transform_int_or_unlimited }, 'name': { 'key_name': 'name', 'transform_func': str } }, 'vpnconnection': { 'account': { 'key_name': 'account', 'transform_func': str }, 'domain': { 'key_name': 'domain', 'transform_func': str }, 'domain_id': { 'key_name': 'domainid', 'transform_func': str }, 'for_display': { 'key_name': 'fordisplay', 'transform_func': str }, 'project': { 'key_name': 'project', 'transform_func': str }, 'project_id': { 'key_name': 'projectid', 'transform_func': str } } } class CloudStackNode(Node): """ Subclass of Node so we can expose our extension methods. """ def ex_allocate_public_ip(self): """ Allocate a public IP and bind it to this node. """ return self.driver.ex_allocate_public_ip(self) def ex_release_public_ip(self, address): """ Release a public IP that this node holds. """ return self.driver.ex_release_public_ip(self, address) def ex_create_ip_forwarding_rule(self, address, protocol, start_port, end_port=None): """ Add a NAT/firewall forwarding rule for a port or ports. 
""" return self.driver.ex_create_ip_forwarding_rule(node=self, address=address, protocol=protocol, start_port=start_port, end_port=end_port) def ex_create_port_forwarding_rule(self, address, private_port, public_port, protocol, public_end_port=None, private_end_port=None, openfirewall=True): """ Add a port forwarding rule for port or ports. """ return self.driver.ex_create_port_forwarding_rule( node=self, address=address, private_port=private_port, public_port=public_port, protocol=protocol, public_end_port=public_end_port, private_end_port=private_end_port, openfirewall=openfirewall) def ex_delete_ip_forwarding_rule(self, rule): """ Delete a port forwarding rule. """ return self.driver.ex_delete_ip_forwarding_rule(node=self, rule=rule) def ex_delete_port_forwarding_rule(self, rule): """ Delete a NAT/firewall rule. """ return self.driver.ex_delete_port_forwarding_rule(node=self, rule=rule) def ex_start(self): """ Starts a stopped virtual machine. """ return self.driver.ex_start(node=self) def ex_stop(self): """ Stops a running virtual machine. """ return self.driver.ex_stop(node=self) class CloudStackAddress(object): """ A public IP address. 
:param id: UUID of the Public IP :type id: ``str`` :param address: The public IP address :type address: ``str`` :param associated_network_id: The ID of the network where this address has been associated with :type associated_network_id: ``str`` :param vpc_id: VPC the ip belongs to :type vpc_id: ``str`` :param virtualmachine_id: The ID of virutal machine this address is assigned to :type virtualmachine_id: ``str`` """ def __init__(self, id, address, driver, associated_network_id=None, vpc_id=None, virtualmachine_id=None): self.id = id self.address = address self.driver = driver self.associated_network_id = associated_network_id self.vpc_id = vpc_id self.virtualmachine_id = virtualmachine_id def release(self): self.driver.ex_release_public_ip(address=self) def __str__(self): return self.address def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id class CloudStackFirewallRule(object): """ A firewall rule. """ def __init__(self, id, address, cidr_list, protocol, icmp_code=None, icmp_type=None, start_port=None, end_port=None): """ A Firewall rule. @note: This is a non-standard extension API, and only works for CloudStack. 
:param id: Firewall Rule ID :type id: ``int`` :param address: External IP address :type address: :class:`CloudStackAddress` :param cidr_list: cidr list :type cidr_list: ``str`` :param protocol: TCP/IP Protocol (TCP, UDP) :type protocol: ``str`` :param icmp_code: Error code for this icmp message :type icmp_code: ``int`` :param icmp_type: Type of the icmp message being sent :type icmp_type: ``int`` :param start_port: start of port range :type start_port: ``int`` :param end_port: end of port range :type end_port: ``int`` :rtype: :class:`CloudStackFirewallRule` """ self.id = id self.address = address self.cidr_list = cidr_list self.protocol = protocol self.icmp_code = icmp_code self.icmp_type = icmp_type self.start_port = start_port self.end_port = end_port def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id class CloudStackEgressFirewallRule(object): """ A egress firewall rule. """ def __init__(self, id, network_id, cidr_list, protocol, icmp_code=None, icmp_type=None, start_port=None, end_port=None): """ A egress firewall rule. @note: This is a non-standard extension API, and only works for CloudStack. 
:param id: Firewall Rule ID :type id: ``int`` :param network_id: the id network network for the egress firwall services :type network_id: ``str`` :param protocol: TCP/IP Protocol (TCP, UDP) :type protocol: ``str`` :param cidr_list: cidr list :type cidr_list: ``str`` :param icmp_code: Error code for this icmp message :type icmp_code: ``int`` :param icmp_type: Type of the icmp message being sent :type icmp_type: ``int`` :param start_port: start of port range :type start_port: ``int`` :param end_port: end of port range :type end_port: ``int`` :rtype: :class:`CloudStackEgressFirewallRule` """ self.id = id self.network_id = network_id self.cidr_list = cidr_list self.protocol = protocol self.icmp_code = icmp_code self.icmp_type = icmp_type self.start_port = start_port self.end_port = end_port def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id class CloudStackIPForwardingRule(object): """ A NAT/firewall forwarding rule. """ def __init__(self, node, id, address, protocol, start_port, end_port=None): """ A NAT/firewall forwarding rule. @note: This is a non-standard extension API, and only works for CloudStack. :param node: Node for rule :type node: :class:`Node` :param id: Rule ID :type id: ``int`` :param address: External IP address :type address: :class:`CloudStackAddress` :param protocol: TCP/IP Protocol (TCP, UDP) :type protocol: ``str`` :param start_port: Start port for the rule :type start_port: ``int`` :param end_port: End port for the rule :type end_port: ``int`` :rtype: :class:`CloudStackIPForwardingRule` """ self.node = node self.id = id self.address = address self.protocol = protocol self.start_port = start_port self.end_port = end_port def delete(self): self.node.ex_delete_ip_forwarding_rule(rule=self) def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id class CloudStackPortForwardingRule(object): """ A Port forwarding rule for Source NAT. 
""" def __init__(self, node, rule_id, address, protocol, public_port, private_port, public_end_port=None, private_end_port=None, network_id=None): """ A Port forwarding rule for Source NAT. @note: This is a non-standard extension API, and only works for EC2. :param node: Node for rule :type node: :class:`Node` :param rule_id: Rule ID :type rule_id: ``int`` :param address: External IP address :type address: :class:`CloudStackAddress` :param protocol: TCP/IP Protocol (TCP, UDP) :type protocol: ``str`` :param public_port: External port for rule (or start port if public_end_port is also provided) :type public_port: ``int`` :param private_port: Internal node port for rule (or start port if public_end_port is also provided) :type private_port: ``int`` :param public_end_port: End of external port range :type public_end_port: ``int`` :param private_end_port: End of internal port range :type private_end_port: ``int`` :param network_id: The network of the vm the Port Forwarding rule will be created for. Required when public Ip address is not associated with any Guest network yet (VPC case) :type network_id: ``str`` :rtype: :class:`CloudStackPortForwardingRule` """ self.node = node self.id = rule_id self.address = address self.protocol = protocol self.public_port = public_port self.public_end_port = public_end_port self.private_port = private_port self.private_end_port = private_end_port def delete(self): self.node.ex_delete_port_forwarding_rule(rule=self) def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id class CloudStackNetworkACLList(object): """ a Network ACL for the given VPC """ def __init__(self, acl_id, name, vpc_id, driver, description=None): """ a Network ACL for the given VPC @note: This is a non-standard extension API, and only works for Cloudstack. 
:param acl_id: ACL ID :type acl_id: ``int`` :param name: Name of the network ACL List :type name: ``str`` :param vpc_id: Id of the VPC associated with this network ACL List :type vpc_id: ``string`` :param description: Description of the network ACL List :type description: ``str`` :rtype: :class:`CloudStackNetworkACLList` """ self.id = acl_id self.name = name self.vpc_id = vpc_id self.driver = driver self.description = description def __repr__(self): return (('') % (self.id, self.name, self.vpc_id, self.driver.name, self.description)) class CloudStackNetworkACL(object): """ a ACL rule in the given network (the network has to belong to VPC) """ def __init__(self, id, protocol, acl_id, action, cidr_list, start_port, end_port, traffic_type=None): """ a ACL rule in the given network (the network has to belong to VPC) @note: This is a non-standard extension API, and only works for Cloudstack. :param id: the ID of the ACL Item :type id ``int`` :param protocol: the protocol for the ACL rule. Valid values are TCP/UDP/ICMP/ALL or valid protocol number :type protocol: ``string`` :param acl_id: Name of the network ACL List :type acl_id: ``str`` :param action: scl entry action, allow or deny :type action: ``string`` :param cidr_list: the cidr list to allow traffic from/to :type cidr_list: ``str`` :param start_port: the starting port of ACL :type start_port: ``str`` :param end_port: the ending port of ACL :type end_port: ``str`` :param traffic_type: the traffic type for the ACL,can be Ingress or Egress, defaulted to Ingress if not specified :type traffic_type: ``str`` :rtype: :class:`CloudStackNetworkACL` """ self.id = id self.protocol = protocol self.acl_id = acl_id self.action = action self.cidr_list = cidr_list self.start_port = start_port self.end_port = end_port self.traffic_type = traffic_type def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id class CloudStackDiskOffering(object): """ A disk offering within CloudStack. 
""" def __init__(self, id, name, size, customizable): self.id = id self.name = name self.size = size self.customizable = customizable def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id class CloudStackNetwork(object): """ Class representing a CloudStack Network. """ def __init__(self, displaytext, name, networkofferingid, id, zoneid, driver, extra=None): self.displaytext = displaytext self.name = name self.networkofferingid = networkofferingid self.id = id self.zoneid = zoneid self.driver = driver self.extra = extra or {} def __repr__(self): return (('') % (self.displaytext, self.name, self.networkofferingid, self.id, self.zoneid, self.driver.name)) class CloudStackNetworkOffering(object): """ Class representing a CloudStack Network Offering. """ def __init__(self, name, display_text, guest_ip_type, id, service_offering_id, for_vpc, driver, extra=None): self.display_text = display_text self.name = name self.guest_ip_type = guest_ip_type self.id = id self.service_offering_id = service_offering_id self.for_vpc = for_vpc self.driver = driver self.extra = extra or {} def __repr__(self): return (('') % (self.id, self.name, self.display_text, self.guest_ip_type, self.service_offering_id, self.for_vpc, self.driver.name)) class CloudStackNic(object): """ Class representing a CloudStack Network Interface. 
""" def __init__(self, id, network_id, net_mask, gateway, ip_address, is_default, mac_address, driver, extra=None): self.id = id self.network_id = network_id self.net_mask = net_mask self.gateway = gateway self.ip_address = ip_address self.is_default = is_default self.mac_address = mac_address self.driver = driver self.extra = extra or {} def __repr__(self): return (('') % (self.id, self.network_id, self.net_mask, self.gateway, self.ip_address, self.is_default, self.mac_address, self.driver.name)) def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id class CloudStackVPC(object): """ Class representing a CloudStack VPC. """ def __init__(self, name, vpc_offering_id, id, cidr, driver, zone_id=None, display_text=None, extra=None): self.display_text = display_text self.name = name self.vpc_offering_id = vpc_offering_id self.id = id self.zone_id = zone_id self.cidr = cidr self.driver = driver self.extra = extra or {} def __repr__(self): return (('') % (self.name, self.vpc_offering_id, self.id, self.cidr, self.driver.name, self.zone_id, self.display_text)) class CloudStackVPCOffering(object): """ Class representing a CloudStack VPC Offering. """ def __init__(self, name, display_text, id, driver, extra=None): self.name = name self.display_text = display_text self.id = id self.driver = driver self.extra = extra or {} def __repr__(self): return (('') % (self.name, self.display_text, self.id, self.driver.name)) class CloudStackVpnGateway(object): """ Class representing a CloudStack VPN Gateway. 
""" def __init__(self, id, account, domain, domain_id, public_ip, vpc_id, driver, extra=None): self.id = id self.account = account self.domain = domain self.domain_id = domain_id self.public_ip = public_ip self.vpc_id = vpc_id self.driver = driver self.extra = extra or {} @property def vpc(self): for vpc in self.driver.ex_list_vpcs(): if self.vpc_id == vpc.id: return vpc raise LibcloudError('VPC with id=%s not found' % self.vpc_id) def delete(self): return self.driver.ex_delete_vpn_gateway(vpn_gateway=self) def __repr__(self): return (('') % (self.account, self.domain, self.domain_id, self.id, self.public_ip, self.vpc_id, self.driver.name)) class CloudStackVpnCustomerGateway(object): """ Class representing a CloudStack VPN Customer Gateway. """ def __init__(self, id, cidr_list, esp_policy, gateway, ike_policy, ipsec_psk, driver, extra=None): self.id = id self.cidr_list = cidr_list self.esp_policy = esp_policy self.gateway = gateway self.ike_policy = ike_policy self.ipsec_psk = ipsec_psk self.driver = driver self.extra = extra or {} def delete(self): return self.driver.ex_delete_vpn_customer_gateway( vpn_customer_gateway=self) def __repr__(self): return (('') % (self.id, self.cidr_list, self.esp_policy, self.gateway, self.ike_policy, self.ipsec_psk, self.driver.name)) class CloudStackVpnConnection(object): """ Class representing a CloudStack VPN Connection. 
""" def __init__(self, id, passive, vpn_customer_gateway_id, vpn_gateway_id, state, driver, extra=None): self.id = id self.passive = passive self.vpn_customer_gateway_id = vpn_customer_gateway_id self.vpn_gateway_id = vpn_gateway_id self.state = state self.driver = driver self.extra = extra or {} @property def vpn_customer_gateway(self): try: return self.driver.ex_list_vpn_customer_gateways( id=self.vpn_customer_gateway_id)[0] except IndexError: raise LibcloudError('VPN Customer Gateway with id=%s not found' % self.vpn_customer_gateway_id) @property def vpn_gateway(self): try: return self.driver.ex_list_vpn_gateways(id=self.vpn_gateway_id)[0] except IndexError: raise LibcloudError('VPN Gateway with id=%s not found' % self.vpn_gateway_id) def delete(self): return self.driver.ex_delete_vpn_connection(vpn_connection=self) def __repr__(self): return (('') % (self.id, self.passive, self.vpn_customer_gateway_id, self.vpn_gateway_id, self.state, self.driver.name)) class CloudStackRouter(object): """ Class representing a CloudStack Router. """ def __init__(self, id, name, state, public_ip, vpc_id, driver): self.id = id self.name = name self.state = state self.public_ip = public_ip self.vpc_id = vpc_id self.driver = driver def __repr__(self): return (('') % (self.id, self.name, self.state, self.public_ip, self.vpc_id, self.driver.name)) class CloudStackProject(object): """ Class representing a CloudStack Project. """ def __init__(self, id, name, display_text, driver, extra=None): self.id = id self.name = name self.display_text = display_text self.driver = driver self.extra = extra or {} def __repr__(self): return (('') % (self.id, self.display_text, self.name, self.driver.name)) class CloudStackAffinityGroup(object): """ Class representing a CloudStack AffinityGroup. """ def __init__(self, id, account, description, domain, domainid, name, group_type, virtualmachine_ids): """ A CloudStack Affinity Group. 
@note: This is a non-standard extension API, and only works for CloudStack. :param id: CloudStack Affinity Group ID :type id: ``str`` :param account: An account for the affinity group. Must be used with domainId. :type account: ``str`` :param description: optional description of the affinity group :type description: ``str`` :param domain: the domain name of the affinity group :type domain: ``str`` :param domainid: domain ID of the account owning the affinity group :type domainid: ``str`` :param name: name of the affinity group :type name: ``str`` :param group_type: the type of the affinity group :type group_type: :class:`CloudStackAffinityGroupType` :param virtualmachine_ids: virtual machine Ids associated with this affinity group :type virtualmachine_ids: ``str`` :rtype: :class:`CloudStackAffinityGroup` """ self.id = id self.account = account self.description = description self.domain = domain self.domainid = domainid self.name = name self.type = group_type self.virtualmachine_ids = virtualmachine_ids def __repr__(self): return (('') % (self.id, self.name, self.type)) class CloudStackAffinityGroupType(object): """ Class representing a CloudStack AffinityGroupType. """ def __init__(self, type_name): """ A CloudStack Affinity Group Type. @note: This is a non-standard extension API, and only works for CloudStack. :param type_name: the type of the affinity group :type type_name: ``str`` :rtype: :class:`CloudStackAffinityGroupType` """ self.type = type_name def __repr__(self): return (('') % self.type) class CloudStackNodeDriver(CloudStackDriverMixIn, NodeDriver): """ Driver for the CloudStack API. :cvar host: The host where the API can be reached. :cvar path: The path where the API can be reached. :cvar async_poll_frequency: How often (in seconds) to poll for async job completion. 
:type async_poll_frequency: ``int``""" name = 'CloudStack' api_name = 'cloudstack' website = 'http://cloudstack.org/' type = Provider.CLOUDSTACK features = {'create_node': ['generates_password']} NODE_STATE_MAP = { 'Running': NodeState.RUNNING, 'Starting': NodeState.REBOOTING, 'Migrating': NodeState.MIGRATING, 'Stopped': NodeState.STOPPED, 'Stopping': NodeState.PENDING, 'Destroyed': NodeState.TERMINATED, 'Expunging': NodeState.PENDING, 'Error': NodeState.TERMINATED } VOLUME_STATE_MAP = { 'Creating': StorageVolumeState.CREATING, 'Destroying': StorageVolumeState.DELETING, 'Expunging': StorageVolumeState.DELETING, 'Destroy': StorageVolumeState.DELETED, 'Expunged': StorageVolumeState.DELETED, 'Allocated': StorageVolumeState.AVAILABLE, 'Ready': StorageVolumeState.AVAILABLE, 'Snapshotting': StorageVolumeState.BACKUP, 'UploadError': StorageVolumeState.ERROR, 'Migrating': StorageVolumeState.MIGRATING } def __init__(self, key, secret=None, secure=True, host=None, path=None, port=None, url=None, *args, **kwargs): """ :inherits: :class:`NodeDriver.__init__` :param host: The host where the API can be reached. (required) :type host: ``str`` :param path: The path where the API can be reached. (required) :type path: ``str`` :param url: Full URL to the API endpoint. Mutually exclusive with host and path argument. 
:type url: ``str`` """ if url: parsed = urlparse.urlparse(url) path = parsed.path scheme = parsed.scheme split = parsed.netloc.split(':') if len(split) == 1: # No port provided, use the default one host = parsed.netloc port = 443 if scheme == 'https' else 80 else: host = split[0] port = int(split[1]) else: host = host if host else self.host path = path if path else self.path if path is not None: self.path = path if host is not None: self.host = host if (self.type == Provider.CLOUDSTACK) and (not host or not path): raise Exception('When instantiating CloudStack driver directly ' 'you also need to provide url or host and path ' 'argument') super(CloudStackNodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port) def list_images(self, location=None): args = { 'templatefilter': 'executable' } if location is not None: args['zoneid'] = location.id imgs = self._sync_request(command='listTemplates', params=args, method='GET') images = [] for img in imgs.get('template', []): extra = {'hypervisor': img['hypervisor'], 'format': img['format'], 'os': img['ostypename'], 'displaytext': img['displaytext']} size = img.get('size', None) if size is not None: extra.update({'size': img['size']}) images.append(NodeImage( id=img['id'], name=img['name'], driver=self.connection.driver, extra=extra)) return images def list_locations(self): """ :rtype ``list`` of :class:`NodeLocation` """ locs = self._sync_request('listZones') locations = [] for loc in locs['zone']: location = NodeLocation(str(loc['id']), loc['name'], 'Unknown', self) locations.append(location) return locations def list_nodes(self, project=None, location=None): """ @inherits: :class:`NodeDriver.list_nodes` :keyword project: Limit nodes returned to those configured under the defined project. :type project: :class:`.CloudStackProject` :keyword location: Limit nodes returned to those in the defined location. 
:type location: :class:`.NodeLocation` :rtype: ``list`` of :class:`CloudStackNode` """ args = {} if project: args['projectid'] = project.id if location is not None: args['zoneid'] = location.id vms = self._sync_request('listVirtualMachines', params=args) addrs = self._sync_request('listPublicIpAddresses', params=args) port_forwarding_rules = self._sync_request('listPortForwardingRules') ip_forwarding_rules = self._sync_request('listIpForwardingRules') public_ips_map = {} for addr in addrs.get('publicipaddress', []): if 'virtualmachineid' not in addr: continue vm_id = str(addr['virtualmachineid']) if vm_id not in public_ips_map: public_ips_map[vm_id] = {} public_ips_map[vm_id][addr['ipaddress']] = addr['id'] nodes = [] for vm in vms.get('virtualmachine', []): public_ips = public_ips_map.get(str(vm['id']), {}).keys() public_ips = list(public_ips) node = self._to_node(data=vm, public_ips=public_ips) addresses = public_ips_map.get(str(vm['id']), {}).items() addresses = [CloudStackAddress(id=address_id, address=address, driver=node.driver) for address, address_id in addresses] node.extra['ip_addresses'] = addresses rules = [] for addr in addresses: for r in ip_forwarding_rules.get('ipforwardingrule', []): if str(r['virtualmachineid']) == node.id: rule = CloudStackIPForwardingRule(node, r['id'], addr, r['protocol'] .upper(), r['startport'], r['endport']) rules.append(rule) node.extra['ip_forwarding_rules'] = rules rules = [] for r in port_forwarding_rules.get('portforwardingrule', []): if str(r['virtualmachineid']) == node.id: addr = [CloudStackAddress(id=a['id'], address=a['ipaddress'], driver=node.driver) for a in addrs.get('publicipaddress', []) if a['ipaddress'] == r['ipaddress']] rule = CloudStackPortForwardingRule(node, r['id'], addr[0], r['protocol'].upper(), r['publicport'], r['privateport'], r['publicendport'], r['privateendport']) if not addr[0].address in node.public_ips: node.public_ips.append(addr[0].address) rules.append(rule) 
node.extra['port_forwarding_rules'] = rules nodes.append(node) return nodes def ex_get_node(self, node_id, project=None): """ Return a Node object based on its ID. :param node_id: The id of the node :type node_id: ``str`` :keyword project: Limit node returned to those configured under the defined project. :type project: :class:`.CloudStackProject` :rtype: :class:`CloudStackNode` """ list_nodes_args = {'id': node_id} list_ips_args = {} if project: list_nodes_args['projectid'] = project.id list_ips_args['projectid'] = project.id vms = self._sync_request('listVirtualMachines', params=list_nodes_args) if not vms: raise Exception("Node '%s' not found" % node_id) vm = vms['virtualmachine'][0] addrs = self._sync_request('listPublicIpAddresses', params=list_ips_args) public_ips = {} for addr in addrs.get('publicipaddress', []): if 'virtualmachineid' not in addr: continue public_ips[addr['ipaddress']] = addr['id'] node = self._to_node(data=vm, public_ips=list(public_ips.keys())) addresses = [CloudStackAddress(id=address_id, address=address, driver=node.driver) for address, address_id in public_ips.items()] node.extra['ip_addresses'] = addresses rules = [] list_fw_rules = {'virtualmachineid': node_id} for addr in addresses: result = self._sync_request('listIpForwardingRules', params=list_fw_rules) for r in result.get('ipforwardingrule', []): if str(r['virtualmachineid']) == node.id: rule = CloudStackIPForwardingRule(node, r['id'], addr, r['protocol'] .upper(), r['startport'], r['endport']) rules.append(rule) node.extra['ip_forwarding_rules'] = rules rules = [] public_ips = self.ex_list_public_ips() result = self._sync_request('listPortForwardingRules', params=list_fw_rules) for r in result.get('portforwardingrule', []): if str(r['virtualmachineid']) == node.id: addr = [a for a in public_ips if a.address == r['ipaddress']] rule = CloudStackPortForwardingRule(node, r['id'], addr[0], r['protocol'].upper(), r['publicport'], r['privateport'], r['publicendport'], 
r['privateendport']) if not addr[0].address in node.public_ips: node.public_ips.append(addr[0].address) rules.append(rule) node.extra['port_forwarding_rules'] = rules return node def list_sizes(self, location=None): """ :rtype ``list`` of :class:`NodeSize` """ szs = self._sync_request(command='listServiceOfferings', method='GET') sizes = [] for sz in szs['serviceoffering']: extra = {'cpu': sz['cpunumber']} sizes.append(NodeSize(sz['id'], sz['name'], sz['memory'], 0, 0, 0, self, extra=extra)) return sizes def create_node(self, **kwargs): """ Create a new node @inherits: :class:`NodeDriver.create_node` :keyword networks: Optional list of networks to launch the server into. :type networks: ``list`` of :class:`.CloudStackNetwork` :keyword project: Optional project to create the new node under. :type project: :class:`.CloudStackProject` :keyword diskoffering: Optional disk offering to add to the new node. :type diskoffering: :class:`.CloudStackDiskOffering` :keyword ex_keyname: Name of existing keypair :type ex_keyname: ``str`` :keyword ex_userdata: String containing user data :type ex_userdata: ``str`` :keyword ex_security_groups: List of security groups to assign to the node :type ex_security_groups: ``list`` of ``str`` :keyword ex_displayname: String containing instance display name :type ex_displayname: ``str`` :keyword ex_ip_address: String with ipaddress for the default nic :type ex_ip_address: ``str`` :keyword ex_start_vm: Boolean to specify to start VM after creation Default Cloudstack behaviour is to start a VM, if not specified. 
:type ex_start_vm: ``bool`` :keyword ex_rootdisksize: String with rootdisksize for the template :type ex_rootdisksize: ``str`` :keyword ex_affinity_groups: List of affinity groups to assign to the node :type ex_affinity_groups: ``list`` of :class:`.CloudStackAffinityGroup` :rtype: :class:`.CloudStackNode` """ server_params = self._create_args_to_params(None, **kwargs) data = self._async_request(command='deployVirtualMachine', params=server_params, method='GET')['virtualmachine'] node = self._to_node(data=data) return node def _create_args_to_params(self, node, **kwargs): server_params = {} # TODO: Refactor and use "kwarg_to_server_params" map name = kwargs.get('name', None) size = kwargs.get('size', None) image = kwargs.get('image', None) location = kwargs.get('location', None) networks = kwargs.get('networks', None) project = kwargs.get('project', None) diskoffering = kwargs.get('diskoffering', None) ex_key_name = kwargs.get('ex_keyname', None) ex_user_data = kwargs.get('ex_userdata', None) ex_security_groups = kwargs.get('ex_security_groups', None) ex_displayname = kwargs.get('ex_displayname', None) ex_ip_address = kwargs.get('ex_ip_address', None) ex_start_vm = kwargs.get('ex_start_vm', None) ex_rootdisksize = kwargs.get('ex_rootdisksize', None) ex_affinity_groups = kwargs.get('ex_affinity_groups', None) if name: server_params['name'] = name if ex_displayname: server_params['displayname'] = ex_displayname if size: server_params['serviceofferingid'] = size.id if image: server_params['templateid'] = image.id if location: server_params['zoneid'] = location.id else: # Use a default location server_params['zoneid'] = self.list_locations()[0].id if networks: networks = ','.join([str(network.id) for network in networks]) server_params['networkids'] = networks if project: server_params['projectid'] = project.id if diskoffering: server_params['diskofferingid'] = diskoffering.id if ex_key_name: server_params['keypair'] = ex_key_name if ex_user_data: ex_user_data = 
base64.b64encode(b(ex_user_data)).decode('ascii') server_params['userdata'] = ex_user_data if ex_security_groups: ex_security_groups = ','.join(ex_security_groups) server_params['securitygroupnames'] = ex_security_groups if ex_ip_address: server_params['ipaddress'] = ex_ip_address if ex_rootdisksize: server_params['rootdisksize'] = ex_rootdisksize if ex_start_vm is not None: server_params['startvm'] = ex_start_vm if ex_affinity_groups: affinity_group_ids = ','.join(ag.id for ag in ex_affinity_groups) server_params['affinitygroupids'] = affinity_group_ids return server_params def destroy_node(self, node, ex_expunge=False): """ @inherits: :class:`NodeDriver.reboot_node` :type node: :class:`CloudStackNode` :keyword ex_expunge: If true is passed, the vm is expunged immediately. False by default. :type ex_expunge: ``bool`` :rtype: ``bool`` """ args = { 'id': node.id, } if ex_expunge: args['expunge'] = ex_expunge self._async_request(command='destroyVirtualMachine', params=args, method='GET') return True def reboot_node(self, node): """ @inherits: :class:`NodeDriver.reboot_node` :type node: :class:`CloudStackNode` :rtype: ``bool`` """ self._async_request(command='rebootVirtualMachine', params={'id': node.id}, method='GET') return True def ex_start(self, node): """ Starts/Resumes a stopped virtual machine :type node: :class:`CloudStackNode` :param id: The ID of the virtual machine (required) :type id: ``str`` :param hostid: destination Host ID to deploy the VM to parameter available for root admin only :type hostid: ``str`` :rtype ``str`` """ res = self._async_request(command='startVirtualMachine', params={'id': node.id}, method='GET') return res['virtualmachine']['state'] def ex_stop(self, node): """ Stops/Suspends a running virtual machine :param node: Node to stop. 
:type node: :class:`CloudStackNode` :rtype: ``str`` """ res = self._async_request(command='stopVirtualMachine', params={'id': node.id}, method='GET') return res['virtualmachine']['state'] def ex_list_disk_offerings(self): """ Fetch a list of all available disk offerings. :rtype: ``list`` of :class:`CloudStackDiskOffering` """ diskOfferings = [] diskOfferResponse = self._sync_request(command='listDiskOfferings', method='GET') for diskOfferDict in diskOfferResponse.get('diskoffering', ()): diskOfferings.append( CloudStackDiskOffering( id=diskOfferDict['id'], name=diskOfferDict['name'], size=diskOfferDict['disksize'], customizable=diskOfferDict['iscustomized'])) return diskOfferings def ex_list_networks(self, project=None): """ List the available networks :param project: Optional project the networks belongs to. :type project: :class:`.CloudStackProject` :rtype ``list`` of :class:`CloudStackNetwork` """ args = {} if project is not None: args['projectid'] = project.id res = self._sync_request(command='listNetworks', params=args, method='GET') nets = res.get('network', []) networks = [] extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network'] for net in nets: extra = self._get_extra_dict(net, extra_map) if 'tags' in net: extra['tags'] = self._get_resource_tags(net['tags']) networks.append(CloudStackNetwork( net['displaytext'], net['name'], net['networkofferingid'], net['id'], net['zoneid'], self, extra=extra)) return networks def ex_list_network_offerings(self): """ List the available network offerings :rtype ``list`` of :class:`CloudStackNetworkOffering` """ res = self._sync_request(command='listNetworkOfferings', method='GET') netoffers = res.get('networkoffering', []) networkofferings = [] for netoffer in netoffers: networkofferings.append(CloudStackNetworkOffering( netoffer['name'], netoffer['displaytext'], netoffer['guestiptype'], netoffer['id'], netoffer['serviceofferingid'], netoffer['forvpc'], self)) return networkofferings def ex_create_network(self, display_text, 
name, network_offering, location, gateway=None, netmask=None, network_domain=None, vpc_id=None, project_id=None): """ Creates a Network, only available in advanced zones. :param display_text: the display text of the network :type display_text: ``str`` :param name: the name of the network :type name: ``str`` :param network_offering: NetworkOffering object :type network_offering: :class:'CloudStackNetworkOffering` :param location: Zone object :type location: :class:`NodeLocation` :param gateway: Optional, the Gateway of this network :type gateway: ``str`` :param netmask: Optional, the netmask of this network :type netmask: ``str`` :param network_domain: Optional, the DNS domain of the network :type network_domain: ``str`` :param vpc_id: Optional, the VPC id the network belongs to :type vpc_id: ``str`` :param project_id: Optional, the project id the networks belongs to :type project_id: ``str`` :rtype: :class:`CloudStackNetwork` """ extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network'] args = { 'displaytext': display_text, 'name': name, 'networkofferingid': network_offering.id, 'zoneid': location.id, } if gateway is not None: args['gateway'] = gateway if netmask is not None: args['netmask'] = netmask if network_domain is not None: args['networkdomain'] = network_domain if vpc_id is not None: args['vpcid'] = vpc_id if project_id is not None: args['projectid'] = project_id """ Cloudstack allows for duplicate network names, this should be handled in the code leveraging libcloud As there could be use cases for duplicate names. e.g. 
management from ROOT level""" # for net in self.ex_list_networks(): # if name == net.name: # raise LibcloudError('This network name already exists') result = self._sync_request(command='createNetwork', params=args, method='GET') result = result['network'] extra = self._get_extra_dict(result, extra_map) network = CloudStackNetwork(display_text, name, network_offering.id, result['id'], location.id, self, extra=extra) return network def ex_delete_network(self, network, force=None): """ Deletes a Network, only available in advanced zones. :param network: The network :type network: :class: 'CloudStackNetwork' :param force: Force deletion of the network? :type force: ``bool`` :rtype: ``bool`` """ args = {'id': network.id, 'forced': force} self._async_request(command='deleteNetwork', params=args, method='GET') return True def ex_list_vpc_offerings(self): """ List the available vpc offerings :rtype ``list`` of :class:`CloudStackVPCOffering` """ res = self._sync_request(command='listVPCOfferings', method='GET') vpcoffers = res.get('vpcoffering', []) vpcofferings = [] for vpcoffer in vpcoffers: vpcofferings.append(CloudStackVPCOffering( vpcoffer['name'], vpcoffer['displaytext'], vpcoffer['id'], self)) return vpcofferings def ex_list_vpcs(self, project=None): """ List the available VPCs :keyword project: Optional project under which VPCs are present. 
:type project: :class:`.CloudStackProject` :rtype ``list`` of :class:`CloudStackVPC` """ args = {} if project is not None: args['projectid'] = project.id res = self._sync_request(command='listVPCs', params=args, method='GET') vpcs = res.get('vpc', []) networks = [] for vpc in vpcs: networks.append(CloudStackVPC( vpc['name'], vpc['vpcofferingid'], vpc['id'], vpc['cidr'], self, vpc['zoneid'], vpc['displaytext'])) return networks def ex_list_routers(self, vpc_id=None): """ List routers :rtype ``list`` of :class:`CloudStackRouter` """ args = {} if vpc_id is not None: args['vpcid'] = vpc_id res = self._sync_request(command='listRouters', params=args, method='GET') rts = res.get('router', []) routers = [] for router in rts: routers.append(CloudStackRouter( router['id'], router['name'], router['state'], router['publicip'], router['vpcid'], self)) return routers def ex_create_vpc(self, cidr, display_text, name, vpc_offering, zone_id, network_domain=None): """ Creates a VPC, only available in advanced zones. :param cidr: the cidr of the VPC. 
All VPC guest networks' cidrs should be within this CIDR :type display_text: ``str`` :param display_text: the display text of the VPC :type display_text: ``str`` :param name: the name of the VPC :type name: ``str`` :param vpc_offering: the ID of the VPC offering :type vpc_offering: :class:'CloudStackVPCOffering` :param zone_id: the ID of the availability zone :type zone_id: ``str`` :param network_domain: Optional, the DNS domain of the network :type network_domain: ``str`` :rtype: :class:`CloudStackVPC` """ extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['vpc'] args = { 'cidr': cidr, 'displaytext': display_text, 'name': name, 'vpcofferingid': vpc_offering.id, 'zoneid': zone_id, } if network_domain is not None: args['networkdomain'] = network_domain result = self._sync_request(command='createVPC', params=args, method='GET') extra = self._get_extra_dict(result, extra_map) vpc = CloudStackVPC(name, vpc_offering.id, result['id'], cidr, self, zone_id, display_text, extra=extra) return vpc def ex_delete_vpc(self, vpc): """ Deletes a VPC, only available in advanced zones. 
:param vpc: The VPC :type vpc: :class: 'CloudStackVPC' :rtype: ``bool`` """ args = {'id': vpc.id} self._async_request(command='deleteVPC', params=args, method='GET') return True def ex_list_projects(self): """ List the available projects :rtype ``list`` of :class:`CloudStackProject` """ res = self._sync_request(command='listProjects', method='GET') projs = res.get('project', []) projects = [] extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['project'] for proj in projs: extra = self._get_extra_dict(proj, extra_map) if 'tags' in proj: extra['tags'] = self._get_resource_tags(proj['tags']) projects.append(CloudStackProject( id=proj['id'], name=proj['name'], display_text=proj['displaytext'], driver=self, extra=extra)) return projects def create_volume(self, size, name, location=None, snapshot=None, ex_volume_type=None): """ Creates a data volume Defaults to the first location """ if ex_volume_type is None: for diskOffering in self.ex_list_disk_offerings(): if diskOffering.size == size or diskOffering.customizable: break else: raise LibcloudError( 'Disk offering with size=%s not found' % size) else: for diskOffering in self.ex_list_disk_offerings(): if diskOffering.name == ex_volume_type: if not diskOffering.customizable: size = diskOffering.size break else: raise LibcloudError( 'Volume type with name=%s not found' % ex_volume_type) if location is None: location = self.list_locations()[0] params = {'name': name, 'diskOfferingId': diskOffering.id, 'zoneId': location.id} if diskOffering.customizable: params['size'] = size requestResult = self._async_request(command='createVolume', params=params, method='GET') volumeResponse = requestResult['volume'] state = self._to_volume_state(volumeResponse) return StorageVolume(id=volumeResponse['id'], name=name, size=size, state=state, driver=self, extra=dict(name=volumeResponse['name'])) def destroy_volume(self, volume): """ :rtype: ``bool`` """ self._sync_request(command='deleteVolume', params={'id': volume.id}, method='GET') return True 
def attach_volume(self, node, volume, device=None): """ @inherits: :class:`NodeDriver.attach_volume` :type node: :class:`CloudStackNode` :rtype: ``bool`` """ # TODO Add handling for device name self._async_request(command='attachVolume', params={'id': volume.id, 'virtualMachineId': node.id}, method='GET') return True def detach_volume(self, volume): """ :rtype: ``bool`` """ self._async_request(command='detachVolume', params={'id': volume.id}, method='GET') return True def list_volumes(self, node=None): """ List all volumes :param node: Only return volumes for the provided node. :type node: :class:`CloudStackNode` :rtype: ``list`` of :class:`StorageVolume` """ if node: volumes = self._sync_request(command='listVolumes', params={'virtualmachineid': node.id}, method='GET') else: volumes = self._sync_request(command='listVolumes', method='GET') list_volumes = [] extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'] for vol in volumes.get('volume', []): extra = self._get_extra_dict(vol, extra_map) if 'tags' in vol: extra['tags'] = self._get_resource_tags(vol['tags']) state = self._to_volume_state(vol) list_volumes.append(StorageVolume(id=vol['id'], name=vol['name'], size=vol['size'], state=state, driver=self, extra=extra)) return list_volumes def ex_get_volume(self, volume_id, project=None): """ Return a StorageVolume object based on its ID. :param volume_id: The id of the volume :type volume_id: ``str`` :keyword project: Limit volume returned to those configured under the defined project. 
:type project: :class:`.CloudStackProject` :rtype: :class:`CloudStackNode` """ args = {'id': volume_id} if project: args['projectid'] = project.id volumes = self._sync_request(command='listVolumes', params=args) if not volumes: raise Exception("Volume '%s' not found" % volume_id) vol = volumes['volume'][0] extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'] extra = self._get_extra_dict(vol, extra_map) if 'tags' in vol: extra['tags'] = self._get_resource_tags(vol['tags']) state = self._to_volume_state(vol) volume = StorageVolume(id=vol['id'], name=vol['name'], state=state, size=vol['size'], driver=self, extra=extra) return volume def list_key_pairs(self, **kwargs): """ List registered key pairs. :param projectid: list objects by project :type projectid: ``str`` :param page: The page to list the keypairs from :type page: ``int`` :param keyword: List by keyword :type keyword: ``str`` :param listall: If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false :type listall: ``bool`` :param pagesize: The number of results per page :type pagesize: ``int`` :param account: List resources by account. Must be used with the domainId parameter :type account: ``str`` :param isrecursive: Defaults to false, but if true, lists all resources from the parent specified by the domainId till leaves. :type isrecursive: ``bool`` :param fingerprint: A public key fingerprint to look for :type fingerprint: ``str`` :param name: A key pair name to look for :type name: ``str`` :param domainid: List only resources belonging to the domain specified :type domainid: ``str`` :return: A list of key par objects. 
:rtype: ``list`` of :class:`libcloud.compute.base.KeyPair` """ extra_args = kwargs.copy() res = self._sync_request(command='listSSHKeyPairs', params=extra_args, method='GET') key_pairs = res.get('sshkeypair', []) key_pairs = self._to_key_pairs(data=key_pairs) return key_pairs def get_key_pair(self, name): """ Retrieve a single key pair. :param name: Name of the key pair to retrieve. :type name: ``str`` :rtype: :class:`.KeyPair` """ params = {'name': name} res = self._sync_request(command='listSSHKeyPairs', params=params, method='GET') key_pairs = res.get('sshkeypair', []) if len(key_pairs) == 0: raise KeyPairDoesNotExistError(name=name, driver=self) key_pair = self._to_key_pair(data=key_pairs[0]) return key_pair def create_key_pair(self, name, **kwargs): """ Create a new key pair object. :param name: Key pair name. :type name: ``str`` :param name: Name of the keypair (required) :type name: ``str`` :param projectid: An optional project for the ssh key :type projectid: ``str`` :param domainid: An optional domainId for the ssh key. If the account parameter is used, domainId must also be used. :type domainid: ``str`` :param account: An optional account for the ssh key. Must be used with domainId. :type account: ``str`` :return: Created key pair object. :rtype: :class:`libcloud.compute.base.KeyPair` """ extra_args = kwargs.copy() params = {'name': name} params.update(extra_args) res = self._sync_request(command='createSSHKeyPair', params=params, method='GET') key_pair = self._to_key_pair(data=res['keypair']) return key_pair def import_key_pair_from_string(self, name, key_material): """ Import a new public key from string. :param name: Key pair name. :type name: ``str`` :param key_material: Public key material. :type key_material: ``str`` :return: Imported key pair object. 
:rtype: :class:`libcloud.compute.base.KeyPair` """ res = self._sync_request(command='registerSSHKeyPair', params={'name': name, 'publickey': key_material}, method='GET') key_pair = self._to_key_pair(data=res['keypair']) return key_pair def delete_key_pair(self, key_pair, **kwargs): """ Delete an existing key pair. :param key_pair: Key pair object. :type key_pair: :class:`libcloud.compute.base.KeyPair` :param projectid: The project associated with keypair :type projectid: ``str`` :param domainid: The domain ID associated with the keypair :type domainid: ``str`` :param account: The account associated with the keypair. Must be used with the domainId parameter. :type account: ``str`` :return: True of False based on success of Keypair deletion :rtype: ``bool`` """ extra_args = kwargs.copy() params = {'name': key_pair.name} params.update(extra_args) res = self._sync_request(command='deleteSSHKeyPair', params=params, method='GET') return res['success'] == 'true' def ex_list_public_ips(self): """ Lists all Public IP Addresses. :rtype: ``list`` of :class:`CloudStackAddress` """ ips = [] res = self._sync_request(command='listPublicIpAddresses', method='GET') # Workaround for basic zones if not res: return ips for ip in res['publicipaddress']: ips.append(CloudStackAddress(ip['id'], ip['ipaddress'], self, ip.get('associatednetworkid', []), ip.get('vpcid'), ip.get('virtualmachineid'))) return ips def ex_allocate_public_ip(self, vpc_id=None, network_id=None, location=None): """ Allocate a public IP. :param vpc_id: VPC the ip belongs to :type vpc_id: ``str`` :param network_id: Network where this IP is connected to. 
:type network_id: ''str'' :param location: Zone :type location: :class:`NodeLocation` :rtype: :class:`CloudStackAddress` """ args = {} if location is not None: args['zoneid'] = location.id else: args['zoneid'] = self.list_locations()[0].id if vpc_id is not None: args['vpcid'] = vpc_id if network_id is not None: args['networkid'] = network_id addr = self._async_request(command='associateIpAddress', params=args, method='GET') addr = addr['ipaddress'] addr = CloudStackAddress(addr['id'], addr['ipaddress'], self) return addr def ex_release_public_ip(self, address): """ Release a public IP. :param address: CloudStackAddress which should be used :type address: :class:`CloudStackAddress` :rtype: ``bool`` """ res = self._async_request(command='disassociateIpAddress', params={'id': address.id}, method='GET') return res['success'] def ex_list_firewall_rules(self): """ Lists all Firewall Rules :rtype: ``list`` of :class:`CloudStackFirewallRule` """ rules = [] result = self._sync_request(command='listFirewallRules', method='GET') if result != {}: public_ips = self.ex_list_public_ips() for rule in result['firewallrule']: addr = [a for a in public_ips if a.address == rule['ipaddress']] rules.append(CloudStackFirewallRule(rule['id'], addr[0], rule['cidrlist'], rule['protocol'], rule.get('icmpcode'), rule.get('icmptype'), rule.get('startport'), rule.get('endport'))) return rules def ex_create_firewall_rule(self, address, cidr_list, protocol, icmp_code=None, icmp_type=None, start_port=None, end_port=None): """ Creates a Firewall Rule :param address: External IP address :type address: :class:`CloudStackAddress` :param cidr_list: cidr list :type cidr_list: ``str`` :param protocol: TCP/IP Protocol (TCP, UDP) :type protocol: ``str`` :param icmp_code: Error code for this icmp message :type icmp_code: ``int`` :param icmp_type: Type of the icmp message being sent :type icmp_type: ``int`` :param start_port: start of port range :type start_port: ``int`` :param end_port: end of port range 
:type end_port: ``int`` :rtype: :class:`CloudStackFirewallRule` """ args = { 'ipaddressid': address.id, 'cidrlist': cidr_list, 'protocol': protocol } if icmp_code is not None: args['icmpcode'] = int(icmp_code) if icmp_type is not None: args['icmptype'] = int(icmp_type) if start_port is not None: args['startport'] = int(start_port) if end_port is not None: args['endport'] = int(end_port) result = self._async_request(command='createFirewallRule', params=args, method='GET') rule = CloudStackFirewallRule(result['firewallrule']['id'], address, cidr_list, protocol, icmp_code, icmp_type, start_port, end_port) return rule def ex_delete_firewall_rule(self, firewall_rule): """ Remove a Firewall Rule. :param firewall_rule: Firewall rule which should be used :type firewall_rule: :class:`CloudStackFirewallRule` :rtype: ``bool`` """ res = self._async_request(command='deleteFirewallRule', params={'id': firewall_rule.id}, method='GET') return res['success'] def ex_list_egress_firewall_rules(self): """ Lists all egress Firewall Rules :rtype: ``list`` of :class:`CloudStackEgressFirewallRule` """ rules = [] result = self._sync_request(command='listEgressFirewallRules', method='GET') for rule in result['firewallrule']: rules.append(CloudStackEgressFirewallRule(rule['id'], rule['networkid'], rule['cidrlist'], rule['protocol'], rule.get('icmpcode'), rule.get('icmptype'), rule.get('startport'), rule.get('endport'))) return rules def ex_create_egress_firewall_rule(self, network_id, cidr_list, protocol, icmp_code=None, icmp_type=None, start_port=None, end_port=None): """ Creates a Firewall Rule :param network_id: the id network network for the egress firewall services :type network_id: ``str`` :param cidr_list: cidr list :type cidr_list: ``str`` :param protocol: TCP/IP Protocol (TCP, UDP) :type protocol: ``str`` :param icmp_code: Error code for this icmp message :type icmp_code: ``int`` :param icmp_type: Type of the icmp message being sent :type icmp_type: ``int`` :param start_port: start 
of port range :type start_port: ``int`` :param end_port: end of port range :type end_port: ``int`` :rtype: :class:`CloudStackEgressFirewallRule` """ args = { 'networkid': network_id, 'cidrlist': cidr_list, 'protocol': protocol } if icmp_code is not None: args['icmpcode'] = int(icmp_code) if icmp_type is not None: args['icmptype'] = int(icmp_type) if start_port is not None: args['startport'] = int(start_port) if end_port is not None: args['endport'] = int(end_port) result = self._async_request(command='createEgressFirewallRule', params=args, method='GET') rule = CloudStackEgressFirewallRule(result['firewallrule']['id'], network_id, cidr_list, protocol, icmp_code, icmp_type, start_port, end_port) return rule def ex_delete_egress_firewall_rule(self, firewall_rule): """ Remove a Firewall rule. :param egress_firewall_rule: Firewall rule which should be used :type egress_firewall_rule: :class:`CloudStackEgressFirewallRule` :rtype: ``bool`` """ res = self._async_request(command='deleteEgressFirewallRule', params={'id': firewall_rule.id}, method='GET') return res['success'] def ex_list_port_forwarding_rules(self, account=None, domain_id=None, id=None, ipaddress_id=None, is_recursive=None, keyword=None, list_all=None, network_id=None, page=None, page_size=None, project_id=None): """ Lists all Port Forwarding Rules :param account: List resources by account. Must be used with the domainId parameter :type account: ``str`` :param domain_id: List only resources belonging to the domain specified :type domain_id: ``str`` :param for_display: List resources by display flag (only root admin is eligible to pass this parameter). :type for_display: ``bool`` :param id: Lists rule with the specified ID :type id: ``str`` :param ipaddress_id: list the rule belonging to this public ip address :type ipaddress_id: ``str`` :param is_recursive: Defaults to false, but if true, lists all resources from the parent specified by the domainId till leaves. 
:type is_recursive: ``bool`` :param keyword: List by keyword :type keyword: ``str`` :param list_all: If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false :type list_all: ``bool`` :param network_id: list port forwarding rules for certain network :type network_id: ``string`` :param page: The page to list the keypairs from :type page: ``int`` :param page_size: The number of results per page :type page_size: ``int`` :param project_id: list objects by project :type project_id: ``str`` :rtype: ``list`` of :class:`CloudStackPortForwardingRule` """ args = {} if account is not None: args['account'] = account if domain_id is not None: args['domainid'] = domain_id if id is not None: args['id'] = id if ipaddress_id is not None: args['ipaddressid'] = ipaddress_id if is_recursive is not None: args['isrecursive'] = is_recursive if keyword is not None: args['keyword'] = keyword if list_all is not None: args['listall'] = list_all if network_id is not None: args['networkid'] = network_id if page is not None: args['page'] = page if page_size is not None: args['pagesize'] = page_size if project_id is not None: args['projectid'] = project_id rules = [] result = self._sync_request(command='listPortForwardingRules', params=args, method='GET') if result != {}: public_ips = self.ex_list_public_ips() nodes = self.list_nodes() for rule in result['portforwardingrule']: node = [n for n in nodes if n.id == str(rule['virtualmachineid'])] addr = [a for a in public_ips if a.address == rule['ipaddress']] rules.append(CloudStackPortForwardingRule (node[0], rule['id'], addr[0], rule['protocol'], rule['publicport'], rule['privateport'], rule['publicendport'], rule['privateendport'])) return rules def ex_create_port_forwarding_rule(self, node, address, private_port, public_port, protocol, public_end_port=None, private_end_port=None, openfirewall=True, network_id=None): """ Creates a Port 
Forwarding Rule, used for Source NAT :param address: IP address of the Source NAT :type address: :class:`CloudStackAddress` :param private_port: Port of the virtual machine :type private_port: ``int`` :param protocol: Protocol of the rule :type protocol: ``str`` :param public_port: Public port on the Source NAT address :type public_port: ``int`` :param node: The virtual machine :type node: :class:`CloudStackNode` :param network_id: The network of the vm the Port Forwarding rule will be created for. Required when public Ip address is not associated with any Guest network yet (VPC case) :type network_id: ``string`` :rtype: :class:`CloudStackPortForwardingRule` """ args = { 'ipaddressid': address.id, 'protocol': protocol, 'privateport': int(private_port), 'publicport': int(public_port), 'virtualmachineid': node.id, 'openfirewall': openfirewall } if public_end_port: args['publicendport'] = int(public_end_port) if private_end_port: args['privateendport'] = int(private_end_port) if network_id: args['networkid'] = network_id result = self._async_request(command='createPortForwardingRule', params=args, method='GET') rule = CloudStackPortForwardingRule(node, result['portforwardingrule'] ['id'], address, protocol, public_port, private_port, public_end_port, private_end_port, network_id) node.extra['port_forwarding_rules'].append(rule) node.public_ips.append(address.address) return rule def ex_delete_port_forwarding_rule(self, node, rule): """ Remove a Port forwarding rule. 
        :param node: Node used in the rule
        :type node: :class:`CloudStackNode`

        :param rule: Forwarding rule which should be used
        :type rule: :class:`CloudStackPortForwardingRule`

        :rtype: ``bool``
        """
        # Optimistically update the cached node state before the API call;
        # on API failure the local state and server state will diverge.
        node.extra['port_forwarding_rules'].remove(rule)
        node.public_ips.remove(rule.address.address)
        res = self._async_request(command='deletePortForwardingRule',
                                  params={'id': rule.id},
                                  method='GET')
        return res['success']

    def ex_list_ip_forwarding_rules(self, account=None, domain_id=None,
                                    id=None, ipaddress_id=None,
                                    is_recursive=None, keyword=None,
                                    list_all=None, page=None,
                                    page_size=None, project_id=None,
                                    virtualmachine_id=None):
        """
        Lists all NAT/firewall forwarding rules

        :param account: List resources by account.
                        Must be used with the domainId parameter
        :type account: ``str``

        :param domain_id: List only resources belonging to
                          the domain specified
        :type domain_id: ``str``

        :param id: Lists rule with the specified ID
        :type id: ``str``

        :param ipaddress_id: list the rule belonging to
                             this public ip address
        :type ipaddress_id: ``str``

        :param is_recursive: Defaults to false, but if true, lists all
                             resources from the parent specified by the
                             domainId till leaves.
        :type is_recursive: ``bool``

        :param keyword: List by keyword
        :type keyword: ``str``

        :param list_all: If set to false, list only resources belonging to
                         the command's caller; if set to true - list
                         resources that the caller is authorized to see.
                         Default value is false
        :type list_all: ``bool``

        :param page: The page to list the keypairs from
        :type page: ``int``

        :param page_size: The number of results per page
        :type page_size: ``int``

        :param project_id: list objects by project
        :type project_id: ``str``

        :param virtualmachine_id: Lists all rules applied to
                                  the specified Vm
        :type virtualmachine_id: ``str``

        :rtype: ``list`` of :class:`CloudStackIPForwardingRule`
        """
        # Only forward explicitly-provided filters to the API.
        args = {}

        if account is not None:
            args['account'] = account

        if domain_id is not None:
            args['domainid'] = domain_id

        if id is not None:
            args['id'] = id

        if ipaddress_id is not None:
            args['ipaddressid'] = ipaddress_id

        if is_recursive is not None:
            args['isrecursive'] = is_recursive

        if keyword is not None:
            args['keyword'] = keyword

        if list_all is not None:
            args['listall'] = list_all

        if page is not None:
            args['page'] = page

        if page_size is not None:
            args['pagesize'] = page_size

        if project_id is not None:
            args['projectid'] = project_id

        if virtualmachine_id is not None:
            args['virtualmachineid'] = virtualmachine_id

        result = self._sync_request(command='listIpForwardingRules',
                                    params=args,
                                    method='GET')

        rules = []
        if result != {}:
            public_ips = self.ex_list_public_ips()
            nodes = self.list_nodes()
            for rule in result['ipforwardingrule']:
                # NOTE(review): assumes a matching node and public IP always
                # exist; node[0]/addr[0] raises IndexError otherwise.
                node = [n for n in nodes
                        if n.id == str(rule['virtualmachineid'])]
                addr = [a for a in public_ips
                        if a.address == rule['ipaddress']]
                rules.append(CloudStackIPForwardingRule
                             (node[0],
                              rule['id'],
                              addr[0],
                              rule['protocol'],
                              rule['startport'],
                              rule['endport']))
        return rules

    def ex_create_ip_forwarding_rule(self, node, address, protocol,
                                     start_port, end_port=None):
        """
        Add a NAT/firewall forwarding rule.
        :param node: Node which should be used
        :type node: :class:`CloudStackNode`

        :param address: CloudStackAddress which should be used
        :type address: :class:`CloudStackAddress`

        :param protocol: Protocol which should be used (TCP or UDP)
        :type protocol: ``str``

        :param start_port: Start port which should be used
        :type start_port: ``int``

        :param end_port: End port which should be used
        :type end_port: ``int``

        :rtype: :class:`CloudStackForwardingRule`
        """
        protocol = protocol.upper()
        if protocol not in ('TCP', 'UDP'):
            # Unsupported protocol: silently returns None instead of raising.
            return None

        args = {
            'ipaddressid': address.id,
            'protocol': protocol,
            'startport': int(start_port)
        }
        if end_port is not None:
            args['endport'] = int(end_port)

        result = self._async_request(command='createIpForwardingRule',
                                     params=args,
                                     method='GET')
        result = result['ipforwardingrule']
        rule = CloudStackIPForwardingRule(node,
                                          result['id'],
                                          address,
                                          protocol,
                                          start_port,
                                          end_port)
        node.extra['ip_forwarding_rules'].append(rule)
        return rule

    def ex_delete_ip_forwarding_rule(self, node, rule):
        """
        Remove a NAT/firewall forwarding rule.

        :param node: Node which should be used
        :type node: :class:`CloudStackNode`

        :param rule: Forwarding rule which should be used
        :type rule: :class:`CloudStackForwardingRule`

        :rtype: ``bool``
        """
        # Local state is updated before the API call is issued.
        node.extra['ip_forwarding_rules'].remove(rule)
        self._async_request(command='deleteIpForwardingRule',
                            params={'id': rule.id},
                            method='GET')
        return True

    def ex_create_network_acllist(self, name, vpc_id, description=None):
        """
        Create an ACL List for a network within a VPC.

        :param name: Name of the network ACL List
        :type name: ``string``

        :param vpc_id: Id of the VPC associated with this network ACL List
        :type vpc_id: ``string``

        :param description: Description of the network ACL List
        :type description: ``string``

        :rtype: :class:`CloudStackNetworkACLList`
        """
        args = {
            'name': name,
            'vpcid': vpc_id
        }
        if description:
            args['description'] = description

        result = self._sync_request(command='createNetworkACLList',
                                    params=args,
                                    method='GET')

        acl_list = CloudStackNetworkACLList(result['id'],
                                            name,
                                            vpc_id,
                                            self,
                                            description)
        return acl_list

    def ex_create_network_acl(self, protocol, acl_id, cidr_list,
                              start_port, end_port, action=None,
                              traffic_type=None):
        """
        Creates an ACL rule in the given network
        (the network has to belong to VPC)

        :param protocol: the protocol for the ACL rule. Valid values are
                         TCP/UDP/ICMP/ALL or valid protocol number
        :type protocol: ``string``

        :param acl_id: Name of the network ACL List
        :type acl_id: ``str``

        :param cidr_list: the cidr list to allow traffic from/to
        :type cidr_list: ``str``

        :param start_port: the starting port of ACL
        :type start_port: ``str``

        :param end_port: the ending port of ACL
        :type end_port: ``str``

        :param action: acl entry action, allow or deny
        :type action: ``str``

        :param traffic_type: the traffic type for the ACL, can be Ingress
                             or Egress, defaulted to Ingress if not
                             specified
        :type traffic_type: ``str``

        :rtype: :class:`CloudStackNetworkACL`
        """
        args = {
            'protocol': protocol,
            'aclid': acl_id,
            'cidrlist': cidr_list,
            'startport': start_port,
            'endport': end_port
        }
        if action:
            args['action'] = action
        else:
            # The API defaults the action to "allow"; mirror that default
            # locally so the returned object reflects it.
            action = "allow"
        if traffic_type:
            args['traffictype'] = traffic_type

        result = self._async_request(command='createNetworkACL',
                                     params=args,
                                     method='GET')

        acl = CloudStackNetworkACL(result['networkacl']['id'],
                                   protocol,
                                   acl_id,
                                   action,
                                   cidr_list,
                                   start_port,
                                   end_port,
                                   traffic_type)
        return acl

    def ex_list_network_acllists(self):
        """
        Lists all network ACLs

        :rtype: ``list`` of :class:`CloudStackNetworkACLList`
""" acllists = [] result = self._sync_request(command='listNetworkACLLists', method='GET') if not result: return acllists for acllist in result['networkacllist']: acllists.append(CloudStackNetworkACLList(acllist['id'], acllist['name'], acllist.get('vpcid', []), self, acllist['description'])) return acllists def ex_replace_network_acllist(self, acl_id, network_id): """ Create an ACL List for a network within a VPC.Replaces ACL associated with a Network or private gateway :param acl_id: the ID of the network ACL :type acl_id: ``string`` :param network_id: the ID of the network :type network_id: ``string`` :rtype: :class:`CloudStackNetworkACLList` """ args = { 'aclid': acl_id, 'networkid': network_id } self._async_request(command='replaceNetworkACLList', params=args, method='GET') return True def ex_list_network_acl(self): """ Lists all network ACL items :rtype: ``list`` of :class:`CloudStackNetworkACL` """ acls = [] result = self._sync_request(command='listNetworkACLs', method='GET') if not result: return acls for acl in result['networkacl']: acls.append(CloudStackNetworkACL(acl['id'], acl['protocol'], acl['aclid'], acl['action'], acl['cidrlist'], acl.get('startport', []), acl.get('endport', []), acl['traffictype'])) return acls def ex_list_keypairs(self, **kwargs): """ List Registered SSH Key Pairs :param projectid: list objects by project :type projectid: ``str`` :param page: The page to list the keypairs from :type page: ``int`` :param keyword: List by keyword :type keyword: ``str`` :param listall: If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false :type listall: ``bool`` :param pagesize: The number of results per page :type pagesize: ``int`` :param account: List resources by account. 
                        Must be used with the domainId parameter
        :type account: ``str``

        :param isrecursive: Defaults to false, but if true, lists all
                            resources from the parent specified by the
                            domainId till leaves.
        :type isrecursive: ``bool``

        :param fingerprint: A public key fingerprint to look for
        :type fingerprint: ``str``

        :param name: A key pair name to look for
        :type name: ``str``

        :param domainid: List only resources belonging to the domain
                         specified
        :type domainid: ``str``

        :return: A list of keypair dictionaries
        :rtype: ``list`` of ``dict``
        """
        warnings.warn('This method has been deprecated in favor of '
                      'list_key_pairs method')

        key_pairs = self.list_key_pairs(**kwargs)

        result = []

        for key_pair in key_pairs:
            item = {
                'name': key_pair.name,
                'fingerprint': key_pair.fingerprint,
                'privateKey': key_pair.private_key
            }
            result.append(item)

        return result

    def ex_create_keypair(self, name, **kwargs):
        """
        Creates a SSH KeyPair, returns fingerprint and private key

        :param name: Name of the keypair (required)
        :type name: ``str``

        :param projectid: An optional project for the ssh key
        :type projectid: ``str``

        :param domainid: An optional domainId for the ssh key.
                         If the account parameter is used,
                         domainId must also be used.
        :type domainid: ``str``

        :param account: An optional account for the ssh key.
                        Must be used with domainId.
        :type account: ``str``

        :return: A keypair dictionary
        :rtype: ``dict``
        """
        warnings.warn('This method has been deprecated in favor of '
                      'create_key_pair method')

        key_pair = self.create_key_pair(name=name, **kwargs)

        result = {
            'name': key_pair.name,
            'fingerprint': key_pair.fingerprint,
            'privateKey': key_pair.private_key
        }

        return result

    def ex_import_keypair_from_string(self, name, key_material):
        """
        Imports a new public key where the public key is passed in as a
        string

        :param name: The name of the public key to import.
        :type name: ``str``

        :param key_material: The contents of a public key file.
        :type key_material: ``str``

        :rtype: ``dict``
        """
        warnings.warn('This method has been deprecated in favor of '
                      'import_key_pair_from_string method')

        key_pair = self.import_key_pair_from_string(
            name=name, key_material=key_material)

        result = {
            'keyName': key_pair.name,
            'keyFingerprint': key_pair.fingerprint
        }

        return result

    def ex_import_keypair(self, name, keyfile):
        """
        Imports a new public key where the public key is passed via a
        filename

        :param name: The name of the public key to import.
        :type name: ``str``

        :param keyfile: The filename with path of the public key to import.
        :type keyfile: ``str``

        :rtype: ``dict``
        """
        warnings.warn('This method has been deprecated in favor of '
                      'import_key_pair_from_file method')

        key_pair = self.import_key_pair_from_file(name=name,
                                                  key_file_path=keyfile)

        result = {
            'keyName': key_pair.name,
            'keyFingerprint': key_pair.fingerprint
        }

        return result

    def ex_delete_keypair(self, keypair, **kwargs):
        """
        Deletes an existing SSH KeyPair

        :param keypair: Name of the keypair (required)
        :type keypair: ``str``

        :param projectid: The project associated with keypair
        :type projectid: ``str``

        :param domainid: The domain ID associated with the keypair
        :type domainid: ``str``

        :param account: The account associated with the keypair.
                        Must be used with the domainId parameter.
        :type account: ``str``

        :return: True of False based on success of Keypair deletion
        :rtype: ``bool``
        """
        warnings.warn('This method has been deprecated in favor of '
                      'delete_key_pair method')

        # Only the name is needed to address the server-side keypair.
        key_pair = KeyPair(name=keypair, public_key=None, fingerprint=None,
                           driver=self)

        return self.delete_key_pair(key_pair=key_pair)

    def ex_list_security_groups(self, **kwargs):
        """
        Lists Security Groups

        :param domainid: List only resources belonging to the domain
                         specified
        :type domainid: ``str``

        :param account: List resources by account.
                        Must be used with the domainId parameter.
        :type account: ``str``

        :param listall: If set to false, list only resources belonging to
                        the command's caller; if set to true
                        list resources that the caller is authorized
                        to see.
                        Default value is false
        :type listall: ``bool``

        :param pagesize: Number of entries per page
        :type pagesize: ``int``

        :param keyword: List by keyword
        :type keyword: ``str``

        :param tags: List resources by tags (key/value pairs)
        :type tags: ``dict``

        :param id: list the security group by the id provided
        :type id: ``str``

        :param securitygroupname: lists security groups by name
        :type securitygroupname: ``str``

        :param virtualmachineid: lists security groups by virtual machine
                                 id
        :type virtualmachineid: ``str``

        :param projectid: list objects by project
        :type projectid: ``str``

        :param isrecursive: (boolean) defaults to false, but if true,
                            lists all resources from the parent specified
                            by the domainId till leaves.
        :type isrecursive: ``bool``

        :param page: (integer)
        :type page: ``int``

        :rtype ``list``
        """
        extra_args = kwargs.copy()
        res = self._sync_request(command='listSecurityGroups',
                                 params=extra_args,
                                 method='GET')

        security_groups = res.get('securitygroup', [])

        return security_groups

    def ex_create_security_group(self, name, **kwargs):
        """
        Creates a new Security Group

        :param name: name of the security group (required)
        :type name: ``str``

        :param account: An optional account for the security group.
                        Must be used with domainId.
        :type account: ``str``

        :param domainid: An optional domainId for the security group.
                         If the account parameter is used,
                         domainId must also be used.
        :type domainid: ``str``

        :param description: The description of the security group
        :type description: ``str``

        :param projectid: Deploy vm for the project
        :type projectid: ``str``

        :rtype: ``dict``
        """
        extra_args = kwargs.copy()

        # NOTE(review): substring check — a new name that is contained in
        # any existing group name is also rejected; confirm this is
        # intentional.
        for sg in self.ex_list_security_groups():
            if name in sg['name']:
                raise LibcloudError('This Security Group name already exists')

        params = {'name': name}
        params.update(extra_args)

        return self._sync_request(command='createSecurityGroup',
                                  params=params,
                                  method='GET')['securitygroup']

    def ex_delete_security_group(self, name):
        """
        Deletes a given Security Group

        :param domainid: The domain ID of account owning the security group
        :type domainid: ``str``

        :param id: The ID of the security group.
                   Mutually exclusive with name parameter
        :type id: ``str``

        :param name: The ID of the security group.
                     Mutually exclusive with id parameter
        :type name: ``str``

        :param account: The account of the security group.
                        Must be specified with domain ID
        :type account: ``str``

        :param projectid: The project of the security group
        :type projectid: ``str``

        :rtype: ``bool``
        """
        return self._sync_request(command='deleteSecurityGroup',
                                  params={'name': name},
                                  method='GET')['success']

    def ex_authorize_security_group_ingress(self, securitygroupname,
                                            protocol, cidrlist,
                                            startport=None, endport=None,
                                            icmptype=None, icmpcode=None,
                                            **kwargs):
        """
        Creates a new Security Group Ingress rule

        :param securitygroupname: The name of the security group.
                                  Mutually exclusive with securitygroupid.
        :type securitygroupname: ``str``

        :param protocol: Can be TCP, UDP or ICMP.
                         Sometime other protocols can be used like AH, ESP
                         or GRE.
        :type protocol: ``str``

        :param cidrlist: Source address CIDR for which this rule applies.
        :type cidrlist: ``str``

        :param startport: Start port of the range for this ingress rule.
                          Applies to protocols TCP and UDP.
        :type startport: ``int``

        :param endport: End port of the range for this ingress rule.
                        It can be None to set only one port.
                        Applies to protocols TCP and UDP.
        :type endport: ``int``

        :param icmptype: Type of the ICMP packet (eg: 8 for Echo Request).
                         -1 or None means "all types".
                         Applies to protocol ICMP.
        :type icmptype: ``int``

        :param icmpcode: Code of the ICMP packet for the specified type.
                         If the specified type doesn't require a code set
                         this value to 0.
                         -1 or None means "all codes".
                         Applies to protocol ICMP.
        :type icmpcode: ``int``

        :keyword account: An optional account for the security group.
                          Must be used with domainId.
        :type account: ``str``

        :keyword domainid: An optional domainId for the security group.
                           If the account parameter is used, domainId must
                           also be used.

        :keyword projectid: An optional project of the security group
        :type projectid: ``str``

        :keyword securitygroupid: The ID of the security group.
                                  Mutually exclusive with
                                  securitygroupname
        :type securitygroupid: ``str``

        :keyword usersecuritygrouplist: User to security group mapping
        :type usersecuritygrouplist: ``dict``

        :rtype: ``dict``
        """
        args = kwargs.copy()
        protocol = protocol.upper()

        args.update({
            'securitygroupname': securitygroupname,
            'protocol': protocol,
            'cidrlist': cidrlist
        })

        # Validate that the port/ICMP arguments match the chosen protocol.
        if protocol not in ('TCP', 'UDP') and \
           (startport is not None or endport is not None):
            raise LibcloudError('"startport" and "endport" are only valid '
                                'with protocol TCP or UDP.')
        if protocol != 'ICMP' and \
           (icmptype is not None or icmpcode is not None):
            raise LibcloudError('"icmptype" and "icmpcode" are only valid '
                                'with protocol ICMP.')

        if protocol in ('TCP', 'UDP'):
            if startport is None:
                raise LibcloudError('Protocols TCP and UDP require at least '
                                    '"startport" to be set.')
            # A single-port rule: endport defaults to startport.
            if startport is not None and endport is None:
                endport = startport

            args.update({
                'startport': startport,
                'endport': endport
            })

        if protocol == 'ICMP':
            # -1 means "all types" / "all codes" for the API.
            if icmptype is None:
                icmptype = -1
            if icmpcode is None:
                icmpcode = -1

            args.update({
                'icmptype': icmptype,
                'icmpcode': icmpcode
            })

        return self._async_request(command='authorizeSecurityGroupIngress',
                                   params=args,
                                   method='GET')['securitygroup']

    def ex_revoke_security_group_ingress(self, rule_id):
        """
        Revoke/delete an ingress security rule

        :param rule_id: The ID of the ingress security rule
        :type rule_id: ``str``

        :rtype: ``bool``
        """
        self._async_request(command='revokeSecurityGroupIngress',
                            params={'id': rule_id},
                            method='GET')
        return True

    def ex_create_affinity_group(self, name, group_type):
        """
        Creates a new Affinity Group

        :param name: Name of the affinity group
        :type name: ``str``

        :param group_type: Type of the affinity group from the available
                           affinity/anti-affinity group types
        :type group_type: :class:`CloudStackAffinityGroupType`

        :param description: Optional description of the affinity group
        :type description: ``str``

        :param domainid: domain ID of the account owning the affinity
                         group
        :type domainid: ``str``

        :rtype: :class:`CloudStackAffinityGroup`
        """
        # Exact-name duplicate check before hitting the create API.
        for ag in self.ex_list_affinity_groups():
            if name == ag.name:
                raise LibcloudError('This Affinity Group name already exists')

        params = {'name': name, 'type': group_type.type}

        result = self._async_request(command='createAffinityGroup',
                                     params=params,
                                     method='GET')

        return self._to_affinity_group(result['affinitygroup'])

    def ex_delete_affinity_group(self, affinity_group):
        """
        Delete an Affinity Group

        :param affinity_group: Instance of affinity group
        :type affinity_group: :class:`CloudStackAffinityGroup`

        :rtype ``bool``
        """
        return self._async_request(command='deleteAffinityGroup',
                                   params={'id': affinity_group.id},
                                   method='GET')['success']

    def ex_update_node_affinity_group(self, node, affinity_group_list):
        """
        Updates the affinity/anti-affinity group associations of a virtual
        machine. The VM has to be stopped and restarted for the new
        properties to take effect.

        :param node: Node to update.
        :type node: :class:`CloudStackNode`

        :param affinity_group_list: List of CloudStackAffinityGroup to
                                    associate
        :type affinity_group_list: ``list`` of
                                   :class:`CloudStackAffinityGroup`

        :rtype :class:`CloudStackNode`
        """
        # The API expects a comma-separated list of affinity group IDs.
        affinity_groups = ','.join(ag.id for ag in affinity_group_list)

        result = self._async_request(
            command='updateVMAffinityGroup',
            params={
                'id': node.id,
                'affinitygroupids': affinity_groups},
            method='GET')
        return self._to_node(data=result['virtualmachine'])

    def ex_list_affinity_groups(self):
        """
        List Affinity Groups

        :rtype ``list`` of :class:`CloudStackAffinityGroup`
        """
        result = self._sync_request(command='listAffinityGroups',
                                    method='GET')

        # An empty result has no 'count' key.
        if not result.get('count'):
            return []

        affinity_groups = []
        for ag in result['affinitygroup']:
            affinity_groups.append(self._to_affinity_group(ag))

        return affinity_groups

    def ex_list_affinity_group_types(self):
        """
        List Affinity Group Types

        :rtype ``list`` of :class:`CloudStackAffinityGroupTypes`
        """
        result = self._sync_request(command='listAffinityGroupTypes',
                                    method='GET')

        if not result.get('count'):
            return []

        affinity_group_types = []
        for agt in result['affinityGroupType']:
            affinity_group_types.append(
                CloudStackAffinityGroupType(agt['type']))

        return affinity_group_types

    def ex_register_iso(self, name, url, location=None, **kwargs):
        """
        Registers an existing ISO by URL.
:param name: Name which should be used :type name: ``str`` :param url: Url should be used :type url: ``str`` :param location: Location which should be used :type location: :class:`NodeLocation` :rtype: ``str`` """ if location is None: location = self.list_locations()[0] params = {'name': name, 'displaytext': name, 'url': url, 'zoneid': location.id} params['bootable'] = kwargs.pop('bootable', False) if params['bootable']: os_type_id = kwargs.pop('ostypeid', None) if not os_type_id: raise LibcloudError('If bootable=True, ostypeid is required!') params['ostypeid'] = os_type_id return self._sync_request(command='registerIso', name=name, displaytext=name, url=url, zoneid=location.id, params=params) def ex_limits(self): """ Extra call to get account's resource limits, such as the amount of instances, volumes, snapshots and networks. CloudStack uses integers as the resource type so we will convert them to a more human readable string using the resource map A list of the resource type mappings can be found at http://goo.gl/17C6Gk :return: dict :rtype: ``dict`` """ result = self._sync_request(command='listResourceLimits', method='GET') limits = {} resource_map = { 0: 'max_instances', 1: 'max_public_ips', 2: 'max_volumes', 3: 'max_snapshots', 4: 'max_images', 5: 'max_projects', 6: 'max_networks', 7: 'max_vpc', 8: 'max_cpu', 9: 'max_memory', 10: 'max_primary_storage', 11: 'max_secondary_storage' } for limit in result.get('resourcelimit', []): # We will ignore unknown types resource = resource_map.get(int(limit['resourcetype']), None) if not resource: continue limits[resource] = int(limit['max']) return limits def ex_create_tags(self, resource_ids, resource_type, tags): """ Create tags for a resource (Node/StorageVolume/etc). A list of resource types can be found at http://goo.gl/6OKphH :param resource_ids: Resource IDs to be tagged. The resource IDs must all be associated with the resource_type. 
For example, for virtual machines (UserVm) you can only specify a list of virtual machine IDs. :type resource_ids: ``list`` of resource IDs :param resource_type: Resource type (eg: UserVm) :type resource_type: ``str`` :param tags: A dictionary or other mapping of strings to strings, associating tag names with tag values. :type tags: ``dict`` :rtype: ``bool`` """ params = {'resourcetype': resource_type, 'resourceids': ','.join(resource_ids)} for i, key in enumerate(tags): params['tags[%d].key' % i] = key params['tags[%d].value' % i] = tags[key] self._async_request(command='createTags', params=params, method='GET') return True def ex_delete_tags(self, resource_ids, resource_type, tag_keys): """ Delete tags from a resource. :param resource_ids: Resource IDs to be tagged. The resource IDs must all be associated with the resource_type. For example, for virtual machines (UserVm) you can only specify a list of virtual machine IDs. :type resource_ids: ``list`` of resource IDs :param resource_type: Resource type (eg: UserVm) :type resource_type: ``str`` :param tag_keys: A list of keys to delete. CloudStack only requires the keys from the key/value pair. :type tag_keys: ``list`` :rtype: ``bool`` """ params = {'resourcetype': resource_type, 'resourceids': ','.join(resource_ids)} for i, key in enumerate(tag_keys): params['tags[%s].key' % i] = key self._async_request(command='deleteTags', params=params, method='GET') return True def list_snapshots(self): """ Describe all snapshots. 
        :rtype: ``list`` of :class:`VolumeSnapshot`
        """
        snapshots = self._sync_request('listSnapshots',
                                       method='GET')
        list_snapshots = []

        for snap in snapshots['snapshot']:
            list_snapshots.append(self._to_snapshot(snap))
        return list_snapshots

    def create_volume_snapshot(self, volume, name=None):
        """
        Create snapshot from volume

        :param volume: Instance of ``StorageVolume``
        :type volume: ``StorageVolume``

        :param name: The name of the snapshot is disregarded
                     by CloudStack drivers
        :type name: `str`

        :rtype: :class:`VolumeSnapshot`
        """
        snapshot = self._async_request(command='createSnapshot',
                                       params={'volumeid': volume.id},
                                       method='GET')
        return self._to_snapshot(snapshot['snapshot'])

    def destroy_volume_snapshot(self, snapshot):
        # Delete the snapshot server-side; always reports success once the
        # async job completes.
        self._async_request(command='deleteSnapshot',
                            params={'id': snapshot.id},
                            method='GET')
        return True

    def ex_create_snapshot_template(self, snapshot, name, ostypeid,
                                    displaytext=None):
        """
        Create a template from a snapshot

        :param snapshot: Instance of ``VolumeSnapshot``
        :type snapshot: ``VolumeSnapshot``

        :param name: the name of the template
        :type name: ``str``

        :param ostypeid: the os type id
        :type ostypeid: ``str``

        :param displaytext: the display name of the template, defaults to
                            ``name`` when not given
        :type displaytext: ``str``

        :rtype: :class:`NodeImage`
        """
        if not displaytext:
            displaytext = name
        resp = self._async_request(command='createTemplate',
                                   params={
                                       'displaytext': displaytext,
                                       'name': name,
                                       'ostypeid': ostypeid,
                                       'snapshotid': snapshot.id})
        img = resp.get('template')
        extra = {
            'hypervisor': img['hypervisor'],
            'format': img['format'],
            'os': img['ostypename'],
            'displaytext': img['displaytext']
        }
        return NodeImage(id=img['id'],
                         name=img['name'],
                         driver=self.connection.driver,
                         extra=extra)

    def ex_list_os_types(self):
        """
        List all registered os types (needed for snapshot creation)

        :rtype: ``list``
        """
        ostypes = self._sync_request('listOsTypes')
        return ostypes['ostype']

    def ex_list_nics(self, node):
        """
        List the available networks

        :param node: Node Object
        :type node: :class:`CloudStackNode`

        :rtype ``list`` of :class:`CloudStackNic`
        """
        res = self._sync_request(command='listNics',
                                 params={'virtualmachineid': node.id},
                                 method='GET')
        items = res.get('nic', [])

        nics = []
        extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['nic']
        for item in items:
            extra = self._get_extra_dict(item, extra_map)

            nics.append(CloudStackNic(
                id=item['id'],
                network_id=item['networkid'],
                net_mask=item['netmask'],
                gateway=item['gateway'],
                ip_address=item['ipaddress'],
                is_default=item['isdefault'],
                mac_address=item['macaddress'],
                driver=self,
                extra=extra))

        return nics

    def ex_attach_nic_to_node(self, node, network, ip_address=None):
        """
        Add an extra Nic to a VM

        :param network: NetworkOffering object
        :type network: :class:`CloudStackNetwork`

        :param node: Node Object
        :type node: :class:`CloudStackNode`

        :param ip_address: Optional, specific IP for this Nic
        :type ip_address: ``str``

        :rtype: ``bool``
        """
        args = {
            'virtualmachineid': node.id,
            'networkid': network.id
        }

        if ip_address is not None:
            args['ipaddress'] = ip_address

        self._async_request(command='addNicToVirtualMachine',
                            params=args)
        return True

    def ex_detach_nic_from_node(self, nic, node):
        """
        Remove Nic from a VM

        :param nic: Nic object
        :type nic: :class:`CloudStackNetwork`

        :param node: Node Object
        :type node: :class:`CloudStackNode`

        :rtype: ``bool``
        """
        self._async_request(command='removeNicFromVirtualMachine',
                            params={'nicid': nic.id,
                                    'virtualmachineid': node.id})
        return True

    def ex_list_vpn_gateways(self, account=None, domain_id=None,
                             for_display=None, id=None, is_recursive=None,
                             keyword=None, list_all=None, page=None,
                             page_size=None, project_id=None, vpc_id=None):
        """
        List VPN Gateways.

        :param account: List resources by account (must be used with
                        the domain_id parameter).
        :type account: ``str``

        :param domain_id: List only resources belonging to the domain
                          specified.
        :type domain_id: ``str``

        :param for_display: List resources by display flag (only root
                            admin is eligible to pass this parameter).
        :type for_display: ``bool``

        :param id: ID of the VPN Gateway.
        :type id: ``str``

        :param is_recursive: Defaults to False, but if true, lists all
                             resources from the parent specified by the
                             domain ID till leaves.
        :type is_recursive: ``bool``

        :param keyword: List by keyword.
        :type keyword: ``str``

        :param list_all: If set to False, list only resources belonging to
                         the command's caller; if set to True - list
                         resources that the caller is authorized to see.
                         Default value is False.
        :type list_all: ``str``

        :param page: Start from page.
        :type page: ``int``

        :param page_size: Items per page.
        :type page_size: ``int``

        :param project_id: List objects by project.
        :type project_id: ``str``

        :param vpc_id: List objects by VPC.
        :type vpc_id: ``str``

        :rtype: ``list`` of :class:`CloudStackVpnGateway`
        """
        # Only forward explicitly-provided filters to the API.
        args = {}

        if account is not None:
            args['account'] = account

        if domain_id is not None:
            args['domainid'] = domain_id

        if for_display is not None:
            args['fordisplay'] = for_display

        if id is not None:
            args['id'] = id

        if is_recursive is not None:
            args['isrecursive'] = is_recursive

        if keyword is not None:
            args['keyword'] = keyword

        if list_all is not None:
            args['listall'] = list_all

        if page is not None:
            args['page'] = page

        if page_size is not None:
            args['pagesize'] = page_size

        if project_id is not None:
            args['projectid'] = project_id

        if vpc_id is not None:
            args['vpcid'] = vpc_id

        res = self._sync_request(command='listVpnGateways',
                                 params=args,
                                 method='GET')

        items = res.get('vpngateway', [])

        vpn_gateways = []
        extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['vpngateway']
        for item in items:
            extra = self._get_extra_dict(item, extra_map)

            vpn_gateways.append(CloudStackVpnGateway(
                id=item['id'],
                account=item['account'],
                domain=item['domain'],
                domain_id=item['domainid'],
                public_ip=item['publicip'],
                vpc_id=item['vpcid'],
                driver=self,
                extra=extra))

        return vpn_gateways

    def ex_create_vpn_gateway(self, vpc, for_display=None):
        """
        Creates a VPN Gateway.

        :param vpc: VPC to create the Gateway for (required).
        :type vpc: :class: `CloudStackVPC`

        :param for_display: Display the VPC to the end user or not.
        :type for_display: ``bool``

        :rtype: :class: `CloudStackVpnGateway`
        """
        args = {
            'vpcid': vpc.id,
        }

        if for_display is not None:
            args['fordisplay'] = for_display

        res = self._async_request(command='createVpnGateway',
                                  params=args,
                                  method='GET')

        item = res['vpngateway']

        extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['vpngateway']

        return CloudStackVpnGateway(id=item['id'],
                                    account=item['account'],
                                    domain=item['domain'],
                                    domain_id=item['domainid'],
                                    public_ip=item['publicip'],
                                    vpc_id=vpc.id,
                                    driver=self,
                                    extra=self._get_extra_dict(item,
                                                               extra_map))

    def ex_delete_vpn_gateway(self, vpn_gateway):
        """
        Deletes a VPN Gateway.

        :param vpn_gateway: The VPN Gateway (required).
        :type vpn_gateway: :class:`CloudStackVpnGateway`

        :rtype: ``bool``
        """
        res = self._async_request(command='deleteVpnGateway',
                                  params={'id': vpn_gateway.id},
                                  method='GET')

        return res['success']

    def ex_list_vpn_customer_gateways(self, account=None, domain_id=None,
                                      id=None, is_recursive=None,
                                      keyword=None, list_all=None,
                                      page=None, page_size=None,
                                      project_id=None):
        """
        List VPN Customer Gateways.

        :param account: List resources by account (must be used with
                        the domain_id parameter).
        :type account: ``str``

        :param domain_id: List only resources belonging to the domain
                          specified.
        :type domain_id: ``str``

        :param id: ID of the VPN Customer Gateway.
        :type id: ``str``

        :param is_recursive: Defaults to False, but if true, lists all
                             resources from the parent specified by the
                             domain_id till leaves.
        :type is_recursive: ``bool``

        :param keyword: List by keyword.
        :type keyword: ``str``

        :param list_all: If set to False, list only resources belonging to
                         the command's caller; if set to True - list
                         resources that the caller is authorized to see.
                         Default value is False.
        :type list_all: ``str``

        :param page: Start from page.
        :type page: ``int``

        :param page_size: Items per page.
        :type page_size: ``int``

        :param project_id: List objects by project.
        :type project_id: ``str``

        :rtype: ``list`` of :class:`CloudStackVpnCustomerGateway`
        """
        args = {}

        # Translate the keyword filters into CloudStack's parameter names,
        # sending only the ones the caller supplied.
        if account is not None:
            args['account'] = account

        if domain_id is not None:
            args['domainid'] = domain_id

        if id is not None:
            args['id'] = id

        if is_recursive is not None:
            args['isrecursive'] = is_recursive

        if keyword is not None:
            args['keyword'] = keyword

        if list_all is not None:
            args['listall'] = list_all

        if page is not None:
            args['page'] = page

        if page_size is not None:
            args['pagesize'] = page_size

        if project_id is not None:
            args['projectid'] = project_id

        res = self._sync_request(command='listVpnCustomerGateways',
                                 params=args,
                                 method='GET')

        # The key is absent from the response when there are no results.
        items = res.get('vpncustomergateway', [])

        vpn_customer_gateways = []

        extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['vpncustomergateway']

        for item in items:
            extra = self._get_extra_dict(item, extra_map)

            vpn_customer_gateways.append(CloudStackVpnCustomerGateway(
                id=item['id'],
                cidr_list=item['cidrlist'],
                esp_policy=item['esppolicy'],
                gateway=item['gateway'],
                ike_policy=item['ikepolicy'],
                ipsec_psk=item['ipsecpsk'],
                driver=self,
                extra=extra))

        return vpn_customer_gateways

    def ex_create_vpn_customer_gateway(self, cidr_list, esp_policy, gateway,
                                       ike_policy, ipsec_psk, account=None,
                                       domain_id=None, dpd=None,
                                       esp_lifetime=None, ike_lifetime=None,
                                       name=None):
        """
        Creates a VPN Customer Gateway.

        :param cidr_list: Guest CIDR list of the Customer Gateway (required).
        :type cidr_list: ``str``

        :param esp_policy: ESP policy of the Customer Gateway (required).
        :type esp_policy: ``str``

        :param gateway: Public IP address of the Customer Gateway (required).
        :type gateway: ``str``

        :param ike_policy: IKE policy of the Customer Gateway (required).
        :type ike_policy: ``str``

        :param ipsec_psk: IPsec preshared-key of the Customer Gateway
                          (required).
        :type ipsec_psk: ``str``

        :param account: The associated account with the Customer Gateway
                        (must be used with the domain_id param).
        :type account: ``str``

        :param domain_id: The domain ID associated with the Customer Gateway.
                          If used with the account parameter returns the
                          gateway associated with the account for the
                          specified domain.
        :type domain_id: ``str``

        :param dpd: If DPD is enabled for the VPN connection.
        :type dpd: ``bool``

        :param esp_lifetime: Lifetime of phase 2 VPN connection to the
                             Customer Gateway, in seconds.
        :type esp_lifetime: ``int``

        :param ike_lifetime: Lifetime of phase 1 VPN connection to the
                             Customer Gateway, in seconds.
        :type ike_lifetime: ``int``

        :param name: Name of the Customer Gateway.
        :type name: ``str``

        :rtype: :class:`CloudStackVpnCustomerGateway`
        """
        args = {
            'cidrlist': cidr_list,
            'esppolicy': esp_policy,
            'gateway': gateway,
            'ikepolicy': ike_policy,
            'ipsecpsk': ipsec_psk
        }

        # Optional arguments are only sent when explicitly provided.
        if account is not None:
            args['account'] = account

        if domain_id is not None:
            args['domainid'] = domain_id

        if dpd is not None:
            args['dpd'] = dpd

        if esp_lifetime is not None:
            args['esplifetime'] = esp_lifetime

        if ike_lifetime is not None:
            args['ikelifetime'] = ike_lifetime

        if name is not None:
            args['name'] = name

        # Asynchronous CloudStack command; this call blocks until the job
        # completes.
        res = self._async_request(command='createVpnCustomerGateway',
                                  params=args,
                                  method='GET')

        item = res['vpncustomergateway']

        extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['vpncustomergateway']

        return CloudStackVpnCustomerGateway(id=item['id'],
                                            cidr_list=cidr_list,
                                            esp_policy=esp_policy,
                                            gateway=gateway,
                                            ike_policy=ike_policy,
                                            ipsec_psk=ipsec_psk,
                                            driver=self,
                                            extra=self._get_extra_dict(
                                                item, extra_map))

    def ex_delete_vpn_customer_gateway(self, vpn_customer_gateway):
        """
        Deletes a VPN Customer Gateway.

        :param vpn_customer_gateway: The VPN Customer Gateway (required).
        :type vpn_customer_gateway: :class:`CloudStackVpnCustomerGateway`

        :rtype: ``bool``
        """
        res = self._async_request(command='deleteVpnCustomerGateway',
                                  params={'id': vpn_customer_gateway.id},
                                  method='GET')

        return res['success']

    def ex_list_vpn_connections(self, account=None, domain_id=None,
                                for_display=None, id=None, is_recursive=None,
                                keyword=None, list_all=None, page=None,
                                page_size=None, project_id=None, vpc_id=None):
        """
        List VPN Connections.

        :param account: List resources by account (must be used with
                        the domain_id parameter).
        :type account: ``str``

        :param domain_id: List only resources belonging to the domain
                          specified.
        :type domain_id: ``str``

        :param for_display: List resources by display flag (only root admin
                            is eligible to pass this parameter).
        :type for_display: ``bool``

        :param id: ID of the VPN Connection.
        :type id: ``str``

        :param is_recursive: Defaults to False, but if true, lists all
                             resources from the parent specified by the
                             domain_id till leaves.
        :type is_recursive: ``bool``

        :param keyword: List by keyword.
        :type keyword: ``str``

        :param list_all: If set to False, list only resources belonging to
                         the command's caller; if set to True - list
                         resources that the caller is authorized to see.
                         Default value is False.
        :type list_all: ``str``

        :param page: Start from page.
        :type page: ``int``

        :param page_size: Items per page.
        :type page_size: ``int``

        :param project_id: List objects by project.
        :type project_id: ``str``

        :param vpc_id: List objects by VPC.
        :type vpc_id: ``str``

        :rtype: ``list`` of :class:`CloudStackVpnConnection`
        """
        args = {}

        # Only forward the filters the caller actually supplied; CloudStack
        # expects its own lowercase, concatenated parameter names.
        if account is not None:
            args['account'] = account

        if domain_id is not None:
            args['domainid'] = domain_id

        if for_display is not None:
            args['fordisplay'] = for_display

        if id is not None:
            args['id'] = id

        if is_recursive is not None:
            args['isrecursive'] = is_recursive

        if keyword is not None:
            args['keyword'] = keyword

        if list_all is not None:
            args['listall'] = list_all

        if page is not None:
            args['page'] = page

        if page_size is not None:
            args['pagesize'] = page_size

        if project_id is not None:
            args['projectid'] = project_id

        if vpc_id is not None:
            args['vpcid'] = vpc_id

        res = self._sync_request(command='listVpnConnections',
                                 params=args,
                                 method='GET')

        # The key is absent from the response when there are no results.
        items = res.get('vpnconnection', [])

        vpn_connections = []

        extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['vpnconnection']

        for item in items:
            extra = self._get_extra_dict(item, extra_map)

            vpn_connections.append(CloudStackVpnConnection(
                id=item['id'],
                passive=item['passive'],
                vpn_customer_gateway_id=item['s2scustomergatewayid'],
                vpn_gateway_id=item['s2svpngatewayid'],
                state=item['state'],
                driver=self,
                extra=extra))

        return vpn_connections

    def ex_create_vpn_connection(self, vpn_customer_gateway, vpn_gateway,
                                 for_display=None, passive=None):
        """
        Creates a VPN Connection.

        :param vpn_customer_gateway: The VPN Customer Gateway (required).
        :type vpn_customer_gateway: :class:`CloudStackVpnCustomerGateway`

        :param vpn_gateway: The VPN Gateway (required).
        :type vpn_gateway: :class:`CloudStackVpnGateway`

        :param for_display: Display the Connection to the end user or not.
        :type for_display: ``bool``

        :param passive: If True, sets the connection to be passive.
        :type passive: ``bool``

        :rtype: :class:`CloudStackVpnConnection`
        """
        args = {
            's2scustomergatewayid': vpn_customer_gateway.id,
            's2svpngatewayid': vpn_gateway.id,
        }

        if for_display is not None:
            args['fordisplay'] = for_display

        if passive is not None:
            args['passive'] = passive

        # Asynchronous CloudStack command; this call blocks until the job
        # completes.
        res = self._async_request(command='createVpnConnection',
                                  params=args,
                                  method='GET')

        item = res['vpnconnection']

        extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['vpnconnection']

        return CloudStackVpnConnection(
            id=item['id'],
            passive=item['passive'],
            vpn_customer_gateway_id=vpn_customer_gateway.id,
            vpn_gateway_id=vpn_gateway.id,
            state=item['state'],
            driver=self,
            extra=self._get_extra_dict(item, extra_map))

    def ex_delete_vpn_connection(self, vpn_connection):
        """
        Deletes a VPN Connection.

        :param vpn_connection: The VPN Connection (required).
        :type vpn_connection: :class:`CloudStackVpnConnection`

        :rtype: ``bool``
        """
        res = self._async_request(command='deleteVpnConnection',
                                  params={'id': vpn_connection.id},
                                  method='GET')

        return res['success']

    def _to_snapshot(self, data):
        """
        Create snapshot object from data

        :param data: Node data object.
        :type data: ``dict``

        :rtype: :class:`VolumeSnapshot`
        """
        # Optional attributes default to None when absent from the response.
        extra = {
            'tags': data.get('tags', None),
            'name': data.get('name', None),
            'volume_id': data.get('volumeid', None),
        }
        return VolumeSnapshot(data['id'], driver=self, extra=extra)

    def _to_node(self, data, public_ips=None):
        """
        Convert an API virtual machine dict into a :class:`CloudStackNode`.

        :param data: Node data object.
        :type data: ``dict``

        :param public_ips: A list of additional IP addresses belonging to
                           this node. (optional)
        :type public_ips: ``list`` or ``None``
        """
        id = data['id']

        # Prefer the explicit name; fall back to the display name.
        if 'name' in data:
            name = data['name']
        elif 'displayname' in data:
            name = data['displayname']
        else:
            name = None

        state = self.NODE_STATE_MAP[data['state']]

        public_ips = public_ips if public_ips else []
        private_ips = []

        # Classify each NIC address as private or public by its subnet.
        for nic in data['nic']:
            if is_private_subnet(nic['ipaddress']):
                private_ips.append(nic['ipaddress'])
            else:
                public_ips.append(nic['ipaddress'])

        security_groups = data.get('securitygroup', [])

        if security_groups:
            security_groups = [sg['name'] for sg in security_groups]

        affinity_groups = data.get('affinitygroup', [])

        if affinity_groups:
            affinity_groups = [ag['id'] for ag in affinity_groups]

        created = data.get('created', False)

        extra = self._get_extra_dict(data,
                                     RESOURCE_EXTRA_ATTRIBUTES_MAP['node'])

        # Add additional parameters to extra
        extra['security_group'] = security_groups
        extra['affinity_group'] = affinity_groups
        extra['ip_addresses'] = []
        extra['ip_forwarding_rules'] = []
        extra['port_forwarding_rules'] = []
        extra['created'] = created

        if 'tags' in data:
            extra['tags'] = self._get_resource_tags(data['tags'])

        # De-duplicate the public IPs: the caller-provided list may overlap
        # with the addresses reported on the NICs.
        node = CloudStackNode(id=id, name=name, state=state,
                              public_ips=list(set(public_ips)),
                              private_ips=private_ips, driver=self,
                              extra=extra)
        return node

    def _to_key_pairs(self, data):
        # Convert a list of API keypair dicts into KeyPair objects.
        key_pairs = [self._to_key_pair(data=item) for item in data]
        return key_pairs

    def _to_key_pair(self, data):
        # 'publickey' / 'privatekey' are not present on every response
        # (the private key is typically only returned at creation time).
        key_pair = KeyPair(name=data['name'],
                           fingerprint=data['fingerprint'],
                           public_key=data.get('publickey', None),
                           private_key=data.get('privatekey', None),
                           driver=self)
        return key_pair

    def _to_affinity_group(self, data):
        # Build a CloudStackAffinityGroup, tolerating missing optional keys.
        affinity_group = CloudStackAffinityGroup(
            id=data['id'],
            name=data['name'],
            group_type=CloudStackAffinityGroupType(data['type']),
            account=data.get('account', ''),
            domain=data.get('domain', ''),
            domainid=data.get('domainid', ''),
            description=data.get('description', ''),
            virtualmachine_ids=data.get('virtualmachineIds', ''))

        return affinity_group

    def _get_resource_tags(self, tag_set):
""" Parse tags from the provided element and return a dictionary with key/value pairs. :param tag_set: A list of key/value tag pairs :type tag_set: ``list``` :rtype: ``dict`` """ tags = {} for tag in tag_set: key = tag['key'] value = tag['value'] tags[key] = value return tags def _get_extra_dict(self, response, mapping): """ Extract attributes from the element based on rules provided in the mapping dictionary. :param response: The JSON response to parse the values from. :type response: ``dict`` :param mapping: Dictionary with the extra layout :type mapping: ``dict`` :rtype: ``dict`` """ extra = {} for attribute, values in mapping.items(): transform_func = values['transform_func'] value = response.get(values['key_name'], None) if value is not None: extra[attribute] = transform_func(value) else: extra[attribute] = None return extra def _to_volume_state(self, vol): state = self.VOLUME_STATE_MAP.get(vol['state'], StorageVolumeState.UNKNOWN) # If a volume is 'Ready' and is attached to a virtualmachine, set # the status to INUSE if state == StorageVolumeState.AVAILABLE and 'virtualmachineid' in vol: state = StorageVolumeState.INUSE return state apache-libcloud-2.2.1/libcloud/compute/drivers/ec2.py0000664000175000017500000077634313153541406022416 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Amazon EC2, Eucalyptus, Nimbus and Outscale drivers. """ import re import sys import base64 import copy import warnings import time from libcloud.utils.py3 import ET from libcloud.utils.py3 import b, basestring, ensure_string from libcloud.utils.xml import fixxpath, findtext, findattr, findall from libcloud.utils.publickey import get_pubkey_ssh2_fingerprint from libcloud.utils.publickey import get_pubkey_comment from libcloud.utils.iso8601 import parse_date from libcloud.common.aws import AWSBaseResponse, SignedAWSConnection from libcloud.common.aws import DEFAULT_SIGNATURE_VERSION from libcloud.common.types import (InvalidCredsError, MalformedResponseError, LibcloudError) from libcloud.compute.providers import Provider from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot from libcloud.compute.base import KeyPair from libcloud.compute.types import NodeState, KeyPairDoesNotExistError, \ StorageVolumeState, VolumeSnapshotState __all__ = [ 'API_VERSION', 'NAMESPACE', 'INSTANCE_TYPES', 'OUTSCALE_INSTANCE_TYPES', 'OUTSCALE_SAS_REGION_DETAILS', 'OUTSCALE_INC_REGION_DETAILS', 'DEFAULT_EUCA_API_VERSION', 'EUCA_NAMESPACE', 'EC2NodeDriver', 'BaseEC2NodeDriver', 'NimbusNodeDriver', 'EucNodeDriver', 'OutscaleSASNodeDriver', 'OutscaleINCNodeDriver', 'EC2NodeLocation', 'EC2ReservedNode', 'EC2SecurityGroup', 'EC2ImportSnapshotTask', 'EC2PlacementGroup', 'EC2Network', 'EC2NetworkSubnet', 'EC2NetworkInterface', 'EC2RouteTable', 'EC2Route', 'EC2SubnetAssociation', 'ExEC2AvailabilityZone', 'IdempotentParamError' ] API_VERSION = '2016-11-15' NAMESPACE = 'http://ec2.amazonaws.com/doc/%s/' % (API_VERSION) # Eucalyptus Constants DEFAULT_EUCA_API_VERSION = '3.3.0' EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (DEFAULT_EUCA_API_VERSION) # Outscale Constants 
DEFAULT_OUTSCALE_API_VERSION = '2016-04-01' OUTSCALE_NAMESPACE = 'http://api.outscale.com/wsdl/fcuext/2014-04-15/' """ Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them. From http://aws.amazon.com/ec2/instance-types/ and ram = [MiB], disk = [GB] """ def GiB(value): return int(value * 1024) INSTANCE_TYPES = { 't1.micro': { 'id': 't1.micro', 'name': 'Micro Instance', 'ram': GiB(0.613), 'disk': 15, # GB 'bandwidth': None }, 'm1.small': { 'id': 'm1.small', 'name': 'Small Instance', 'ram': GiB(1.7), 'disk': 160, # GB 'bandwidth': None }, 'm1.medium': { 'id': 'm1.medium', 'name': 'Medium Instance', 'ram': GiB(3.75), 'disk': 410, # GB 'bandwidth': None }, 'm1.large': { 'id': 'm1.large', 'name': 'Large Instance', 'ram': GiB(7.5), 'disk': 2 * 420, # GB 'bandwidth': None, 'extra': { 'cpu': 2 } }, 'm1.xlarge': { 'id': 'm1.xlarge', 'name': 'Extra Large Instance', 'ram': GiB(15), 'disk': 4 * 420, # GB 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'c1.medium': { 'id': 'c1.medium', 'name': 'High-CPU Medium Instance', 'ram': GiB(1.7), 'disk': 350, # GB 'bandwidth': None, 'extra': { 'cpu': 2 } }, 'c1.xlarge': { 'id': 'c1.xlarge', 'name': 'High-CPU Extra Large Instance', 'ram': GiB(7), 'disk': 4 * 420, # GB 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'm2.xlarge': { 'id': 'm2.xlarge', 'name': 'High-Memory Extra Large Instance', 'ram': GiB(17.1), 'disk': 420, # GB 'bandwidth': None, 'extra': { 'cpu': 2 } }, 'm2.2xlarge': { 'id': 'm2.2xlarge', 'name': 'High-Memory Double Extra Large Instance', 'ram': GiB(34.2), 'disk': 850, # GB 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'm2.4xlarge': { 'id': 'm2.4xlarge', 'name': 'High-Memory Quadruple Extra Large Instance', 'ram': GiB(68.4), 'disk': 2 * 840, # GB 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'm3.medium': { 'id': 'm3.medium', 'name': 'Medium Instance', 'ram': GiB(3.75), 'disk': 4, # GB 'bandwidth': None, 'extra': { 'cpu': 1 } }, 'm3.large': { 'id': 'm3.large', 'name': 'Large Instance', 'ram': GiB(7.5), 'disk': 
32, # GB 'bandwidth': None, 'extra': { 'cpu': 2 } }, 'm3.xlarge': { 'id': 'm3.xlarge', 'name': 'Extra Large Instance', 'ram': GiB(15), 'disk': 2 * 40, # GB 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'm3.2xlarge': { 'id': 'm3.2xlarge', 'name': 'Double Extra Large Instance', 'ram': GiB(30), 'disk': 2 * 80, # GB 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'm4.large': { 'id': 'm4.large', 'name': 'Large Instance', 'ram': GiB(8), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 2 } }, 'm4.xlarge': { 'id': 'm4.xlarge', 'name': 'Extra Large Instance', 'ram': GiB(16), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'm4.2xlarge': { 'id': 'm4.2xlarge', 'name': 'Double Extra Large Instance', 'ram': GiB(32), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'm4.4xlarge': { 'id': 'm4.4xlarge', 'name': 'Quadruple Extra Large Instance', 'ram': GiB(64), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 16 } }, 'm4.10xlarge': { 'id': 'm4.10xlarge', 'name': '10 Extra Large Instance', 'ram': GiB(160), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 40 } }, 'm4.16xlarge': { 'id': 'm4.16xlarge', 'name': '16 Extra Large Instance', 'ram': GiB(256), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 64 } }, 'cg1.4xlarge': { 'id': 'cg1.4xlarge', 'name': 'Cluster GPU Quadruple Extra Large Instance', 'ram': GiB(22.5), 'disk': 2 * 840, # GB 'bandwidth': None, 'extra': { 'cpu': 16 } }, 'g2.2xlarge': { 'id': 'g2.2xlarge', 'name': 'Cluster GPU G2 Double Extra Large Instance', 'ram': GiB(15), 'disk': 60, # GB 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'g2.8xlarge': { 'id': 'g2.8xlarge', 'name': 'Cluster GPU G2 Eight Extra Large Instance', 'ram': GiB(60), 'disk': 2 * 120, # GB 'bandwidth': None, 'extra': { 'cpu': 32 } }, 'g3.4xlarge': { 'id': 'g3.4xlarge', 'name': 'Cluster GPU G3 Four Extra Large Instance', 'ram': GiB(122), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 16, 'gpu': 1, 'gpu_ram': GiB(8) } }, 
'g3.8xlarge': { 'id': 'g3.8xlarge', 'name': 'Cluster GPU G3 Eight Extra Large Instance', 'ram': GiB(244), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 32, 'gpu': 2, 'gpu_ram': GiB(16) } }, 'g3.16xlarge': { 'id': 'g3.16xlarge', 'name': 'Cluster GPU G3 16 Extra Large Instance', 'ram': GiB(488), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 64, 'gpu': 4, 'gpu_ram': GiB(32) } }, 'p2.xlarge': { 'id': 'p2.xlarge', 'name': 'Cluster GPU P2 Large Instance', 'ram': GiB(61), 'disk': 4, 'bandwidth': None }, 'p2.8xlarge': { 'id': 'p2.8xlarge', 'name': 'Cluster GPU P2 Large Instance', 'ram': GiB(488), 'disk': 32, 'bandwidth': None }, 'p2.16xlarge': { 'id': 'p2.16xlarge', 'name': 'Cluster GPU P2 Large Instance', 'ram': GiB(732), 'disk': 64, 'bandwidth': None }, 'cc1.4xlarge': { 'id': 'cc1.4xlarge', 'name': 'Cluster Compute Quadruple Extra Large Instance', 'ram': 23552, 'disk': 1690, 'bandwidth': None }, 'cc2.8xlarge': { 'id': 'cc2.8xlarge', 'name': 'Cluster Compute Eight Extra Large Instance', 'ram': GiB(60.5), 'disk': 4 * 840, # GB 'bandwidth': None, 'extra': { 'cpu': 32 } }, # c3 instances have 2 SSDs of the specified disk size 'c3.large': { 'id': 'c3.large', 'name': 'Compute Optimized Large Instance', 'ram': GiB(3.75), 'disk': 2 * 16, # GB 'bandwidth': None, 'extra': { 'cpu': 2 } }, 'c3.xlarge': { 'id': 'c3.xlarge', 'name': 'Compute Optimized Extra Large Instance', 'ram': GiB(7.5), 'disk': 2 * 40, # GB 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'c3.2xlarge': { 'id': 'c3.2xlarge', 'name': 'Compute Optimized Double Extra Large Instance', 'ram': GiB(15), 'disk': 2 * 80, # GB 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'c3.4xlarge': { 'id': 'c3.4xlarge', 'name': 'Compute Optimized Quadruple Extra Large Instance', 'ram': GiB(30), 'disk': 2 * 160, # GB 'bandwidth': None, 'extra': { 'cpu': 16 } }, 'c3.8xlarge': { 'id': 'c3.8xlarge', 'name': 'Compute Optimized Eight Extra Large Instance', 'ram': GiB(60), 'disk': 2 * 320, # GB 'bandwidth': None, 'extra': { 
'cpu': 32 } }, 'c4.large': { 'id': 'c4.large', 'name': 'Compute Optimized Large Instance', 'ram': GiB(3.75), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 2 } }, 'c4.xlarge': { 'id': 'c4.xlarge', 'name': 'Compute Optimized Extra Large Instance', 'ram': GiB(7.5), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'c4.2xlarge': { 'id': 'c4.2xlarge', 'name': 'Compute Optimized Double Large Instance', 'ram': GiB(15), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'c4.4xlarge': { 'id': 'c4.4xlarge', 'name': 'Compute Optimized Quadruple Extra Large Instance', 'ram': GiB(30), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 16 } }, 'c4.8xlarge': { 'id': 'c4.8xlarge', 'name': 'Compute Optimized Eight Extra Large Instance', 'ram': GiB(60), 'disk': 0, # EBS only 'bandwidth': None, 'extra': { 'cpu': 32 } }, 'cr1.8xlarge': { 'id': 'cr1.8xlarge', 'name': 'High Memory Cluster Eight Extra Large', 'ram': GiB(244), 'disk': 2 * 120, # GB 'bandwidth': None, 'extra': { 'cpu': 32 } }, 'hs1.4xlarge': { 'id': 'hs1.4xlarge', 'name': 'High Storage Quadruple Extra Large Instance', 'ram': GiB(64), 'disk': 2 * 1024, # GB 'bandwidth': None, 'extra': { 'cpu': 16 } }, 'hs1.8xlarge': { 'id': 'hs1.8xlarge', 'name': 'High Storage Eight Extra Large Instance', 'ram': GiB(117), 'disk': 24 * 2000, 'bandwidth': None, 'extra': { 'cpu': 17 } }, # i2 instances have up to eight SSD drives 'i2.xlarge': { 'id': 'i2.xlarge', 'name': 'High I/O Storage Optimized Extra Large Instance', 'ram': GiB(30.5), 'disk': 800, # GB 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'i2.2xlarge': { 'id': 'i2.2xlarge', 'name': 'High I/O Storage Optimized Double Extra Large Instance', 'ram': GiB(61), 'disk': 2 * 800, # GB 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'i2.4xlarge': { 'id': 'i2.4xlarge', 'name': 'High I/O Storage Optimized Quadruple Large Instance', 'ram': GiB(122), 'disk': 4 * 800, # GB 'bandwidth': None, 'extra': { 'cpu': 16 } }, 'i2.8xlarge': { 'id': 
'i2.8xlarge', 'name': 'High I/O Storage Optimized Eight Extra Large Instance', 'ram': GiB(244), 'disk': 8 * 800, # GB 'bandwidth': None, 'extra': { 'cpu': 32 } }, 'i3.large': { 'id': 'i3.large', 'name': 'High I/O Instances', 'ram': GiB(15.25), 'disk': 1 * 475, # GB 'bandwidth': None, 'extra': { 'cpu': 2 } }, 'i3.xlarge': { 'id': 'i3.xlarge', 'name': 'High I/O Instances', 'ram': GiB(30.5), 'disk': 1 * 950, # GB 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'i3.2xlarge': { 'id': 'i3.2xlarge', 'name': 'High I/O Instances', 'ram': GiB(61), 'disk': 1 * 1900, # GB 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'i3.4xlarge': { 'id': 'i3.4xlarge', 'name': 'High I/O Instances', 'ram': GiB(122), 'disk': 2 * 1900, # GB 'bandwidth': None, 'extra': { 'cpu': 16 } }, 'i3.8xlarge': { 'id': 'i3.8xlarge', 'name': 'High I/O Instances', 'ram': GiB(244), 'disk': 4 * 1900, # GB 'bandwidth': None, 'extra': { 'cpu': 32 } }, 'i3.16xlarge': { 'id': 'i3.16xlarge', 'name': 'High I/O Instances', 'ram': GiB(488), 'disk': 8 * 1900, # GB 'bandwidth': None, 'extra': { 'cpu': 64 } }, 'd2.xlarge': { 'id': 'd2.xlarge', 'name': 'Dense Storage Optimized Extra Large Instance', 'ram': GiB(30.5), 'disk': 3 * 2000, # GB 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'd2.2xlarge': { 'id': 'd2.2xlarge', 'name': 'Dense Storage Optimized Double Extra Large Instance', 'ram': GiB(61), 'disk': 6 * 2000, # GB 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'd2.4xlarge': { 'id': 'd2.4xlarge', 'name': 'Dense Storage Optimized Quadruple Extra Large Instance', 'ram': GiB(122), 'disk': 12 * 2000, # GB 'bandwidth': None, 'extra': { 'cpu': 16 } }, 'd2.8xlarge': { 'id': 'd2.8xlarge', 'name': 'Dense Storage Optimized Eight Extra Large Instance', 'ram': GiB(244), 'disk': 24 * 2000, # GB 'bandwidth': None, 'extra': { 'cpu': 36 } }, # 1x SSD 'r3.large': { 'id': 'r3.large', 'name': 'Memory Optimized Large instance', 'ram': GiB(15.25), 'disk': 32, # GB 'bandwidth': None, 'extra': { 'cpu': 2 } }, 'r3.xlarge': { 'id': 'r3.xlarge', 'name': 
'Memory Optimized Extra Large instance', 'ram': GiB(30.5), 'disk': 80, # GB 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'r3.2xlarge': { 'id': 'r3.2xlarge', 'name': 'Memory Optimized Double Extra Large instance', 'ram': GiB(61), 'disk': 160, # GB 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'r3.4xlarge': { 'id': 'r3.4xlarge', 'name': 'Memory Optimized Quadruple Extra Large instance', 'ram': GiB(122), 'disk': 320, # GB 'bandwidth': None, 'extra': { 'cpu': 16 } }, 'r3.8xlarge': { 'id': 'r3.8xlarge', 'name': 'Memory Optimized Eight Extra Large instance', 'ram': GiB(244), 'disk': 2 * 320, # GB 'bandwidth': None, 'extra': { 'cpu': 32 } }, 'r4.large': { 'id': 'r4.large', 'name': 'Memory Optimized Large instance', 'ram': GiB(15.25), 'disk': 0, # GB 'bandwidth': None, 'extra': { 'cpu': 2 } }, 'r4.xlarge': { 'id': 'r4.xlarge', 'name': 'Memory Optimized Extra Large instance', 'ram': GiB(30.5), 'disk': 0, # GB 'bandwidth': None, 'extra': { 'cpu': 4 } }, 'r4.2xlarge': { 'id': 'r4.2xlarge', 'name': 'Memory Optimized Double Extra Large instance', 'ram': GiB(61), 'disk': 0, # GB 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'r4.4xlarge': { 'id': 'r4.4xlarge', 'name': 'Memory Optimized Quadruple Extra Large instance', 'ram': GiB(122), 'disk': 0, # GB 'bandwidth': None, 'extra': { 'cpu': 16 } }, 'r4.8xlarge': { 'id': 'r4.8xlarge', 'name': 'Memory Optimized Eight Extra Large instance', 'ram': GiB(244), 'disk': 0, # GB 'bandwidth': None, 'extra': { 'cpu': 32 } }, 'r4.16xlarge': { 'id': 'r4.16xlarge', 'name': 'Memory Optimized Sixteen Extra Large instance', 'ram': GiB(488), 'disk': 0, # GB 'bandwidth': None, 'extra': { 'cpu': 64 } }, # Burstable Performance General Purpose 't2.nano': { 'id': 't2.nano', 'name': 'Burstable Performance Nano Instance', 'ram': 512, 'disk': 0, # EBS Only 'bandwidth': None, 'extra': { 'cpu': 1 } }, 't2.micro': { 'id': 't2.micro', 'name': 'Burstable Performance Micro Instance', 'ram': GiB(1), 'disk': 0, # EBS Only 'bandwidth': None, 'extra': { 'cpu': 1 } }, 
't2.small': { 'id': 't2.small', 'name': 'Burstable Performance Small Instance', 'ram': GiB(2), 'disk': 0, # EBS Only 'bandwidth': None, 'extra': { 'cpu': 1 } }, 't2.medium': { 'id': 't2.medium', 'name': 'Burstable Performance Medium Instance', 'ram': GiB(4), 'disk': 0, # EBS Only 'bandwidth': None, 'extra': { 'cpu': 2 } }, 't2.large': { 'id': 't2.large', 'name': 'Burstable Performance Medium Instance', 'ram': GiB(8), 'disk': 0, # EBS Only 'bandwidth': None, 'extra': { 'cpu': 2 } }, 't2.xlarge': { 'id': 't2.xlarge', 'name': 'Burstable Performance Extra Large Instance', 'ram': GiB(16), 'disk': 0, # EBS Only 'bandwidth': None, 'extra': { 'cpu': 4 } }, 't2.2xlarge': { 'id': 't2.2xlarge', 'name': 'Burstable Performance Double Extra Large Instance', 'ram': GiB(32), 'disk': 0, # EBS Only 'bandwidth': None, 'extra': { 'cpu': 8 } }, 'x1.32xlarge': { 'id': 'x1.32xlarge', 'name': 'Memory Optimized ThirtyTwo Extra Large instance', 'ram': GiB(1952), 'disk': 2 * 1920, # GB 'bandwidth': None, 'extra': { 'cpu': 128 } } } # From REGION_DETAILS = { # US East (Northern Virginia) Region 'us-east-1': { 'endpoint': 'ec2.us-east-1.amazonaws.com', 'api_name': 'ec2_us_east', 'country': 'USA', 'signature_version': '2', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'cc2.8xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'cg1.4xlarge', 'g2.2xlarge', 'g2.8xlarge', 'g3.4xlarge', 'g3.8xlarge', 'g3.16xlarge', 'cr1.8xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 
'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' ] }, # US West (Northern California) Region 'us-west-1': { 'endpoint': 'ec2.us-west-1.amazonaws.com', 'api_name': 'ec2_us_west', 'country': 'USA', 'signature_version': '2', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'g2.2xlarge', 'g2.8xlarge', 'g3.4xlarge', 'g3.8xlarge', 'g3.16xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large' ] }, # US East (Ohio) Region 'us-east-2': { 'endpoint': 'ec2.us-east-2.amazonaws.com', 'api_name': 'ec2_us_east_ohio', 'country': 'USA', 'signature_version': '4', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'cc2.8xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'cg1.4xlarge', 'g2.2xlarge', 'g2.8xlarge', 'g3.4xlarge', 'g3.8xlarge', 'g3.16xlarge', 'cr1.8xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 
'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' ] }, # US West (Oregon) Region 'us-west-2': { 'endpoint': 'ec2.us-west-2.amazonaws.com', 'api_name': 'ec2_us_west_oregon', 'country': 'US', 'signature_version': '2', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'g2.2xlarge', 'g2.8xlarge', 'g3.4xlarge', 'g3.8xlarge', 'g3.16xlarge', 'p2.xlarge', 'p2.8xlarge', 'p2.16xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'hs1.8xlarge', 'cc2.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' ] }, # EU (Ireland) Region 'eu-west-1': { 'endpoint': 'ec2.eu-west-1.amazonaws.com', 'api_name': 'ec2_eu_west', 'country': 'Ireland', 'signature_version': '2', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'g2.2xlarge', 'g2.8xlarge', 'g3.4xlarge', 'g3.8xlarge', 'g3.16xlarge', 'c3.large', 
'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'hs1.8xlarge', 'cc2.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' ] }, # EU (London) Region 'eu-west-2': { 'endpoint': 'ec2.eu-west-2.amazonaws.com', 'api_name': 'ec2_eu_west_london', 'country': 'United Kingdom', 'signature_version': '4', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'cc2.8xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'cg1.4xlarge', 'g2.2xlarge', 'g2.8xlarge', 'cr1.8xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' ] }, # EU (Frankfurt) Region 'eu-central-1': { 'endpoint': 'ec2.eu-central-1.amazonaws.com', 'api_name': 'ec2_eu_central', 'country': 'Frankfurt', 'signature_version': '4', 'instance_types': [ 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 
'm4.10xlarge', 'm4.16xlarge', 'c3.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' ] }, # Asia Pacific (Mumbai, India) Region 'ap-south-1': { 'endpoint': 'ec2.ap-south-1.amazonaws.com', 'api_name': 'ec2_ap_south_1', 'country': 'India', 'signature_version': '4', 'instance_types': [ 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge' ] }, # Asia Pacific (Singapore) Region 'ap-southeast-1': { 'endpoint': 'ec2.ap-southeast-1.amazonaws.com', 'api_name': 'ec2_ap_southeast', 'country': 'Singapore', 'signature_version': '2', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 
'd2.8xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 'x1.32xlarge' ] }, # Asia Pacific (Tokyo) Region 'ap-northeast-1': { 'endpoint': 'ec2.ap-northeast-1.amazonaws.com', 'api_name': 'ec2_ap_northeast', 'country': 'Japan', 'signature_version': '2', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'c1.medium', 'g2.2xlarge', 'g2.8xlarge', 'c1.xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' ] }, # Asia Pacific (Seoul) Region 'ap-northeast-2': { 'endpoint': 'ec2.ap-northeast-2.amazonaws.com', 'api_name': 'ec2_ap_northeast', 'country': 'South Korea', 'signature_version': '4', 'instance_types': [ 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' 
] }, # South America (Sao Paulo) Region 'sa-east-1': { 'endpoint': 'ec2.sa-east-1.amazonaws.com', 'api_name': 'ec2_sa_east', 'country': 'Brazil', 'signature_version': '2', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large' ] }, # Asia Pacific (Sydney) Region 'ap-southeast-2': { 'endpoint': 'ec2.ap-southeast-2.amazonaws.com', 'api_name': 'ec2_ap_southeast_2', 'country': 'Australia', 'signature_version': '2', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' ] }, # Canada (Central) Region 'ca-central-1': { 'endpoint': 'ec2.ca-central-1.amazonaws.com', 'api_name': 'ec2_ca_central_1', 'country': 'Canada', 'signature_version': '4', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 
'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'cc2.8xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'cg1.4xlarge', 'g2.2xlarge', 'g2.8xlarge', 'cr1.8xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' ] }, # GovCloud Region 'us-gov-west-1': { 'endpoint': 'ec2.us-gov-west-1.amazonaws.com', 'api_name': 'ec2_us_govwest', 'country': 'US', 'signature_version': '2', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'c1.medium', 'c1.xlarge', 'g2.2xlarge', 'g2.8xlarge', 'g3.4xlarge', 'g3.8xlarge', 'g3.16xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'hs1.4xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large' ] }, # China (North) Region 'cn-north-1': { 'endpoint': 'ec2.cn-north-1.amazonaws.com.cn', 'api_name': 'ec2_cn_north', 'country': 'China', 'signature_version': '4', 'instance_types': [ 't1.micro', 't2.micro', 't2.small', 't2.medium', 't2.large', 't2.xlarge', 't2.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 
'm4.10xlarge', 'm4.16xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm1.small', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', ] }, 'nimbus': { # Nimbus clouds have 3 EC2-style instance types but their particular # RAM allocations are configured by the admin 'country': 'custom', 'signature_version': '2', 'instance_types': [ 'm1.small', 'm1.large', 'm1.xlarge' ] } } """ Sizes must be hardcoded because Outscale doesn't provide an API to fetch them. Outscale cloud instances share some names with EC2 but have different specifications so declare them in another constant. 
""" OUTSCALE_INSTANCE_TYPES = { 't1.micro': { 'id': 't1.micro', 'name': 'Micro Instance', 'ram': 615, 'disk': 0, 'bandwidth': None }, 'm1.small': { 'id': 'm1.small', 'name': 'Standard Small Instance', 'ram': 1740, 'disk': 150, 'bandwidth': None }, 'm1.medium': { 'id': 'm1.medium', 'name': 'Standard Medium Instance', 'ram': 3840, 'disk': 420, 'bandwidth': None }, 'm1.large': { 'id': 'm1.large', 'name': 'Standard Large Instance', 'ram': 7680, 'disk': 840, 'bandwidth': None }, 'm1.xlarge': { 'id': 'm1.xlarge', 'name': 'Standard Extra Large Instance', 'ram': 15360, 'disk': 1680, 'bandwidth': None }, 'c1.medium': { 'id': 'c1.medium', 'name': 'Compute Optimized Medium Instance', 'ram': 1740, 'disk': 340, 'bandwidth': None }, 'c1.xlarge': { 'id': 'c1.xlarge', 'name': 'Compute Optimized Extra Large Instance', 'ram': 7168, 'disk': 1680, 'bandwidth': None }, 'c3.large': { 'id': 'c3.large', 'name': 'Compute Optimized Large Instance', 'ram': 3840, 'disk': 32, 'bandwidth': None }, 'c3.xlarge': { 'id': 'c3.xlarge', 'name': 'Compute Optimized Extra Large Instance', 'ram': 7168, 'disk': 80, 'bandwidth': None }, 'c3.2xlarge': { 'id': 'c3.2xlarge', 'name': 'Compute Optimized Double Extra Large Instance', 'ram': 15359, 'disk': 160, 'bandwidth': None }, 'c3.4xlarge': { 'id': 'c3.4xlarge', 'name': 'Compute Optimized Quadruple Extra Large Instance', 'ram': 30720, 'disk': 320, 'bandwidth': None }, 'c3.8xlarge': { 'id': 'c3.8xlarge', 'name': 'Compute Optimized Eight Extra Large Instance', 'ram': 61440, 'disk': 640, 'bandwidth': None }, 'm2.xlarge': { 'id': 'm2.xlarge', 'name': 'High Memory Extra Large Instance', 'ram': 17510, 'disk': 420, 'bandwidth': None }, 'm2.2xlarge': { 'id': 'm2.2xlarge', 'name': 'High Memory Double Extra Large Instance', 'ram': 35020, 'disk': 840, 'bandwidth': None }, 'm2.4xlarge': { 'id': 'm2.4xlarge', 'name': 'High Memory Quadruple Extra Large Instance', 'ram': 70042, 'disk': 1680, 'bandwidth': None }, 'nv1.small': { 'id': 'nv1.small', 'name': 'GPU Small 
Instance', 'ram': 1739, 'disk': 150, 'bandwidth': None }, 'nv1.medium': { 'id': 'nv1.medium', 'name': 'GPU Medium Instance', 'ram': 3839, 'disk': 420, 'bandwidth': None }, 'nv1.large': { 'id': 'nv1.large', 'name': 'GPU Large Instance', 'ram': 7679, 'disk': 840, 'bandwidth': None }, 'nv1.xlarge': { 'id': 'nv1.xlarge', 'name': 'GPU Extra Large Instance', 'ram': 15358, 'disk': 1680, 'bandwidth': None }, 'g2.2xlarge': { 'id': 'g2.2xlarge', 'name': 'GPU Double Extra Large Instance', 'ram': 15360, 'disk': 60, 'bandwidth': None }, 'cc1.4xlarge': { 'id': 'cc1.4xlarge', 'name': 'Cluster Compute Quadruple Extra Large Instance', 'ram': 24576, 'disk': 1680, 'bandwidth': None }, 'cc2.8xlarge': { 'id': 'cc2.8xlarge', 'name': 'Cluster Compute Eight Extra Large Instance', 'ram': 65536, 'disk': 3360, 'bandwidth': None }, 'hi1.xlarge': { 'id': 'hi1.xlarge', 'name': 'High Storage Extra Large Instance', 'ram': 15361, 'disk': 1680, 'bandwidth': None }, 'm3.xlarge': { 'id': 'm3.xlarge', 'name': 'High Storage Optimized Extra Large Instance', 'ram': 15357, 'disk': 0, 'bandwidth': None }, 'm3.2xlarge': { 'id': 'm3.2xlarge', 'name': 'High Storage Optimized Double Extra Large Instance', 'ram': 30720, 'disk': 0, 'bandwidth': None }, 'm3s.xlarge': { 'id': 'm3s.xlarge', 'name': 'High Storage Optimized Extra Large Instance', 'ram': 15359, 'disk': 0, 'bandwidth': None }, 'm3s.2xlarge': { 'id': 'm3s.2xlarge', 'name': 'High Storage Optimized Double Extra Large Instance', 'ram': 30719, 'disk': 0, 'bandwidth': None }, 'cr1.8xlarge': { 'id': 'cr1.8xlarge', 'name': 'Memory Optimized Eight Extra Large Instance', 'ram': 249855, 'disk': 240, 'bandwidth': None }, 'os1.2xlarge': { 'id': 'os1.2xlarge', 'name': 'Memory Optimized, High Storage, Passthrough NIC Double Extra ' 'Large Instance', 'ram': 65536, 'disk': 60, 'bandwidth': None }, 'os1.4xlarge': { 'id': 'os1.4xlarge', 'name': 'Memory Optimized, High Storage, Passthrough NIC Quadruple Ext' 'ra Large Instance', 'ram': 131072, 'disk': 120, 'bandwidth': 
None }, 'os1.8xlarge': { 'id': 'os1.8xlarge', 'name': 'Memory Optimized, High Storage, Passthrough NIC Eight Extra L' 'arge Instance', 'ram': 249856, 'disk': 500, 'bandwidth': None }, 'oc1.4xlarge': { 'id': 'oc1.4xlarge', 'name': 'Outscale Quadruple Extra Large Instance', 'ram': 24575, 'disk': 1680, 'bandwidth': None }, 'oc2.8xlarge': { 'id': 'oc2.8xlarge', 'name': 'Outscale Eight Extra Large Instance', 'ram': 65535, 'disk': 3360, 'bandwidth': None } } """ The function manipulating Outscale cloud regions will be overridden because Outscale instances types are in a separate dict so also declare Outscale cloud regions in some other constants. """ OUTSCALE_SAS_REGION_DETAILS = { 'eu-west-3': { 'endpoint': 'api-ppd.outscale.com', 'api_name': 'osc_sas_eu_west_3', 'country': 'FRANCE', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'nv1.small', 'nv1.medium', 'nv1.large', 'nv1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'cr1.8xlarge', 'os1.8xlarge' ] }, 'eu-west-1': { 'endpoint': 'api.eu-west-1.outscale.com', 'api_name': 'osc_sas_eu_west_1', 'country': 'FRANCE', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'nv1.small', 'nv1.medium', 'nv1.large', 'nv1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'cr1.8xlarge', 'os1.8xlarge' ] }, 'eu-west-2': { 'endpoint': 'fcu.eu-west-2.outscale.com', 'api_name': 'osc_sas_eu_west_2', 'country': 'FRANCE', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'nv1.small', 'nv1.medium', 'nv1.large', 'nv1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'cr1.8xlarge', 'os1.8xlarge' ] }, 'us-east-1': { 'endpoint': 'api.us-east-1.outscale.com', 'api_name': 'osc_sas_us_east_1', 'country': 
'USA', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'nv1.small', 'nv1.medium', 'nv1.large', 'nv1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'cr1.8xlarge', 'os1.8xlarge' ] }, 'us-east-2': { 'endpoint': 'fcu.us-east-2.outscale.com', 'api_name': 'osc_sas_us_east_2', 'country': 'USA', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'nv1.small', 'nv1.medium', 'nv1.large', 'nv1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'cr1.8xlarge', 'os1.8xlarge' ] } } OUTSCALE_INC_REGION_DETAILS = { 'eu-west-1': { 'endpoint': 'api.eu-west-1.outscale.com', 'api_name': 'osc_inc_eu_west_1', 'country': 'FRANCE', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'p2.xlarge', 'p2.8xlarge', 'p2.16xlarge', 'nv1.small', 'nv1.medium', 'nv1.large', 'nv1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'cr1.8xlarge', 'os1.8xlarge' ] }, 'eu-west-2': { 'endpoint': 'fcu.eu-west-2.outscale.com', 'api_name': 'osc_inc_eu_west_2', 'country': 'FRANCE', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'nv1.small', 'nv1.medium', 'nv1.large', 'nv1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'cr1.8xlarge', 'os1.8xlarge' ] }, 'eu-west-3': { 'endpoint': 'api-ppd.outscale.com', 'api_name': 'osc_inc_eu_west_3', 'country': 'FRANCE', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'nv1.small', 'nv1.medium', 'nv1.large', 'nv1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'cr1.8xlarge', 'os1.8xlarge' ] }, 'us-east-1': 
{ 'endpoint': 'api.us-east-1.outscale.com', 'api_name': 'osc_inc_us_east_1', 'country': 'USA', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'nv1.small', 'nv1.medium', 'nv1.large', 'nv1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'cr1.8xlarge', 'os1.8xlarge' ] }, 'us-east-2': { 'endpoint': 'fcu.us-east-2.outscale.com', 'api_name': 'osc_inc_us_east_2', 'country': 'USA', 'instance_types': [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'nv1.small', 'nv1.medium', 'nv1.large', 'nv1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'cr1.8xlarge', 'os1.8xlarge' ] } } """ Define the extra dictionary for specific resources """ RESOURCE_EXTRA_ATTRIBUTES_MAP = { 'ebs_instance_block_device': { 'attach_time': { 'xpath': 'ebs/attachTime', 'transform_func': parse_date }, 'delete': { 'xpath': 'ebs/deleteOnTermination', 'transform_func': str }, 'status': { 'xpath': 'ebs/status', 'transform_func': str }, 'volume_id': { 'xpath': 'ebs/volumeId', 'transform_func': str } }, 'ebs_volume': { 'snapshot_id': { 'xpath': 'ebs/snapshotId', 'transform_func': str }, 'volume_id': { 'xpath': 'ebs/volumeId', 'transform_func': str }, 'volume_size': { 'xpath': 'ebs/volumeSize', 'transform_func': int }, 'delete': { 'xpath': 'ebs/deleteOnTermination', 'transform_func': str }, 'volume_type': { 'xpath': 'ebs/volumeType', 'transform_func': str }, 'iops': { 'xpath': 'ebs/iops', 'transform_func': int } }, 'elastic_ip': { 'allocation_id': { 'xpath': 'allocationId', 'transform_func': str, }, 'association_id': { 'xpath': 'associationId', 'transform_func': str, }, 'interface_id': { 'xpath': 'networkInterfaceId', 'transform_func': str, }, 'owner_id': { 'xpath': 'networkInterfaceOwnerId', 'transform_func': str, }, 'private_ip': { 'xpath': 'privateIp', 'transform_func': str, } }, 'image': { 
'state': { 'xpath': 'imageState', 'transform_func': str }, 'owner_id': { 'xpath': 'imageOwnerId', 'transform_func': str }, 'owner_alias': { 'xpath': 'imageOwnerAlias', 'transform_func': str }, 'is_public': { 'xpath': 'isPublic', 'transform_func': str }, 'architecture': { 'xpath': 'architecture', 'transform_func': str }, 'image_type': { 'xpath': 'imageType', 'transform_func': str }, 'image_location': { 'xpath': 'imageLocation', 'transform_func': str }, 'platform': { 'xpath': 'platform', 'transform_func': str }, 'description': { 'xpath': 'description', 'transform_func': str }, 'root_device_type': { 'xpath': 'rootDeviceType', 'transform_func': str }, 'virtualization_type': { 'xpath': 'virtualizationType', 'transform_func': str }, 'hypervisor': { 'xpath': 'hypervisor', 'transform_func': str }, 'kernel_id': { 'xpath': 'kernelId', 'transform_func': str }, 'ramdisk_id': { 'xpath': 'ramdiskId', 'transform_func': str }, 'ena_support': { 'xpath': 'enaSupport', 'transform_func': str }, 'sriov_net_support': { 'xpath': 'sriovNetSupport', 'transform_func': str } }, 'network': { 'state': { 'xpath': 'state', 'transform_func': str }, 'dhcp_options_id': { 'xpath': 'dhcpOptionsId', 'transform_func': str }, 'instance_tenancy': { 'xpath': 'instanceTenancy', 'transform_func': str }, 'is_default': { 'xpath': 'isDefault', 'transform_func': str } }, 'network_interface': { 'subnet_id': { 'xpath': 'subnetId', 'transform_func': str }, 'vpc_id': { 'xpath': 'vpcId', 'transform_func': str }, 'zone': { 'xpath': 'availabilityZone', 'transform_func': str }, 'description': { 'xpath': 'description', 'transform_func': str }, 'owner_id': { 'xpath': 'ownerId', 'transform_func': str }, 'mac_address': { 'xpath': 'macAddress', 'transform_func': str }, 'private_dns_name': { 'xpath': 'privateIpAddressesSet/privateDnsName', 'transform_func': str }, 'source_dest_check': { 'xpath': 'sourceDestCheck', 'transform_func': str } }, 'network_interface_attachment': { 'attachment_id': { 'xpath': 
'attachment/attachmentId', 'transform_func': str }, 'instance_id': { 'xpath': 'attachment/instanceId', 'transform_func': str }, 'owner_id': { 'xpath': 'attachment/instanceOwnerId', 'transform_func': str }, 'device_index': { 'xpath': 'attachment/deviceIndex', 'transform_func': int }, 'status': { 'xpath': 'attachment/status', 'transform_func': str }, 'attach_time': { 'xpath': 'attachment/attachTime', 'transform_func': parse_date }, 'delete': { 'xpath': 'attachment/deleteOnTermination', 'transform_func': str } }, 'node': { 'availability': { 'xpath': 'placement/availabilityZone', 'transform_func': str }, 'architecture': { 'xpath': 'architecture', 'transform_func': str }, 'client_token': { 'xpath': 'clientToken', 'transform_func': str }, 'dns_name': { 'xpath': 'dnsName', 'transform_func': str }, 'hypervisor': { 'xpath': 'hypervisor', 'transform_func': str }, 'iam_profile': { 'xpath': 'iamInstanceProfile/id', 'transform_func': str }, 'image_id': { 'xpath': 'imageId', 'transform_func': str }, 'instance_id': { 'xpath': 'instanceId', 'transform_func': str }, 'instance_lifecycle': { 'xpath': 'instanceLifecycle', 'transform_func': str }, 'instance_tenancy': { 'xpath': 'placement/tenancy', 'transform_func': str }, 'instance_type': { 'xpath': 'instanceType', 'transform_func': str }, 'key_name': { 'xpath': 'keyName', 'transform_func': str }, 'launch_index': { 'xpath': 'amiLaunchIndex', 'transform_func': int }, 'launch_time': { 'xpath': 'launchTime', 'transform_func': str }, 'kernel_id': { 'xpath': 'kernelId', 'transform_func': str }, 'monitoring': { 'xpath': 'monitoring/state', 'transform_func': str }, 'platform': { 'xpath': 'platform', 'transform_func': str }, 'private_dns': { 'xpath': 'privateDnsName', 'transform_func': str }, 'ramdisk_id': { 'xpath': 'ramdiskId', 'transform_func': str }, 'root_device_type': { 'xpath': 'rootDeviceType', 'transform_func': str }, 'root_device_name': { 'xpath': 'rootDeviceName', 'transform_func': str }, 'reason': { 'xpath': 'reason', 
'transform_func': str }, 'source_dest_check': { 'xpath': 'sourceDestCheck', 'transform_func': str }, 'status': { 'xpath': 'instanceState/name', 'transform_func': str }, 'subnet_id': { 'xpath': 'subnetId', 'transform_func': str }, 'virtualization_type': { 'xpath': 'virtualizationType', 'transform_func': str }, 'ebs_optimized': { 'xpath': 'ebsOptimized', 'transform_func': str }, 'vpc_id': { 'xpath': 'vpcId', 'transform_func': str } }, 'reserved_node': { 'instance_type': { 'xpath': 'instanceType', 'transform_func': str }, 'availability': { 'xpath': 'availabilityZone', 'transform_func': str }, 'start': { 'xpath': 'start', 'transform_func': str }, 'end': { 'xpath': 'end', 'transform_func': str }, 'duration': { 'xpath': 'duration', 'transform_func': int }, 'usage_price': { 'xpath': 'usagePrice', 'transform_func': float }, 'fixed_price': { 'xpath': 'fixedPrice', 'transform_func': float }, 'instance_count': { 'xpath': 'instanceCount', 'transform_func': int }, 'description': { 'xpath': 'productDescription', 'transform_func': str }, 'instance_tenancy': { 'xpath': 'instanceTenancy', 'transform_func': str }, 'currency_code': { 'xpath': 'currencyCode', 'transform_func': str }, 'offering_type': { 'xpath': 'offeringType', 'transform_func': str } }, 'security_group': { 'vpc_id': { 'xpath': 'vpcId', 'transform_func': str }, 'description': { 'xpath': 'groupDescription', 'transform_func': str }, 'owner_id': { 'xpath': 'ownerId', 'transform_func': str } }, 'snapshot': { 'volume_id': { 'xpath': 'volumeId', 'transform_func': str }, 'state': { 'xpath': 'status', 'transform_func': str }, 'description': { 'xpath': 'description', 'transform_func': str }, 'progress': { 'xpath': 'progress', 'transform_func': str }, 'start_time': { 'xpath': 'startTime', 'transform_func': parse_date } }, 'subnet': { 'cidr_block': { 'xpath': 'cidrBlock', 'transform_func': str }, 'available_ips': { 'xpath': 'availableIpAddressCount', 'transform_func': int }, 'zone': { 'xpath': 'availabilityZone', 
'transform_func': str }, 'vpc_id': { 'xpath': 'vpcId', 'transform_func': str } }, 'volume': { 'device': { 'xpath': 'attachmentSet/item/device', 'transform_func': str }, 'snapshot_id': { 'xpath': 'snapshotId', 'transform_func': lambda v: str(v) or None }, 'iops': { 'xpath': 'iops', 'transform_func': int }, 'zone': { 'xpath': 'availabilityZone', 'transform_func': str }, 'create_time': { 'xpath': 'createTime', 'transform_func': parse_date }, 'state': { 'xpath': 'status', 'transform_func': str }, 'attach_time': { 'xpath': 'attachmentSet/item/attachTime', 'transform_func': parse_date }, 'attachment_status': { 'xpath': 'attachmentSet/item/status', 'transform_func': str }, 'instance_id': { 'xpath': 'attachmentSet/item/instanceId', 'transform_func': str }, 'delete': { 'xpath': 'attachmentSet/item/deleteOnTermination', 'transform_func': str }, 'volume_type': { 'xpath': 'volumeType', 'transform_func': str } }, 'route_table': { 'vpc_id': { 'xpath': 'vpcId', 'transform_func': str } } } VOLUME_MODIFICATION_ATTRIBUTE_MAP = { 'end_time': { 'xpath': 'endTime', 'transform_func': parse_date }, 'modification_state': { 'xpath': 'modificationState', 'transform_func': str }, 'original_iops': { 'xpath': 'originalIops', 'transform_func': int }, 'original_size': { 'xpath': 'originalSize', 'transform_func': int }, 'original_volume_type': { 'xpath': 'originalVolumeType', 'transform_func': str }, 'progress': { 'xpath': 'progress', 'transform_func': int }, 'start_time': { 'xpath': 'startTime', 'transform_func': parse_date }, 'status_message': { 'xpath': 'statusMessage', 'transform_func': str }, 'target_iops': { 'xpath': 'targetIops', 'transform_func': int }, 'target_size': { 'xpath': 'targetSize', 'transform_func': int }, 'target_volume_type': { 'xpath': 'targetVolumeType', 'transform_func': str }, 'volume_id': { 'xpath': 'volumeId', 'transform_func': str } } VALID_EC2_REGIONS = REGION_DETAILS.keys() VALID_EC2_REGIONS = [ r for r in VALID_EC2_REGIONS if r != 'nimbus' and r != 'cn-north-1' ] 
VALID_VOLUME_TYPES = ['standard', 'io1', 'gp2', 'st1', 'sc1'] class EC2NodeLocation(NodeLocation): def __init__(self, id, name, country, driver, availability_zone): super(EC2NodeLocation, self).__init__(id, name, country, driver) self.availability_zone = availability_zone def __repr__(self): return (('') % (self.id, self.name, self.country, self.availability_zone, self.driver.name)) class EC2Response(AWSBaseResponse): """ EC2 specific response parsing and error handling. """ def parse_error(self): err_list = [] # Okay, so for Eucalyptus, you can get a 403, with no body, # if you are using the wrong user/password. msg = "Failure: 403 Forbidden" if self.status == 403 and self.body[:len(msg)] == msg: raise InvalidCredsError(msg) try: body = ET.XML(self.body) except: raise MalformedResponseError("Failed to parse XML", body=self.body, driver=EC2NodeDriver) for err in body.findall('Errors/Error'): code, message = err.getchildren() err_list.append('%s: %s' % (code.text, message.text)) if code.text == 'InvalidClientTokenId': raise InvalidCredsError(err_list[-1]) if code.text == 'SignatureDoesNotMatch': raise InvalidCredsError(err_list[-1]) if code.text == 'AuthFailure': raise InvalidCredsError(err_list[-1]) if code.text == 'OptInRequired': raise InvalidCredsError(err_list[-1]) if code.text == 'IdempotentParameterMismatch': raise IdempotentParamError(err_list[-1]) if code.text == 'InvalidKeyPair.NotFound': # TODO: Use connection context instead match = re.match(r'.*\'(.+?)\'.*', message.text) if match: name = match.groups()[0] else: name = None raise KeyPairDoesNotExistError(name=name, driver=self.connection.driver) return '\n'.join(err_list) class EC2Connection(SignedAWSConnection): """ Represents a single connection to the EC2 Endpoint. """ version = API_VERSION host = REGION_DETAILS['us-east-1']['endpoint'] responseCls = EC2Response service_name = 'ec2' class ExEC2AvailabilityZone(object): """ Extension class which stores information about an EC2 availability zone. 
Note: This class is EC2 specific. """ def __init__(self, name, zone_state, region_name): self.name = name self.zone_state = zone_state self.region_name = region_name def __repr__(self): return (('') % (self.name, self.zone_state, self.region_name)) class EC2ReservedNode(Node): """ Class which stores information about EC2 reserved instances/nodes Inherits from Node and passes in None for name and private/public IPs Note: This class is EC2 specific. """ def __init__(self, id, state, driver, size=None, image=None, extra=None): super(EC2ReservedNode, self).__init__(id=id, name=None, state=state, public_ips=None, private_ips=None, driver=driver, extra=extra) def __repr__(self): return (('') % (self.id)) class EC2SecurityGroup(object): """ Represents information about a Security group Note: This class is EC2 specific. """ def __init__(self, id, name, ingress_rules, egress_rules, extra=None): self.id = id self.name = name self.ingress_rules = ingress_rules self.egress_rules = egress_rules self.extra = extra or {} def __repr__(self): return (('' % (self.name, self.strategy) class EC2Network(object): """ Represents information about a VPC (Virtual Private Cloud) network Note: This class is EC2 specific. """ def __init__(self, id, name, cidr_block, extra=None): self.id = id self.name = name self.cidr_block = cidr_block self.extra = extra or {} def __repr__(self): return (('') % (self.ip, self.domain, self.instance_id)) class VPCInternetGateway(object): """ Class which stores information about VPC Internet Gateways. Note: This class is VPC specific. """ def __init__(self, id, name, vpc_id, state, driver, extra=None): self.id = id self.name = name self.vpc_id = vpc_id self.state = state self.extra = extra or {} def __repr__(self): return (('') % (self.id)) class EC2RouteTable(object): """ Class which stores information about VPC Route Tables. Note: This class is VPC specific. 
""" def __init__(self, id, name, routes, subnet_associations, propagating_gateway_ids, extra=None): """ :param id: The ID of the route table. :type id: ``str`` :param name: The name of the route table. :type name: ``str`` :param routes: A list of routes in the route table. :type routes: ``list`` of :class:`EC2Route` :param subnet_associations: A list of associations between the route table and one or more subnets. :type subnet_associations: ``list`` of :class:`EC2SubnetAssociation` :param propagating_gateway_ids: The list of IDs of any virtual private gateways propagating the routes. :type propagating_gateway_ids: ``list`` """ self.id = id self.name = name self.routes = routes self.subnet_associations = subnet_associations self.propagating_gateway_ids = propagating_gateway_ids self.extra = extra or {} def __repr__(self): return (('') % (self.id)) class EC2Route(object): """ Class which stores information about a Route. Note: This class is VPC specific. """ def __init__(self, cidr, gateway_id, instance_id, owner_id, interface_id, state, origin, vpc_peering_connection_id): """ :param cidr: The CIDR block used for the destination match. :type cidr: ``str`` :param gateway_id: The ID of a gateway attached to the VPC. :type gateway_id: ``str`` :param instance_id: The ID of a NAT instance in the VPC. :type instance_id: ``str`` :param owner_id: The AWS account ID of the owner of the instance. :type owner_id: ``str`` :param interface_id: The ID of the network interface. :type interface_id: ``str`` :param state: The state of the route (active | blackhole). :type state: ``str`` :param origin: Describes how the route was created. :type origin: ``str`` :param vpc_peering_connection_id: The ID of the VPC peering connection. 
:type vpc_peering_connection_id: ``str`` """ self.cidr = cidr self.gateway_id = gateway_id self.instance_id = instance_id self.owner_id = owner_id self.interface_id = interface_id self.state = state self.origin = origin self.vpc_peering_connection_id = vpc_peering_connection_id def __repr__(self): return (('') % (self.cidr)) class EC2SubnetAssociation(object): """ Class which stores information about Route Table associated with a given Subnet in a VPC Note: This class is VPC specific. """ def __init__(self, id, route_table_id, subnet_id, main=False): """ :param id: The ID of the subnet association in the VPC. :type id: ``str`` :param route_table_id: The ID of a route table in the VPC. :type route_table_id: ``str`` :param subnet_id: The ID of a subnet in the VPC. :type subnet_id: ``str`` :param main: If true, means this is a main VPC route table. :type main: ``bool`` """ self.id = id self.route_table_id = route_table_id self.subnet_id = subnet_id self.main = main def __repr__(self): return (('') % (self.id)) class EC2VolumeModification(object): """ Describes the modification status of an EBS volume. If the volume has never been modified, some element values will be null. 
""" def __init__(self, end_time=None, modification_state=None, original_iops=None, original_size=None, original_volume_type=None, progress=None, start_time=None, status_message=None, target_iops=None, target_size=None, target_volume_type=None, volume_id=None): self.end_time = end_time self.modification_state = modification_state self.original_iops = original_iops self.original_size = original_size self.original_volume_type = original_volume_type self.progress = progress self.start_time = start_time self.status_message = status_message self.target_iops = target_iops self.target_size = target_size self.target_volume_type = target_volume_type self.volume_id = volume_id def __repr__(self): return (('') % (self.end_time, self.modification_state, self.original_iops, self.original_size, self.original_volume_type, self.progress, self.start_time, self.status_message, self.target_iops, self.target_size, self.target_volume_type, self.volume_id)) class BaseEC2NodeDriver(NodeDriver): """ Base Amazon EC2 node driver. Used for main EC2 and other derivate driver classes to inherit from it. """ connectionCls = EC2Connection features = {'create_node': ['ssh_key']} path = '/' signature_version = DEFAULT_SIGNATURE_VERSION NODE_STATE_MAP = { 'pending': NodeState.PENDING, 'running': NodeState.RUNNING, 'shutting-down': NodeState.UNKNOWN, 'terminated': NodeState.TERMINATED } # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Volume.html VOLUME_STATE_MAP = { 'available': StorageVolumeState.AVAILABLE, 'in-use': StorageVolumeState.INUSE, 'error': StorageVolumeState.ERROR, 'creating': StorageVolumeState.CREATING, 'deleting': StorageVolumeState.DELETING, 'deleted': StorageVolumeState.DELETED, 'error_deleting': StorageVolumeState.ERROR } SNAPSHOT_STATE_MAP = { 'pending': VolumeSnapshotState.CREATING, 'completed': VolumeSnapshotState.AVAILABLE, 'error': VolumeSnapshotState.ERROR, } def list_nodes(self, ex_node_ids=None, ex_filters=None): """ Lists all nodes. 
Ex_node_ids parameter is used to filter the list of nodes that should be returned. Only the nodes with the corresponding node IDs will be returned. :param ex_node_ids: List of ``node.id`` :type ex_node_ids: ``list`` of ``str`` :param ex_filters: The filters so that the list includes information for certain nodes only. :type ex_filters: ``dict`` :rtype: ``list`` of :class:`Node` """ params = {'Action': 'DescribeInstances'} if ex_node_ids: params.update(self._pathlist('InstanceId', ex_node_ids)) if ex_filters: params.update(self._build_filters(ex_filters)) elem = self.connection.request(self.path, params=params).object nodes = [] for rs in findall(element=elem, xpath='reservationSet/item', namespace=NAMESPACE): nodes += self._to_nodes(rs, 'instancesSet/item') nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes) for node in nodes: ips = nodes_elastic_ips_mappings[node.id] node.public_ips.extend(ips) return nodes def list_sizes(self, location=None): available_types = REGION_DETAILS[self.region_name]['instance_types'] sizes = [] for instance_type in available_types: attributes = INSTANCE_TYPES[instance_type] attributes = copy.deepcopy(attributes) price = self._get_size_price(size_id=instance_type) attributes.update({'price': price}) sizes.append(NodeSize(driver=self, **attributes)) return sizes def list_images(self, location=None, ex_image_ids=None, ex_owner=None, ex_executableby=None, ex_filters=None): """ Lists all images @inherits: :class:`NodeDriver.list_images` Ex_image_ids parameter is used to filter the list of images that should be returned. Only the images with the corresponding image IDs will be returned. Ex_owner parameter is used to filter the list of images that should be returned. Only the images with the corresponding owner will be returned. Valid values: amazon|aws-marketplace|self|all|aws id Ex_executableby parameter describes images for which the specified user has explicit launch permissions. 
The user can be an AWS account ID, self to return images for which the sender of the request has explicit launch permissions, or all to return images with public launch permissions. Valid values: all|self|aws id Ex_filters parameter is used to filter the list of images that should be returned. Only images matching the filter will be returned. :param ex_image_ids: List of ``NodeImage.id`` :type ex_image_ids: ``list`` of ``str`` :param ex_owner: Owner name :type ex_owner: ``str`` :param ex_executableby: Executable by :type ex_executableby: ``str`` :param ex_filters: Filter by :type ex_filters: ``dict`` :rtype: ``list`` of :class:`NodeImage` """ params = {'Action': 'DescribeImages'} if ex_owner: params.update({'Owner.1': ex_owner}) if ex_executableby: params.update({'ExecutableBy.1': ex_executableby}) if ex_image_ids: for index, image_id in enumerate(ex_image_ids): index += 1 params.update({'ImageId.%s' % (index): image_id}) if ex_filters: params.update(self._build_filters(ex_filters)) images = self._to_images( self.connection.request(self.path, params=params).object ) return images def get_image(self, image_id): """ Gets an image based on an image_id. 
:param image_id: Image identifier :type image_id: ``str`` :return: A NodeImage object :rtype: :class:`NodeImage` """ images = self.list_images(ex_image_ids=[image_id]) image = images[0] return image def list_locations(self): locations = [] for index, availability_zone in \ enumerate(self.ex_list_availability_zones()): locations.append(EC2NodeLocation( index, availability_zone.name, self.country, self, availability_zone) ) return locations def list_volumes(self, node=None): params = { 'Action': 'DescribeVolumes', } if node: filters = {'attachment.instance-id': node.id} params.update(self._build_filters(filters)) response = self.connection.request(self.path, params=params).object volumes = [self._to_volume(el) for el in response.findall( fixxpath(xpath='volumeSet/item', namespace=NAMESPACE)) ] return volumes def create_node(self, **kwargs): """ Create a new EC2 node. Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com] @inherits: :class:`NodeDriver.create_node` :keyword ex_keyname: The name of the key pair :type ex_keyname: ``str`` :keyword ex_userdata: User data :type ex_userdata: ``str`` :keyword ex_security_groups: A list of names of security groups to assign to the node. :type ex_security_groups: ``list`` :keyword ex_security_group_ids: A list of ids of security groups to assign to the node.[for VPC nodes only] :type ex_security_group_ids: ``list`` :keyword ex_metadata: Key/Value metadata to associate with a node :type ex_metadata: ``dict`` :keyword ex_mincount: Minimum number of instances to launch :type ex_mincount: ``int`` :keyword ex_maxcount: Maximum number of instances to launch :type ex_maxcount: ``int`` :keyword ex_clienttoken: Unique identifier to ensure idempotency :type ex_clienttoken: ``str`` :keyword ex_blockdevicemappings: ``list`` of ``dict`` block device mappings. 
:type ex_blockdevicemappings: ``list`` of ``dict`` :keyword ex_iamprofile: Name or ARN of IAM profile :type ex_iamprofile: ``str`` :keyword ex_ebs_optimized: EBS-Optimized if True :type ex_ebs_optimized: ``bool`` :keyword ex_subnet: The subnet to launch the instance into. :type ex_subnet: :class:`.EC2Subnet` :keyword ex_placement_group: The name of the placement group to launch the instance into. :type ex_placement_group: ``str`` :keyword ex_assign_public_ip: If True, the instance will be assigned a public ip address. Note : It takes takes a short while for the instance to be assigned the public ip so the node returned will NOT have the public ip assigned yet. :type ex_assign_public_ip: ``bool`` :keyword ex_terminate_on_shutdown: Indicates if the instance should be terminated instead of just shut down when using the operating systems command for system shutdown. :type ex_terminate_on_shutdown: ``bool`` """ image = kwargs["image"] size = kwargs["size"] params = { 'Action': 'RunInstances', 'ImageId': image.id, 'MinCount': str(kwargs.get('ex_mincount', '1')), 'MaxCount': str(kwargs.get('ex_maxcount', '1')), 'InstanceType': size.id } if kwargs.get("ex_terminate_on_shutdown", False): params["InstanceInitiatedShutdownBehavior"] = "terminate" if 'ex_security_groups' in kwargs and 'ex_securitygroup' in kwargs: raise ValueError('You can only supply ex_security_groups or' ' ex_securitygroup') # ex_securitygroup is here for backward compatibility ex_security_groups = kwargs.get('ex_security_groups', None) ex_securitygroup = kwargs.get('ex_securitygroup', None) security_groups = ex_security_groups or ex_securitygroup if security_groups: if not isinstance(security_groups, (tuple, list)): security_groups = [security_groups] for sig in range(len(security_groups)): params['SecurityGroup.%d' % (sig + 1,)] =\ security_groups[sig] if 'ex_security_group_ids' in kwargs and 'ex_subnet' not in kwargs: raise ValueError('You can only supply ex_security_group_ids' ' combinated with 
ex_subnet') security_group_ids = kwargs.get('ex_security_group_ids', None) security_group_id_params = {} if security_group_ids: if not isinstance(security_group_ids, (tuple, list)): security_group_ids = [security_group_ids] for sig in range(len(security_group_ids)): security_group_id_params['SecurityGroupId.%d' % (sig + 1,)] =\ security_group_ids[sig] if 'location' in kwargs: availability_zone = getattr(kwargs['location'], 'availability_zone', None) if availability_zone: if availability_zone.region_name != self.region_name: raise AttributeError('Invalid availability zone: %s' % (availability_zone.name)) params['Placement.AvailabilityZone'] = availability_zone.name if 'auth' in kwargs and 'ex_keyname' in kwargs: raise AttributeError('Cannot specify auth and ex_keyname together') if 'auth' in kwargs: auth = self._get_and_check_auth(kwargs['auth']) key = self.ex_find_or_import_keypair_by_key_material(auth.pubkey) params['KeyName'] = key['keyName'] if 'ex_keyname' in kwargs: params['KeyName'] = kwargs['ex_keyname'] if 'ex_userdata' in kwargs: params['UserData'] = base64.b64encode(b(kwargs['ex_userdata']))\ .decode('utf-8') if 'ex_clienttoken' in kwargs: params['ClientToken'] = kwargs['ex_clienttoken'] if 'ex_blockdevicemappings' in kwargs: params.update(self._get_block_device_mapping_params( kwargs['ex_blockdevicemappings'])) if 'ex_iamprofile' in kwargs: if not isinstance(kwargs['ex_iamprofile'], basestring): raise AttributeError('ex_iamprofile not string') if kwargs['ex_iamprofile'].startswith('arn:aws:iam:'): params['IamInstanceProfile.Arn'] = kwargs['ex_iamprofile'] else: params['IamInstanceProfile.Name'] = kwargs['ex_iamprofile'] if 'ex_ebs_optimized' in kwargs: params['EbsOptimized'] = kwargs['ex_ebs_optimized'] subnet_id = None if 'ex_subnet' in kwargs: subnet_id = kwargs['ex_subnet'].id if 'ex_placement_group' in kwargs and kwargs['ex_placement_group']: params['Placement.GroupName'] = kwargs['ex_placement_group'] assign_public_ip = 
kwargs.get('ex_assign_public_ip', False) # In the event that a public ip is requested a NetworkInterface # needs to be specified. Some properties that would # normally be at the root (security group ids and subnet id) # need to be moved to the level of the NetworkInterface because # the NetworkInterface is no longer created implicitly if assign_public_ip: root_key = 'NetworkInterface.1.' params[root_key + 'AssociatePublicIpAddress'] = "true" # This means that when the instance is terminated, the # NetworkInterface we created for the instance will be # deleted automatically params[root_key + 'DeleteOnTermination'] = "true" # Required to be 0 if we are associating a public ip params[root_key + 'DeviceIndex'] = "0" if subnet_id: params[root_key + 'SubnetId'] = subnet_id for key, security_group_id in security_group_id_params.items(): key = root_key + key params[key] = security_group_id else: params.update(security_group_id_params) if subnet_id: params['SubnetId'] = subnet_id # Specify tags at instance creation time tags = {'Name': kwargs['name']} if 'ex_metadata' in kwargs: tags.update(kwargs['ex_metadata']) tagspec_root = 'TagSpecification.1.' params[tagspec_root + 'ResourceType'] = 'instance' tag_nr = 1 for k, v in tags.items(): tag_root = tagspec_root + 'Tag.%d.' 
% tag_nr params[tag_root + 'Key'] = k params[tag_root + 'Value'] = v tag_nr += 1 object = self.connection.request(self.path, params=params).object nodes = self._to_nodes(object, 'instancesSet/item') for node in nodes: node.name = kwargs['name'] node.extra.update({'tags': tags}) if len(nodes) == 1: return nodes[0] else: return nodes def reboot_node(self, node): params = {'Action': 'RebootInstances'} params.update(self._pathlist('InstanceId', [node.id])) res = self.connection.request(self.path, params=params).object return self._get_boolean(res) def destroy_node(self, node): params = {'Action': 'TerminateInstances'} params.update(self._pathlist('InstanceId', [node.id])) res = self.connection.request(self.path, params=params).object return self._get_terminate_boolean(res) def create_volume(self, size, name, location=None, snapshot=None, ex_volume_type='standard', ex_iops=None, ex_encrypted=None, ex_kms_key_id=None): """ Create a new volume. :param size: Size of volume in gigabytes (required) :type size: ``int`` :param name: Name of the volume to be created :type name: ``str`` :param location: Which data center to create a volume in. If empty, undefined behavior will be selected. (optional) :type location: :class:`.NodeLocation` :param snapshot: Snapshot from which to create the new volume. (optional) :type snapshot: :class:`.VolumeSnapshot` :param location: Datacenter in which to create a volume in. :type location: :class:`.ExEC2AvailabilityZone` :param ex_volume_type: Type of volume to create. :type ex_volume_type: ``str`` :param iops: The number of I/O operations per second (IOPS) that the volume supports. Only used if ex_volume_type is io1. :type iops: ``int`` :param ex_encrypted: Specifies whether the volume should be encrypted. :type ex_encrypted: ``bool`` :param ex_kms_key_id: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. 
Example: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123 -456a-a12b-a123b4cd56ef. Only used if encrypted is set to True. :type ex_kms_key_id: ``str`` :return: The newly created volume. :rtype: :class:`StorageVolume` """ params = { 'Action': 'CreateVolume', 'Size': str(size)} if ex_volume_type and ex_volume_type not in VALID_VOLUME_TYPES: raise ValueError('Invalid volume type specified: %s' % (ex_volume_type)) if snapshot: params['SnapshotId'] = snapshot.id if location is not None: params['AvailabilityZone'] = location.availability_zone.name if ex_volume_type: params['VolumeType'] = ex_volume_type if ex_volume_type == 'io1' and ex_iops: params['Iops'] = ex_iops if ex_encrypted is not None: params['Encrypted'] = 1 if ex_kms_key_id is not None: params['KmsKeyId'] = ex_kms_key_id volume = self._to_volume( self.connection.request(self.path, params=params).object, name=name) if self.ex_create_tags(volume, {'Name': name}): volume.extra['tags']['Name'] = name return volume def attach_volume(self, node, volume, device): params = { 'Action': 'AttachVolume', 'VolumeId': volume.id, 'InstanceId': node.id, 'Device': device} self.connection.request(self.path, params=params) return True def detach_volume(self, volume, ex_force=False): params = { 'Action': 'DetachVolume', 'VolumeId': volume.id} if ex_force: params['Force'] = 1 self.connection.request(self.path, params=params) return True def destroy_volume(self, volume): params = { 'Action': 'DeleteVolume', 'VolumeId': volume.id} response = self.connection.request(self.path, params=params).object return self._get_boolean(response) def create_volume_snapshot(self, volume, name=None): """ Create snapshot from volume :param volume: Instance of ``StorageVolume`` :type volume: ``StorageVolume`` :param name: Name of snapshot (optional) :type name: ``str`` :rtype: :class:`VolumeSnapshot` """ params = { 'Action': 'CreateSnapshot', 'VolumeId': volume.id, } if name: params.update({ 'Description': name, }) response = 
self.connection.request(self.path, params=params).object snapshot = self._to_snapshot(response, name) if name and self.ex_create_tags(snapshot, {'Name': name}): snapshot.extra['tags']['Name'] = name return snapshot def list_volume_snapshots(self, volume): return [snapshot for snapshot in self.list_snapshots(owner='self') if snapshot.extra["volume_id"] == volume.id] def list_snapshots(self, snapshot=None, owner=None): """ Describes all snapshots. :param snapshot: If provided, only returns snapshot information for the provided snapshot. :param owner: The owner of the snapshot: self|amazon|ID :type owner: ``str`` :rtype: ``list`` of :class:`VolumeSnapshot` """ params = { 'Action': 'DescribeSnapshots', } if snapshot: params.update({ 'SnapshotId.1': snapshot.id, }) if owner: params.update({ 'Owner.1': owner, }) response = self.connection.request(self.path, params=params).object snapshots = self._to_snapshots(response) return snapshots def destroy_volume_snapshot(self, snapshot): params = { 'Action': 'DeleteSnapshot', 'SnapshotId': snapshot.id } response = self.connection.request(self.path, params=params).object return self._get_boolean(response) # Key pair management methods def list_key_pairs(self): params = { 'Action': 'DescribeKeyPairs' } response = self.connection.request(self.path, params=params) elems = findall(element=response.object, xpath='keySet/item', namespace=NAMESPACE) key_pairs = self._to_key_pairs(elems=elems) return key_pairs def get_key_pair(self, name): params = { 'Action': 'DescribeKeyPairs', 'KeyName': name } response = self.connection.request(self.path, params=params) elems = findall(element=response.object, xpath='keySet/item', namespace=NAMESPACE) key_pair = self._to_key_pairs(elems=elems)[0] return key_pair def create_key_pair(self, name): params = { 'Action': 'CreateKeyPair', 'KeyName': name } response = self.connection.request(self.path, params=params) elem = response.object key_pair = self._to_key_pair(elem=elem) return key_pair def 
import_key_pair_from_string(self, name, key_material): base64key = ensure_string(base64.b64encode(b(key_material))) params = { 'Action': 'ImportKeyPair', 'KeyName': name, 'PublicKeyMaterial': base64key } response = self.connection.request(self.path, params=params) elem = response.object key_pair = self._to_key_pair(elem=elem) return key_pair def delete_key_pair(self, key_pair): params = { 'Action': 'DeleteKeyPair', 'KeyName': key_pair.name } res = self.connection.request(self.path, params=params).object return self._get_boolean(res) def copy_image(self, image, source_region, name=None, description=None): """ Copy an Amazon Machine Image from the specified source region to the current region. @inherits: :class:`NodeDriver.copy_image` :param source_region: The region where the image resides :type source_region: ``str`` :param image: Instance of class NodeImage :type image: :class:`NodeImage` :param name: The name of the new image :type name: ``str`` :param description: The description of the new image :type description: ``str`` :return: Instance of class ``NodeImage`` :rtype: :class:`NodeImage` """ params = {'Action': 'CopyImage', 'SourceRegion': source_region, 'SourceImageId': image.id} if name is not None: params['Name'] = name if description is not None: params['Description'] = description image = self._to_image( self.connection.request(self.path, params=params).object) return image def create_image(self, node, name, description=None, reboot=False, block_device_mapping=None): """ Create an Amazon Machine Image based off of an EBS-backed instance. @inherits: :class:`NodeDriver.create_image` :param node: Instance of ``Node`` :type node: :class: `Node` :param name: The name for the new image :type name: ``str`` :param block_device_mapping: A dictionary of the disk layout An example of this dict is included below. :type block_device_mapping: ``list`` of ``dict`` :param reboot: Whether or not to shutdown the instance before creation. 
Amazon calls this NoReboot and sets it to false by default to ensure a clean image. :type reboot: ``bool`` :param description: An optional description for the new image :type description: ``str`` An example block device mapping dictionary is included: mapping = [{'VirtualName': None, 'Ebs': {'VolumeSize': 10, 'VolumeType': 'standard', 'DeleteOnTermination': 'true'}, 'DeviceName': '/dev/sda1'}] :return: Instance of class ``NodeImage`` :rtype: :class:`NodeImage` """ params = {'Action': 'CreateImage', 'InstanceId': node.id, 'Name': name, 'NoReboot': not reboot} if description is not None: params['Description'] = description if block_device_mapping is not None: params.update(self._get_block_device_mapping_params( block_device_mapping)) image = self._to_image( self.connection.request(self.path, params=params).object) return image def delete_image(self, image): """ Deletes an image at Amazon given a NodeImage object @inherits: :class:`NodeDriver.delete_image` :param image: Instance of ``NodeImage`` :type image: :class: `NodeImage` :rtype: ``bool`` """ params = {'Action': 'DeregisterImage', 'ImageId': image.id} response = self.connection.request(self.path, params=params).object return self._get_boolean(response) def ex_create_placement_group(self, name): """ Creates a new placement group. :param name: The name for the new placement group :type name: ``str`` :rtype: ``bool`` """ params = {'Action': 'CreatePlacementGroup', 'Strategy': 'cluster', 'GroupName': name} response = self.connection.request(self.path, params=params).object return self._get_boolean(response) def ex_delete_placement_group(self, name): """ Deletes a placement group. 
:param name: The placement group name :type name: ``str`` :rtype: ``bool`` """ params = {'Action': 'DeletePlacementGroup', 'GroupName': name} response = self.connection.request(self.path, params=params).object return self._get_boolean(response) def ex_import_snapshot(self, client_data=None, client_token=None, description=None, disk_container=None, dry_run=None, role_name=None): """ Imports a disk into an EBS snapshot. More information can be found at https://goo.gl/sbXkYA. :param client_data: Describes the client specific data (optional) :type client_data: ``dict`` :param client_token: The token to enable idempotency for VM import requests.(optional) :type client_token: ``str`` :param description: The description string for the import snapshot task.(optional) :type description: ``str`` :param disk_container:The disk container object for the import snapshot request. :type disk_container:``dict`` :param dry_run: Checks whether you have the permission for the action, without actually making the request, and provides an error response.(optional) :type dry_run: ``bool`` :param role_name: The name of the role to use when not using the default role, 'vmimport'.(optional) :type role_name: ``str`` :rtype: :class: ``VolumeSnapshot`` """ params = {'Action': 'ImportSnapshot'} if client_data is not None: params.update(self._get_client_date_params(client_data)) if client_token is not None: params['ClientToken'] = client_token if description is not None: params['Description'] = description if disk_container is not None: params.update(self._get_disk_container_params(disk_container)) if dry_run is not None: params['DryRun'] = dry_run if role_name is not None: params['RoleName'] = role_name importSnapshot = self.connection.request(self.path, params=params).object importTaskId = findtext(element=importSnapshot, xpath='importTaskId', namespace=NAMESPACE) volumeSnapshot = self._wait_for_import_snapshot_completion( import_task_id=importTaskId, timeout=1800, interval=15) return 
volumeSnapshot def _wait_for_import_snapshot_completion(self, import_task_id, timeout=1800, interval=15): """ It waits for import snapshot to be completed :param import_task_id: Import task Id for the current Import Snapshot Task :type import_task_id: ``str`` :param timeout: Timeout value for snapshot generation :type timeout: ``float`` :param interval: Time interval for repetative describe import snapshot tasks requests :type interval: ``float`` :rtype: :class:``VolumeSnapshot`` """ start_time = time.time() snapshotId = None while snapshotId is None: if (time.time() - start_time >= timeout): raise Exception('Timeout while waiting ' 'for import task Id %s' % import_task_id) res = self.ex_describe_import_snapshot_tasks(import_task_id) snapshotId = res.snapshotId if snapshotId is None: time.sleep(interval) volumeSnapshot = VolumeSnapshot(snapshotId, driver=self) return volumeSnapshot def ex_describe_import_snapshot_tasks(self, import_task_id, dry_run=None): """ Describes your import snapshot tasks. More information can be found at https://goo.gl/CI0MdS. :param import_task_id: Import task Id for the current Import Snapshot Task :type import_task_id: ``str`` :param dry_run: Checks whether you have the permission for the action, without actually making the request, and provides an error response.(optional) :type dry_run: ``bool`` :rtype: :class:``DescribeImportSnapshotTasks Object`` """ params = {'Action': 'DescribeImportSnapshotTasks'} if dry_run is not None: params['DryRun'] = dry_run # This can be extended for multiple import snapshot tasks params['ImportTaskId.1'] = import_task_id res = self._to_import_snapshot_task( self.connection.request(self.path, params=params).object ) return res def ex_list_placement_groups(self, names=None): """ A list of placement groups. 
:param names: Placement Group names :type names: ``list`` of ``str`` :rtype: ``list`` of :class:`.EC2PlacementGroup` """ names = names or [] params = {'Action': 'DescribePlacementGroups'} for index, name in enumerate(names): params['GroupName.%s' % index + 1] = name response = self.connection.request(self.path, params=params).object return self._to_placement_groups(response) def ex_register_image(self, name, description=None, architecture=None, image_location=None, root_device_name=None, block_device_mapping=None, kernel_id=None, ramdisk_id=None, virtualization_type=None, ena_support=None, billing_products=None, sriov_net_support=None): """ Registers an Amazon Machine Image based off of an EBS-backed instance. Can also be used to create images from snapshots. More information can be found at http://goo.gl/hqZq0a. :param name: The name for the AMI being registered :type name: ``str`` :param description: The description of the AMI (optional) :type description: ``str`` :param architecture: The architecture of the AMI (i386/x86_64) (optional) :type architecture: ``str`` :param image_location: The location of the AMI within Amazon S3 Required if registering an instance store-backed AMI :type image_location: ``str`` :param root_device_name: The device name for the root device Required if registering an EBS-backed AMI :type root_device_name: ``str`` :param block_device_mapping: A dictionary of the disk layout (optional) :type block_device_mapping: ``dict`` :param kernel_id: Kernel id for AMI (optional) :type kernel_id: ``str`` :param ramdisk_id: RAM disk for AMI (optional) :type ramdisk_id: ``str`` :param virtualization_type: The type of virtualization for the AMI you are registering, paravirt or hvm (optional) :type virtualization_type: ``str`` :param ena_support: Enable enhanced networking with Elastic Network Adapter for the AMI :type ena_support: ``bool`` :param billing_products: The billing product codes :type billing_products: ''list'' :param sriov_net_support: Set 
        to "simple" to enable enhanced networking with
        the Intel 82599 Virtual Function interface
        :type sriov_net_support: ``str``

        :rtype: :class:`NodeImage`
        """
        params = {'Action': 'RegisterImage',
                  'Name': name}

        # All arguments are optional; only non-None values are forwarded
        # to the RegisterImage API call.
        if description is not None:
            params['Description'] = description

        if architecture is not None:
            params['Architecture'] = architecture

        if image_location is not None:
            params['ImageLocation'] = image_location

        if root_device_name is not None:
            params['RootDeviceName'] = root_device_name

        if block_device_mapping is not None:
            params.update(self._get_block_device_mapping_params(
                block_device_mapping))

        if kernel_id is not None:
            params['KernelId'] = kernel_id

        if ramdisk_id is not None:
            params['RamDiskId'] = ramdisk_id

        if virtualization_type is not None:
            params['VirtualizationType'] = virtualization_type

        if ena_support is not None:
            params['EnaSupport'] = ena_support

        if billing_products is not None:
            params.update(self._get_billing_product_params(
                billing_products))

        if sriov_net_support is not None:
            params['SriovNetSupport'] = sriov_net_support

        image = self._to_image(
            self.connection.request(self.path, params=params).object
        )
        return image

    def ex_list_networks(self, network_ids=None, filters=None):
        """
        Returns a list of :class:`EC2Network` objects for the
        current region.

        :param network_ids: Returns only networks matching the provided
                            network IDs. If not specified, a list of all
                            the networks in the corresponding region
                            is returned.
        :type network_ids: ``list``

        :param filters: The filters so that the list returned includes
                        information for certain networks only.
        :type filters: ``dict``

        :rtype: ``list`` of :class:`EC2Network`
        """
        params = {'Action': 'DescribeVpcs'}

        if network_ids:
            params.update(self._pathlist('VpcId', network_ids))

        if filters:
            params.update(self._build_filters(filters))

        return self._to_networks(
            self.connection.request(self.path, params=params).object
        )

    def ex_create_network(self, cidr_block, name=None,
                          instance_tenancy='default'):
        """
        Create a network/VPC

        :param cidr_block: The CIDR block assigned to the network
        :type cidr_block: ``str``

        :param name: An optional name for the network
        :type name: ``str``

        :param instance_tenancy: The allowed tenancy of instances launched
                                 into the VPC.
                                 Valid values: default/dedicated
        :type instance_tenancy: ``str``

        :return: Dictionary of network properties
        :rtype: ``dict``
        """
        params = {'Action': 'CreateVpc',
                  'CidrBlock': cidr_block,
                  'InstanceTenancy': instance_tenancy}

        response = self.connection.request(self.path, params=params).object
        element = response.findall(fixxpath(xpath='vpc',
                                            namespace=NAMESPACE))[0]

        network = self._to_network(element, name)

        # The VPC name is not a native API attribute; it is stored as a
        # 'Name' tag on the newly created network.
        if name and self.ex_create_tags(network, {'Name': name}):
            network.extra['tags']['Name'] = name

        return network

    def ex_delete_network(self, vpc):
        """
        Deletes a network/VPC.

        :param vpc: VPC to delete.
        :type vpc: :class:`.EC2Network`

        :rtype: ``bool``
        """
        params = {'Action': 'DeleteVpc', 'VpcId': vpc.id}

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)

    def ex_list_subnets(self, subnet_ids=None, filters=None):
        """
        Returns a list of :class:`EC2NetworkSubnet` objects for the
        current region.

        :param subnet_ids: Returns only subnets matching the provided
                           subnet IDs. If not specified, a list of all
                           the subnets in the corresponding region
                           is returned.
        :type subnet_ids: ``list``

        :param filters: The filters so that the list returned includes
                        information for certain subnets only.
        :type filters: ``dict``

        :rtype: ``list`` of :class:`EC2NetworkSubnet`
        """
        params = {'Action': 'DescribeSubnets'}

        if subnet_ids:
            params.update(self._pathlist('SubnetId', subnet_ids))

        if filters:
            params.update(self._build_filters(filters))

        return self._to_subnets(
            self.connection.request(self.path, params=params).object
        )

    def ex_create_subnet(self, vpc_id, cidr_block,
                         availability_zone, name=None):
        """
        Creates a network subnet within a VPC.

        :param vpc_id: The ID of the VPC that the subnet should be
                       associated with
        :type vpc_id: ``str``

        :param cidr_block: The CIDR block assigned to the subnet
        :type cidr_block: ``str``

        :param availability_zone: The availability zone where the subnet
                                  should reside
        :type availability_zone: ``str``

        :param name: An optional name for the network
        :type name: ``str``

        :rtype: :class: `EC2NetworkSubnet`
        """
        params = {'Action': 'CreateSubnet',
                  'VpcId': vpc_id,
                  'CidrBlock': cidr_block,
                  'AvailabilityZone': availability_zone}

        response = self.connection.request(self.path, params=params).object
        element = response.findall(fixxpath(xpath='subnet',
                                            namespace=NAMESPACE))[0]

        subnet = self._to_subnet(element, name)

        # As with VPCs, the subnet's name lives in a 'Name' tag.
        if name and self.ex_create_tags(subnet, {'Name': name}):
            subnet.extra['tags']['Name'] = name

        return subnet

    def ex_delete_subnet(self, subnet):
        """
        Deletes a VPC subnet.

        :param subnet: The subnet to delete
        :type subnet: :class:`.EC2NetworkSubnet`

        :rtype: ``bool``
        """
        params = {'Action': 'DeleteSubnet', 'SubnetId': subnet.id}

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)

    def ex_list_security_groups(self):
        """
        Lists existing Security Groups.

        @note: This is a non-standard extension API, and only works for EC2.
        :rtype: ``list`` of ``str``
        """
        params = {'Action': 'DescribeSecurityGroups'}
        response = self.connection.request(self.path, params=params).object

        groups = []
        for group in findall(element=response,
                             xpath='securityGroupInfo/item',
                             namespace=NAMESPACE):
            name = findtext(element=group, xpath='groupName',
                            namespace=NAMESPACE)
            groups.append(name)

        return groups

    def ex_get_security_groups(self, group_ids=None,
                               group_names=None, filters=None):
        """
        Returns a list of :class:`EC2SecurityGroup` objects for the
        current region.

        :param group_ids: Returns only groups matching the provided
                          group IDs.
        :type group_ids: ``list``

        :param group_names: Returns only groups matching the provided
                            group names.
        :type group_names: ``list``

        :param filters: The filters so that the list returned includes
                        information for specific security groups only.
        :type filters: ``dict``

        :rtype: ``list`` of :class:`EC2SecurityGroup`
        """
        params = {'Action': 'DescribeSecurityGroups'}

        if group_ids:
            params.update(self._pathlist('GroupId', group_ids))

        if group_names:
            for name_idx, group_name in enumerate(group_names):
                name_idx += 1  # We want 1-based indexes
                name_key = 'GroupName.%s' % (name_idx)
                params[name_key] = group_name

        if filters:
            params.update(self._build_filters(filters))

        response = self.connection.request(self.path, params=params)
        return self._to_security_groups(response.object)

    def ex_create_security_group(self, name, description, vpc_id=None):
        """
        Creates a new Security Group in EC2-Classic or a targeted VPC.

        :param name: The name of the security group to create.
                     This must be unique.
        :type name: ``str``

        :param description: Human readable description of a Security
                            Group.
        :type description: ``str``

        :param vpc_id: Optional identifier for VPC networks
        :type vpc_id: ``str``

        :rtype: ``dict``
        """
        params = {'Action': 'CreateSecurityGroup',
                  'GroupName': name,
                  'GroupDescription': description}

        if vpc_id is not None:
            params['VpcId'] = vpc_id

        response = self.connection.request(self.path, params=params).object
        group_id = findattr(element=response, xpath='groupId',
                            namespace=NAMESPACE)
        return {
            'group_id': group_id
        }

    def ex_delete_security_group_by_id(self, group_id):
        """
        Deletes a Security Group using the group ID.

        :param group_id: The ID of the security group
        :type group_id: ``str``

        :rtype: ``bool``
        """
        params = {'Action': 'DeleteSecurityGroup', 'GroupId': group_id}

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)

    def ex_delete_security_group_by_name(self, group_name):
        """
        Deletes a Security Group using the group name.

        :param group_name: The name of the security group
        :type group_name: ``str``

        :rtype: ``bool``
        """
        params = {'Action': 'DeleteSecurityGroup', 'GroupName': group_name}

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)

    def ex_delete_security_group(self, name):
        """
        A wrapper method which calls ex_delete_security_group_by_name.

        :param name: The name of the security group
        :type name: ``str``

        :rtype: ``bool``
        """
        return self.ex_delete_security_group_by_name(name)

    def ex_authorize_security_group(self, name, from_port, to_port, cidr_ip,
                                    protocol='tcp'):
        """
        Edit a Security Group to allow specific traffic.

        @note: This is a non-standard extension API, and only works for EC2.

        :param name: The name of the security group to edit
        :type name: ``str``

        :param from_port: The beginning of the port range to open
        :type from_port: ``str``

        :param to_port: The end of the port range to open
        :type to_port: ``str``

        :param cidr_ip: The ip to allow traffic for.
        :type cidr_ip: ``str``

        :param protocol: tcp/udp/icmp
        :type protocol: ``str``

        :rtype: ``bool``
        """
        params = {'Action': 'AuthorizeSecurityGroupIngress',
                  'GroupName': name,
                  'IpProtocol': protocol,
                  'FromPort': str(from_port),
                  'ToPort': str(to_port),
                  'CidrIp': cidr_ip}
        try:
            res = self.connection.request(
                self.path, params=params.copy()).object
            return self._get_boolean(res)
        except Exception:
            # A duplicate-rule error is silently ignored; note that in that
            # case this method falls through and returns None (falsy)
            # rather than True.
            e = sys.exc_info()[1]
            if e.args[0].find('InvalidPermission.Duplicate') == -1:
                raise e

    def ex_authorize_security_group_ingress(self, id, from_port, to_port,
                                            cidr_ips=None, group_pairs=None,
                                            protocol='tcp'):
        """
        Edit a Security Group to allow specific ingress traffic using
        CIDR blocks or either a group ID, group name or user ID (account).

        :param id: The id of the security group to edit
        :type id: ``str``

        :param from_port: The beginning of the port range to open
        :type from_port: ``int``

        :param to_port: The end of the port range to open
        :type to_port: ``int``

        :param cidr_ips: The list of IP ranges to allow traffic for.
        :type cidr_ips: ``list``

        :param group_pairs: Source user/group pairs to allow traffic for.
                    More info can be found at http://goo.gl/stBHJF

                    EC2 Classic Example: To allow access from any system
                    associated with the default group on account 1234567890

                    [{'group_name': 'default', 'user_id': '1234567890'}]

                    VPC example: To allow access from any system associated
                    with security group sg-47ad482e on your own account

                    [{'group_id': ' sg-47ad482e'}]
        :type group_pairs: ``list`` of ``dict``

        :param protocol: tcp/udp/icmp
        :type protocol: ``str``

        :rtype: ``bool``
        """
        params = self._get_common_security_group_params(id,
                                                        protocol,
                                                        from_port,
                                                        to_port,
                                                        cidr_ips,
                                                        group_pairs)

        params["Action"] = 'AuthorizeSecurityGroupIngress'

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)

    def ex_authorize_security_group_egress(self, id, from_port, to_port,
                                           cidr_ips, group_pairs=None,
                                           protocol='tcp'):
        """
        Edit a Security Group to allow specific egress traffic using
        CIDR blocks or either a group ID, group name or user ID (account).
        This call is not supported for EC2 classic and only works
        for VPC groups.

        :param id: The id of the security group to edit
        :type id: ``str``

        :param from_port: The beginning of the port range to open
        :type from_port: ``int``

        :param to_port: The end of the port range to open
        :type to_port: ``int``

        :param cidr_ips: The list of ip ranges to allow traffic for.
        :type cidr_ips: ``list``

        :param group_pairs: Source user/group pairs to allow traffic for.
                    More info can be found at http://goo.gl/stBHJF

                    EC2 Classic Example: To allow access from any system
                    associated with the default group on account 1234567890

                    [{'group_name': 'default', 'user_id': '1234567890'}]

                    VPC Example: Allow access from any system associated
                    with security group sg-47ad482e on your own account

                    [{'group_id': ' sg-47ad482e'}]
        :type group_pairs: ``list`` of ``dict``

        :param protocol: tcp/udp/icmp
        :type protocol: ``str``

        :rtype: ``bool``
        """
        params = self._get_common_security_group_params(id,
                                                        protocol,
                                                        from_port,
                                                        to_port,
                                                        cidr_ips,
                                                        group_pairs)

        params["Action"] = 'AuthorizeSecurityGroupEgress'

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)

    def ex_revoke_security_group_ingress(self, id, from_port, to_port,
                                         cidr_ips=None, group_pairs=None,
                                         protocol='tcp'):
        """
        Edits a Security Group to revoke specific ingress traffic using
        CIDR blocks or either a group ID, group name or user ID (account).

        :param id: The ID of the security group to edit
        :type id: ``str``

        :param from_port: The beginning of the port range to open
        :type from_port: ``int``

        :param to_port: The end of the port range to open
        :type to_port: ``int``

        :param cidr_ips: The list of ip ranges to allow traffic for.
        :type cidr_ips: ``list``

        :param group_pairs: Source user/group pairs to allow traffic for.
                    More info can be found at http://goo.gl/stBHJF

                    EC2 Classic Example: To allow access from any system
                    associated with the default group on account 1234567890

                    [{'group_name': 'default', 'user_id': '1234567890'}]

                    VPC Example: Allow access from any system associated
                    with security group sg-47ad482e on your own account

                    [{'group_id': ' sg-47ad482e'}]
        :type group_pairs: ``list`` of ``dict``

        :param protocol: tcp/udp/icmp
        :type protocol: ``str``

        :rtype: ``bool``
        """
        params = self._get_common_security_group_params(id,
                                                        protocol,
                                                        from_port,
                                                        to_port,
                                                        cidr_ips,
                                                        group_pairs)

        params["Action"] = 'RevokeSecurityGroupIngress'

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)

    def ex_revoke_security_group_egress(self, id, from_port, to_port,
                                        cidr_ips=None, group_pairs=None,
                                        protocol='tcp'):
        """
        Edit a Security Group to revoke specific egress traffic using
        CIDR blocks or either a group ID, group name or user ID (account).
        This call is not supported for EC2 classic and only works
        for VPC groups.

        :param id: The id of the security group to edit
        :type id: ``str``

        :param from_port: The beginning of the port range to open
        :type from_port: ``int``

        :param to_port: The end of the port range to open
        :type to_port: ``int``

        :param cidr_ips: The list of ip ranges to allow traffic for.
        :type cidr_ips: ``list``

        :param group_pairs: Source user/group pairs to allow traffic for.
                    More info can be found at http://goo.gl/stBHJF

                    EC2 Classic Example: To allow access from any system
                    associated with the default group on account 1234567890

                    [{'group_name': 'default', 'user_id': '1234567890'}]

                    VPC Example: Allow access from any system associated
                    with security group sg-47ad482e on your own account

                    [{'group_id': ' sg-47ad482e'}]
        :type group_pairs: ``list`` of ``dict``

        :param protocol: tcp/udp/icmp
        :type protocol: ``str``

        :rtype: ``bool``
        """
        params = self._get_common_security_group_params(id,
                                                        protocol,
                                                        from_port,
                                                        to_port,
                                                        cidr_ips,
                                                        group_pairs)

        params['Action'] = 'RevokeSecurityGroupEgress'

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)

    def ex_authorize_security_group_permissive(self, name):
        """
        Edit a Security Group to allow all traffic.

        @note: This is a non-standard extension API, and only works for EC2.

        :param name: The name of the security group to edit
        :type name: ``str``

        :rtype: ``list`` of ``str``
        """
        results = []
        params = {'Action': 'AuthorizeSecurityGroupIngress',
                  'GroupName': name,
                  'IpProtocol': 'tcp',
                  'FromPort': '0',
                  'ToPort': '65535',
                  'CidrIp': '0.0.0.0/0'}
        # Open tcp, udp and icmp in turn; "already open" (duplicate rule)
        # errors are ignored for each protocol.
        try:
            results.append(
                self.connection.request(self.path, params=params.copy()).object
            )
        except Exception:
            e = sys.exc_info()[1]
            if e.args[0].find("InvalidPermission.Duplicate") == -1:
                raise e
        params['IpProtocol'] = 'udp'

        try:
            results.append(
                self.connection.request(self.path, params=params.copy()).object
            )
        except Exception:
            e = sys.exc_info()[1]
            if e.args[0].find("InvalidPermission.Duplicate") == -1:
                raise e

        params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'})

        try:
            results.append(
                self.connection.request(self.path, params=params.copy()).object
            )
        except Exception:
            e = sys.exc_info()[1]
            if e.args[0].find("InvalidPermission.Duplicate") == -1:
                raise e
        return results

    def ex_list_availability_zones(self, only_available=True):
        """
        Returns a list of :class:`ExEC2AvailabilityZone` objects for the
        current region.
        Note: This is an extension method and is only available for EC2
        driver.

        :keyword  only_available: If true, returns only availability zones
                                  with state 'available'
        :type     only_available: ``str``

        :rtype: ``list`` of :class:`ExEC2AvailabilityZone`
        """
        params = {'Action': 'DescribeAvailabilityZones'}

        filters = {'region-name': self.region_name}
        if only_available:
            filters['state'] = 'available'

        params.update(self._build_filters(filters))

        result = self.connection.request(self.path,
                                         params=params.copy()).object

        availability_zones = []
        for element in findall(element=result,
                               xpath='availabilityZoneInfo/item',
                               namespace=NAMESPACE):
            name = findtext(element=element, xpath='zoneName',
                            namespace=NAMESPACE)
            zone_state = findtext(element=element, xpath='zoneState',
                                  namespace=NAMESPACE)
            region_name = findtext(element=element, xpath='regionName',
                                   namespace=NAMESPACE)

            availability_zone = ExEC2AvailabilityZone(
                name=name,
                zone_state=zone_state,
                region_name=region_name
            )
            availability_zones.append(availability_zone)

        return availability_zones

    def ex_describe_tags(self, resource):
        """
        Returns a dictionary of tags for a resource (e.g. Node or
        StorageVolume).

        :param resource: The resource to be used
        :type resource: any resource class, such as :class:`Node`,
                        :class:`StorageVolume`, or :class:`NodeImage`

        :return: A dictionary of Node tags
        :rtype: ``dict``
        """
        params = {'Action': 'DescribeTags'}

        filters = {
            'resource-id': resource.id
        }

        params.update(self._build_filters(filters))

        result = self.connection.request(self.path, params=params).object

        return self._get_resource_tags(result)

    def ex_create_tags(self, resource, tags):
        """
        Creates tags for a resource (Node or StorageVolume).

        :param resource: The resource to be tagged
        :type resource: :class:`Node` or :class:`StorageVolume` or
                        :class:`VolumeSnapshot`

        :param tags: A dictionary or other mapping of strings to strings,
                     associating tag names with tag values.
        :type tags: ``dict``

        :rtype: ``bool``
        """
        # NOTE: an empty/falsy tags mapping returns None (falsy), not a
        # real boolean as the docstring suggests.
        if not tags:
            return

        params = {'Action': 'CreateTags',
                  'ResourceId.0': resource.id}
        for i, key in enumerate(tags):
            params['Tag.%d.Key' % i] = key
            params['Tag.%d.Value' % i] = tags[key]

        res = self.connection.request(self.path,
                                      params=params.copy()).object
        return self._get_boolean(res)

    def ex_delete_tags(self, resource, tags):
        """
        Deletes tags from a resource.

        :param resource: The resource to be tagged
        :type resource: :class:`Node` or :class:`StorageVolume`

        :param tags: A dictionary or other mapping of strings to strings,
                     specifying the tag names and tag values to be deleted.
        :type tags: ``dict``

        :rtype: ``bool``
        """
        # NOTE: as with ex_create_tags, an empty mapping returns None.
        if not tags:
            return

        params = {'Action': 'DeleteTags',
                  'ResourceId.0': resource.id}
        for i, key in enumerate(tags):
            params['Tag.%d.Key' % i] = key
            # A None value deletes the tag regardless of its current value.
            if tags[key] is not None:
                params['Tag.%d.Value' % i] = tags[key]

        res = self.connection.request(self.path,
                                      params=params.copy()).object
        return self._get_boolean(res)

    def ex_get_metadata_for_node(self, node):
        """
        Returns the metadata associated with the node.

        :param node: Node instance
        :type node: :class:`Node`

        :return: A dictionary or other mapping of strings to strings,
                 associating tag names with tag values.
        :rtype tags: ``dict``
        """
        return node.extra['tags']

    def ex_allocate_address(self, domain='standard'):
        """
        Allocate a new Elastic IP address for EC2 classic or VPC

        :param domain: The domain to allocate the new address in
                       (standard/vpc)
        :type domain: ``str``

        :return: Instance of ElasticIP
        :rtype: :class:`ElasticIP`
        """
        params = {'Action': 'AllocateAddress'}

        # 'standard' is the API default, so the Domain parameter is only
        # sent for VPC allocations.
        if domain == 'vpc':
            params['Domain'] = domain

        response = self.connection.request(self.path, params=params).object

        return self._to_address(response, only_associated=False)

    def ex_release_address(self, elastic_ip, domain=None):
        """
        Releases an Elastic IP address using the IP (EC2-Classic) or
        using the allocation ID (VPC).
        :param elastic_ip: Elastic IP instance
        :type elastic_ip: :class:`ElasticIP`

        :param domain: The domain where the IP resides (vpc only)
        :type domain: ``str``

        :return: True on success, False otherwise.
        :rtype: ``bool``
        """
        params = {'Action': 'ReleaseAddress'}

        if domain is not None and domain != 'vpc':
            raise AttributeError('Domain can only be set to vpc')

        # EC2-Classic releases by public IP; VPC releases by allocation ID.
        if domain is None:
            params['PublicIp'] = elastic_ip.ip
        else:
            params['AllocationId'] = elastic_ip.extra['allocation_id']

        response = self.connection.request(self.path, params=params).object

        return self._get_boolean(response)

    def ex_describe_all_addresses(self, only_associated=False):
        """
        Returns all the Elastic IP addresses for this account
        optionally, returns only addresses associated with nodes.

        :param only_associated: If true, return only the addresses
                                that are associated with an instance.
        :type only_associated: ``bool``

        :return: List of Elastic IP addresses.
        :rtype: ``list`` of :class:`ElasticIP`
        """
        params = {'Action': 'DescribeAddresses'}

        response = self.connection.request(self.path, params=params).object

        # We will send our only_associated boolean over to
        # shape how the return data is sent back
        return self._to_addresses(response, only_associated)

    def ex_associate_address_with_node(self, node, elastic_ip, domain=None):
        """
        Associate an Elastic IP address with a particular node.

        :param node: Node instance
        :type node: :class:`Node`

        :param elastic_ip: Elastic IP instance
        :type elastic_ip: :class:`ElasticIP`

        :param domain: The domain where the IP resides (vpc only)
        :type domain: ``str``

        :return: A string representation of the association ID which is
                 required for VPC disassociation. EC2/standard
                 addresses return None
        :rtype: ``None`` or ``str``
        """
        params = {'Action': 'AssociateAddress', 'InstanceId': node.id}

        if domain is not None and domain != 'vpc':
            raise AttributeError('Domain can only be set to vpc')

        if domain is None:
            params.update({'PublicIp': elastic_ip.ip})
        else:
            params.update({'AllocationId': elastic_ip.extra['allocation_id']})

        response = self.connection.request(self.path, params=params).object
        association_id = findtext(element=response,
                                  xpath='associationId',
                                  namespace=NAMESPACE)
        return association_id

    def ex_associate_addresses(self, node, elastic_ip, domain=None):
        """
        Note: This method has been deprecated in favor of
        the ex_associate_address_with_node method.
        """
        return self.ex_associate_address_with_node(node=node,
                                                   elastic_ip=elastic_ip,
                                                   domain=domain)

    def ex_disassociate_address(self, elastic_ip, domain=None):
        """
        Disassociates an Elastic IP address using the IP (EC2-Classic)
        or the association ID (VPC).

        :param elastic_ip: ElasticIP instance
        :type elastic_ip: :class:`ElasticIP`

        :param domain: The domain where the IP resides (vpc only)
        :type domain: ``str``

        :return: True on success, False otherwise.
        :rtype: ``bool``
        """
        params = {'Action': 'DisassociateAddress'}

        if domain is not None and domain != 'vpc':
            raise AttributeError('Domain can only be set to vpc')

        if domain is None:
            params['PublicIp'] = elastic_ip.ip
        else:
            params['AssociationId'] = elastic_ip.extra['association_id']

        res = self.connection.request(self.path, params=params).object
        return self._get_boolean(res)

    def ex_describe_addresses(self, nodes):
        """
        Returns Elastic IP addresses for all the nodes in the provided list.

        :param nodes: A list of :class:`Node` instances
        :type nodes: ``list`` of :class:`Node`

        :return: Dictionary where a key is a node ID and the value is a
                 list with the Elastic IP addresses associated with
                 this node.
        :rtype: ``dict``
        """
        if not nodes:
            return {}

        params = {'Action': 'DescribeAddresses'}

        if len(nodes) == 1:
            self._add_instance_filter(params, nodes[0])

        result = self.connection.request(self.path, params=params).object

        node_instance_ids = [node.id for node in nodes]
        nodes_elastic_ip_mappings = {}

        # We will set only_associated to True so that we only get back
        # IPs which are associated with instances
        only_associated = True

        for node_id in node_instance_ids:
            nodes_elastic_ip_mappings.setdefault(node_id, [])
            for addr in self._to_addresses(result,
                                           only_associated):

                instance_id = addr.instance_id

                if node_id == instance_id:
                    nodes_elastic_ip_mappings[instance_id].append(
                        addr.ip)

        return nodes_elastic_ip_mappings

    def ex_describe_addresses_for_node(self, node):
        """
        Returns a list of Elastic IP Addresses associated with this node.

        :param node: Node instance
        :type node: :class:`Node`

        :return: List Elastic IP Addresses attached to this node.
        :rtype: ``list`` of ``str``
        """
        node_elastic_ips = self.ex_describe_addresses([node])
        return node_elastic_ips[node.id]

    # Network interface management methods

    def ex_list_network_interfaces(self):
        """
        Returns all network interfaces.

        :return: List of EC2NetworkInterface instances
        :rtype: ``list`` of :class `EC2NetworkInterface`
        """
        params = {'Action': 'DescribeNetworkInterfaces'}

        return self._to_interfaces(
            self.connection.request(self.path, params=params).object
        )

    def ex_create_network_interface(self, subnet, name=None,
                                    description=None,
                                    private_ip_address=None):
        """
        Create a network interface within a VPC subnet.

        :param subnet: EC2NetworkSubnet instance
        :type subnet: :class:`EC2NetworkSubnet`

        :param name: Optional name of the interface
        :type name: ``str``

        :param description: Optional description of the network interface
        :type description: ``str``

        :param private_ip_address: Optional address to assign as the
                                   primary private IP address of the
                                   interface. If one is not provided then
                                   Amazon will automatically auto-assign
                                   an available IP.
                                   EC2 allows assignment of multiple
                                   IPs, but this will be the primary.
        :type private_ip_address: ``str``

        :return: EC2NetworkInterface instance
        :rtype: :class `EC2NetworkInterface`
        """
        params = {'Action': 'CreateNetworkInterface',
                  'SubnetId': subnet.id}

        if description:
            params['Description'] = description

        if private_ip_address:
            params['PrivateIpAddress'] = private_ip_address

        response = self.connection.request(self.path, params=params).object

        element = response.findall(fixxpath(xpath='networkInterface',
                                            namespace=NAMESPACE))[0]

        interface = self._to_interface(element, name)

        # The interface name is not a native API attribute; it is stored
        # as a 'Name' tag on the new interface.
        if name and self.ex_create_tags(interface, {'Name': name}):
            interface.extra['tags']['Name'] = name

        return interface

    def ex_delete_network_interface(self, network_interface):
        """
        Deletes a network interface.

        :param network_interface: EC2NetworkInterface instance
        :type network_interface: :class:`EC2NetworkInterface`

        :rtype: ``bool``
        """
        params = {'Action': 'DeleteNetworkInterface',
                  'NetworkInterfaceId': network_interface.id}

        res = self.connection.request(self.path, params=params).object
        return self._get_boolean(res)

    def ex_attach_network_interface_to_node(self, network_interface,
                                            node, device_index):
        """
        Attach a network interface to an instance.

        :param network_interface: EC2NetworkInterface instance
        :type network_interface: :class:`EC2NetworkInterface`

        :param node: Node instance
        :type node: :class:`Node`

        :param device_index: The interface device index
        :type device_index: ``int``

        :return: String representation of the attachment id.
                 This is required to detach the interface.
        :rtype: ``str``
        """
        params = {'Action': 'AttachNetworkInterface',
                  'NetworkInterfaceId': network_interface.id,
                  'InstanceId': node.id,
                  'DeviceIndex': device_index}

        response = self.connection.request(self.path, params=params).object
        attachment_id = findattr(element=response, xpath='attachmentId',
                                 namespace=NAMESPACE)

        return attachment_id

    def ex_detach_network_interface(self, attachment_id, force=False):
        """
        Detach a network interface from an instance.

        :param attachment_id: The attachment ID associated with the
                              interface
        :type attachment_id: ``str``

        :param force: Forces the detachment.
        :type force: ``bool``

        :return: ``True`` on successful detachment, ``False`` otherwise.
        :rtype: ``bool``
        """
        params = {'Action': 'DetachNetworkInterface',
                  'AttachmentId': attachment_id}

        if force:
            params['Force'] = True

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)

    def ex_modify_instance_attribute(self, node, attributes):
        """
        Modify node attributes.
        A list of valid attributes can be found at http://goo.gl/gxcj8

        :param node: Node instance
        :type node: :class:`Node`

        :param attributes: Dictionary with node attributes
        :type attributes: ``dict``

        :return: True on success, False otherwise.
        :rtype: ``bool``
        """
        attributes = attributes or {}
        attributes.update({'InstanceId': node.id})

        params = {'Action': 'ModifyInstanceAttribute'}
        params.update(attributes)

        res = self.connection.request(self.path,
                                      params=params.copy()).object
        return self._get_boolean(res)

    def ex_modify_snapshot_attribute(self, snapshot, attributes):
        """
        Modify Snapshot attributes.

        :param snapshot: VolumeSnapshot instance
        :type snapshot: :class:`VolumeSnapshot`

        :param attributes: Dictionary with snapshot attributes
        :type attributes: ``dict``

        :return: True on success, False otherwise.
:rtype: ``bool`` """ attributes = attributes or {} attributes.update({'SnapshotId': snapshot.id}) params = {'Action': 'ModifySnapshotAttribute'} params.update(attributes) res = self.connection.request(self.path, params=params.copy()).object return self._get_boolean(res) def ex_modify_image_attribute(self, image, attributes): """ Modifies image attributes. :param image: NodeImage instance :type image: :class:`NodeImage` :param attributes: A dictionary with node attributes :type attributes: ``dict`` :return: True on success, False otherwise. :rtype: ``bool`` """ attributes = attributes or {} attributes.update({'ImageId': image.id}) params = {'Action': 'ModifyImageAttribute'} params.update(attributes) res = self.connection.request(self.path, params=params.copy()).object return self._get_boolean(res) def ex_change_node_size(self, node, new_size): """ Change the node size. Note: Node must be turned of before changing the size. :param node: Node instance :type node: :class:`Node` :param new_size: NodeSize instance :type new_size: :class:`NodeSize` :return: True on success, False otherwise. :rtype: ``bool`` """ if 'instancetype' in node.extra: current_instance_type = node.extra['instancetype'] if current_instance_type == new_size.id: raise ValueError('New instance size is the same as' + 'the current one') attributes = {'InstanceType.Value': new_size.id} return self.ex_modify_instance_attribute(node, attributes) def ex_start_node(self, node): """ Starts the node by passing in the node object, does not work with instance store backed instances. 
        :param node: The node to be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        params = {'Action': 'StartInstances'}
        params.update(self._pathlist('InstanceId', [node.id]))
        res = self.connection.request(self.path, params=params).object
        return self._get_state_boolean(res)

    def ex_stop_node(self, node):
        """
        Stops the node by passing in the node object, does not work with
        instance store backed instances

        :param node: The node to be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        params = {'Action': 'StopInstances'}
        params.update(self._pathlist('InstanceId', [node.id]))
        res = self.connection.request(self.path, params=params).object
        return self._get_state_boolean(res)

    def ex_get_console_output(self, node):
        """
        Gets console output for the node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :return: A dictionary with the following keys:
                 - instance_id (``str``)
                 - timestamp (``datetime.datetime``) - last output timestamp
                 - output (``str``) - console output
        :rtype: ``dict``
        """
        params = {
            'Action': 'GetConsoleOutput',
            'InstanceId': node.id
        }

        response = self.connection.request(self.path, params=params).object

        timestamp = findattr(element=response,
                             xpath='timestamp',
                             namespace=NAMESPACE)

        encoded_string = findattr(element=response,
                                  xpath='output',
                                  namespace=NAMESPACE)

        timestamp = parse_date(timestamp)

        # The API returns the console output base64-encoded.
        if encoded_string:
            output = base64.b64decode(b(encoded_string)).decode('utf-8')
        else:
            # No console output
            output = None

        return {'instance_id': node.id,
                'timestamp': timestamp,
                'output': output}

    def ex_list_reserved_nodes(self):
        """
        Lists all reserved instances/nodes which can be purchased from
        Amazon for one or three year terms. Reservations are made at a
        region level and reduce the hourly charge for instances.

        More information can be found at http://goo.gl/ulXCC7.

        :rtype: ``list`` of :class:`.EC2ReservedNode`
        """
        params = {'Action': 'DescribeReservedInstances'}

        response = self.connection.request(self.path, params=params).object

        return self._to_reserved_nodes(response, 'reservedInstancesSet/item')

    # Account specific methods

    def ex_get_limits(self):
        """
        Retrieve account resource limits.

        :rtype: ``dict``
        """
        attributes = ['max-instances', 'max-elastic-ips',
                      'vpc-max-elastic-ips']
        params = {}
        params['Action'] = 'DescribeAccountAttributes'

        # NOTE(review): attribute name indexes are 0-based here, unlike
        # the 1-based indexes used elsewhere in this driver.
        for index, attribute in enumerate(attributes):
            params['AttributeName.%s' % (index)] = attribute

        response = self.connection.request(self.path, params=params)
        data = response.object

        elems = data.findall(fixxpath(xpath='accountAttributeSet/item',
                                      namespace=NAMESPACE))

        result = {'resource': {}}

        for elem in elems:
            name = findtext(element=elem, xpath='attributeName',
                            namespace=NAMESPACE)
            value = findtext(element=elem,
                             xpath='attributeValueSet/item/attributeValue',
                             namespace=NAMESPACE)

            result['resource'][name] = int(value)

        return result

    # Deprecated extension methods

    def ex_list_keypairs(self):
        """
        Lists all the keypair names and fingerprints.

        :rtype: ``list`` of ``dict``
        """
        warnings.warn('This method has been deprecated in favor of '
                      'list_key_pairs method')

        key_pairs = self.list_key_pairs()

        result = []

        for key_pair in key_pairs:
            item = {
                'keyName': key_pair.name,
                'keyFingerprint': key_pair.fingerprint,
            }
            result.append(item)

        return result

    def ex_describe_all_keypairs(self):
        """
        Returns names for all the available key pairs.

        @note: This is a non-standard extension API, and only works for EC2.

        :rtype: ``list`` of ``str``
        """
        names = [key_pair.name for key_pair in self.list_key_pairs()]
        return names

    def ex_describe_keypairs(self, name):
        """
        Here for backward compatibility.
        """
        return self.ex_describe_keypair(name=name)

    def ex_describe_keypair(self, name):
        """
        Describes a keypair by name.

        @note: This is a non-standard extension API, and only works for EC2.
        :param name: The name of the keypair to describe.
        :type name: ``str``

        :rtype: ``dict``
        """
        params = {
            'Action': 'DescribeKeyPairs',
            'KeyName.1': name
        }

        response = self.connection.request(self.path, params=params).object
        key_name = findattr(element=response, xpath='keySet/item/keyName',
                            namespace=NAMESPACE)
        fingerprint = findattr(element=response,
                               xpath='keySet/item/keyFingerprint',
                               namespace=NAMESPACE).strip()
        return {
            'keyName': key_name,
            'keyFingerprint': fingerprint
        }

    def ex_create_keypair(self, name):
        """
        Creates a new keypair

        @note: This is a non-standard extension API, and only works for EC2.

        :param name: The name of the keypair to Create. This must be
            unique, otherwise an InvalidKeyPair.Duplicate exception is
            raised.
        :type name: ``str``

        :rtype: ``dict``
        """
        warnings.warn('This method has been deprecated in favor of '
                      'create_key_pair method')

        key_pair = self.create_key_pair(name=name)

        result = {
            'keyMaterial': key_pair.private_key,
            'keyFingerprint': key_pair.fingerprint
        }

        return result

    def ex_delete_keypair(self, keypair):
        """
        Deletes a key pair by name.

        @note: This is a non-standard extension API, and only works with
               EC2.

        :param keypair: The name of the keypair to delete.
        :type keypair: ``str``

        :rtype: ``bool``
        """
        warnings.warn('This method has been deprecated in favor of '
                      'delete_key_pair method')

        keypair = KeyPair(name=keypair, public_key=None, fingerprint=None,
                          driver=self)

        return self.delete_key_pair(keypair)

    def ex_import_keypair_from_string(self, name, key_material):
        """
        Imports a new public key where the public key is passed in as a
        string.

        @note: This is a non-standard extension API, and only works for EC2.

        :param name: The name of the public key to import. This must be
            unique, otherwise an InvalidKeyPair.Duplicate exception is
            raised.
        :type name: ``str``

        :param key_material: The contents of a public key file.
        :type key_material: ``str``

        :rtype: ``dict``
        """
        warnings.warn('This method has been deprecated in favor of '
                      'import_key_pair_from_string method')

        key_pair = self.import_key_pair_from_string(name=name,
                                                    key_material=key_material)

        result = {
            'keyName': key_pair.name,
            'keyFingerprint': key_pair.fingerprint
        }

        return result

    def ex_import_keypair(self, name, keyfile):
        """
        Imports a new public key where the public key is passed via a
        filename.

        @note: This is a non-standard extension API, and only works for EC2.

        :param name: The name of the public key to import. This must be
            unique, otherwise an InvalidKeyPair. Duplicate exception is
            raised.
        :type name: ``str``

        :param keyfile: The filename with the path of the public key
                        to import.
        :type keyfile: ``str``

        :rtype: ``dict``
        """
        warnings.warn('This method has been deprecated in favor of '
                      'import_key_pair_from_file method')

        key_pair = self.import_key_pair_from_file(name=name,
                                                  key_file_path=keyfile)

        result = {
            'keyName': key_pair.name,
            'keyFingerprint': key_pair.fingerprint
        }

        return result

    def ex_find_or_import_keypair_by_key_material(self, pubkey):
        """
        Given a public key, look it up in the EC2 KeyPair database. If it
        exists, return any information we have about it. Otherwise, create
        it.

        Keys that are created are named based on their comment and
        fingerprint.

        :rtype: ``dict``
        """
        key_fingerprint = get_pubkey_ssh2_fingerprint(pubkey)
        key_comment = get_pubkey_comment(pubkey, default='unnamed')
        key_name = '%s-%s' % (key_comment, key_fingerprint)

        key_pairs = self.list_key_pairs()
        # Match on fingerprint, not name: the same key material may have
        # been imported under a different name.
        key_pairs = [key_pair for key_pair in key_pairs if
                     key_pair.fingerprint == key_fingerprint]

        if len(key_pairs) >= 1:
            key_pair = key_pairs[0]
            result = {
                'keyName': key_pair.name,
                'keyFingerprint': key_pair.fingerprint
            }
        else:
            result = self.ex_import_keypair_from_string(key_name, pubkey)

        return result

    def ex_list_internet_gateways(self, gateway_ids=None, filters=None):
        """
        Describes available Internet gateways and whether or not they are
        attached to a VPC. These are required for VPC nodes to communicate
        over the Internet.

        :param gateway_ids: Returns only Internet gateways matching the
                            provided Internet gateway IDs. If not
                            specified, a list of all the Internet
                            gateways in the corresponding region is
                            returned.
        :type gateway_ids: ``list``

        :param filters: The filters so the list returned includes
                        information for certain gateways only.
        :type filters: ``dict``

        :rtype: ``list`` of :class:`.VPCInternetGateway`
        """
        params = {'Action': 'DescribeInternetGateways'}

        if gateway_ids:
            params.update(self._pathlist('InternetGatewayId', gateway_ids))

        if filters:
            params.update(self._build_filters(filters))

        response = self.connection.request(self.path, params=params).object

        return self._to_internet_gateways(response, 'internetGatewaySet/item')

    def ex_create_internet_gateway(self, name=None):
        """
        Creates a new VPC Internet gateway.

        :param name: An optional name for the gateway, stored as a 'Name'
                     tag on the created resource.
        :type name: ``str``

        :rtype: :class:`.VPCInternetGateway`
        """
        params = {'Action': 'CreateInternetGateway'}

        resp = self.connection.request(self.path, params=params).object

        element = resp.findall(fixxpath(xpath='internetGateway',
                                        namespace=NAMESPACE))

        gateway = self._to_internet_gateway(element[0], name)

        if name and self.ex_create_tags(gateway, {'Name': name}):
            gateway.extra['tags']['Name'] = name

        return gateway

    def ex_delete_internet_gateway(self, gateway):
        """
        Deletes a VPC Internet gateway.
:param gateway: The gateway to delete :type gateway: :class:`.VPCInternetGateway` :rtype: ``bool`` """ params = {'Action': 'DeleteInternetGateway', 'InternetGatewayId': gateway.id} res = self.connection.request(self.path, params=params).object return self._get_boolean(res) def ex_attach_internet_gateway(self, gateway, network): """ Attach an Internet gateway to a VPC :param gateway: The gateway to attach :type gateway: :class:`.VPCInternetGateway` :param network: The VPC network to attach to :type network: :class:`.EC2Network` :rtype: ``bool`` """ params = {'Action': 'AttachInternetGateway', 'InternetGatewayId': gateway.id, 'VpcId': network.id} res = self.connection.request(self.path, params=params).object return self._get_boolean(res) def ex_detach_internet_gateway(self, gateway, network): """ Detaches an Internet gateway from a VPC. :param gateway: The gateway to detach :type gateway: :class:`.VPCInternetGateway` :param network: The VPC network to detach from :type network: :class:`.EC2Network` :rtype: ``bool`` """ params = {'Action': 'DetachInternetGateway', 'InternetGatewayId': gateway.id, 'VpcId': network.id} res = self.connection.request(self.path, params=params).object return self._get_boolean(res) def ex_list_route_tables(self, route_table_ids=None, filters=None): """ Describes one or more of a VPC's route tables. These are used to determine where network traffic is directed. :param route_table_ids: Returns only route tables matching the provided route table IDs. If not specified, a list of all the route tables in the corresponding region is returned. :type route_table_ids: ``list`` :param filters: The filters so that the list returned includes information for certain route tables only. 
:type filters: ``dict`` :rtype: ``list`` of :class:`.EC2RouteTable` """ params = {'Action': 'DescribeRouteTables'} if route_table_ids: params.update(self._pathlist('RouteTableId', route_table_ids)) if filters: params.update(self._build_filters(filters)) response = self.connection.request(self.path, params=params) return self._to_route_tables(response.object) def ex_create_route_table(self, network, name=None): """ Creates a route table within a VPC. :param vpc_id: The VPC that the subnet should be created in. :type vpc_id: :class:`.EC2Network` :rtype: :class: `.EC2RouteTable` """ params = {'Action': 'CreateRouteTable', 'VpcId': network.id} response = self.connection.request(self.path, params=params).object element = response.findall(fixxpath(xpath='routeTable', namespace=NAMESPACE))[0] route_table = self._to_route_table(element, name=name) if name and self.ex_create_tags(route_table, {'Name': name}): route_table.extra['tags']['Name'] = name return route_table def ex_delete_route_table(self, route_table): """ Deletes a VPC route table. :param route_table: The route table to delete. :type route_table: :class:`.EC2RouteTable` :rtype: ``bool`` """ params = {'Action': 'DeleteRouteTable', 'RouteTableId': route_table.id} res = self.connection.request(self.path, params=params).object return self._get_boolean(res) def ex_associate_route_table(self, route_table, subnet): """ Associates a route table with a subnet within a VPC. Note: A route table can be associated with multiple subnets. :param route_table: The route table to associate. :type route_table: :class:`.EC2RouteTable` :param subnet: The subnet to associate with. :type subnet: :class:`.EC2Subnet` :return: Route table association ID. 
:rtype: ``str`` """ params = {'Action': 'AssociateRouteTable', 'RouteTableId': route_table.id, 'SubnetId': subnet.id} result = self.connection.request(self.path, params=params).object association_id = findtext(element=result, xpath='associationId', namespace=NAMESPACE) return association_id def ex_dissociate_route_table(self, subnet_association): """ Dissociates a subnet from a route table. :param subnet_association: The subnet association object or subnet association ID. :type subnet_association: :class:`.EC2SubnetAssociation` or ``str`` :rtype: ``bool`` """ if isinstance(subnet_association, EC2SubnetAssociation): subnet_association_id = subnet_association.id else: subnet_association_id = subnet_association params = {'Action': 'DisassociateRouteTable', 'AssociationId': subnet_association_id} res = self.connection.request(self.path, params=params).object return self._get_boolean(res) def ex_replace_route_table_association(self, subnet_association, route_table): """ Changes the route table associated with a given subnet in a VPC. Note: This method can be used to change which table is the main route table in the VPC (Specify the main route table's association ID and the route table to be the new main route table). :param subnet_association: The subnet association object or subnet association ID. :type subnet_association: :class:`.EC2SubnetAssociation` or ``str`` :param route_table: The new route table to associate. :type route_table: :class:`.EC2RouteTable` :return: A new route table association ID. 
:rtype: ``str`` """ if isinstance(subnet_association, EC2SubnetAssociation): subnet_association_id = subnet_association.id else: subnet_association_id = subnet_association params = {'Action': 'ReplaceRouteTableAssociation', 'AssociationId': subnet_association_id, 'RouteTableId': route_table.id} result = self.connection.request(self.path, params=params).object new_association_id = findtext(element=result, xpath='newAssociationId', namespace=NAMESPACE) return new_association_id def ex_create_route(self, route_table, cidr, internet_gateway=None, node=None, network_interface=None, vpc_peering_connection=None): """ Creates a route entry in the route table. :param route_table: The route table to create the route in. :type route_table: :class:`.EC2RouteTable` :param cidr: The CIDR block used for the destination match. :type cidr: ``str`` :param internet_gateway: The Internet gateway to route traffic through. :type internet_gateway: :class:`.VPCInternetGateway` :param node: The NAT instance to route traffic through. :type node: :class:`Node` :param network_interface: The network interface of the node to route traffic through. :type network_interface: :class:`.EC2NetworkInterface` :param vpc_peering_connection: The VPC peering connection. :type vpc_peering_connection: :class:`.VPCPeeringConnection` :rtype: ``bool`` Note: You must specify one of the following: internet_gateway, node, network_interface, vpc_peering_connection. """ params = {'Action': 'CreateRoute', 'RouteTableId': route_table.id, 'DestinationCidrBlock': cidr} if internet_gateway: params['GatewayId'] = internet_gateway.id if node: params['InstanceId'] = node.id if network_interface: params['NetworkInterfaceId'] = network_interface.id if vpc_peering_connection: params['VpcPeeringConnectionId'] = vpc_peering_connection.id res = self.connection.request(self.path, params=params).object return self._get_boolean(res) def ex_delete_route(self, route_table, cidr): """ Deletes a route entry from the route table. 
:param route_table: The route table to delete the route from. :type route_table: :class:`.EC2RouteTable` :param cidr: The CIDR block used for the destination match. :type cidr: ``str`` :rtype: ``bool`` """ params = {'Action': 'DeleteRoute', 'RouteTableId': route_table.id, 'DestinationCidrBlock': cidr} res = self.connection.request(self.path, params=params).object return self._get_boolean(res) def ex_replace_route(self, route_table, cidr, internet_gateway=None, node=None, network_interface=None, vpc_peering_connection=None): """ Replaces an existing route entry within a route table in a VPC. :param route_table: The route table to replace the route in. :type route_table: :class:`.EC2RouteTable` :param cidr: The CIDR block used for the destination match. :type cidr: ``str`` :param internet_gateway: The new internet gateway to route traffic through. :type internet_gateway: :class:`.VPCInternetGateway` :param node: The new NAT instance to route traffic through. :type node: :class:`Node` :param network_interface: The new network interface of the node to route traffic through. :type network_interface: :class:`.EC2NetworkInterface` :param vpc_peering_connection: The new VPC peering connection. :type vpc_peering_connection: :class:`.VPCPeeringConnection` :rtype: ``bool`` Note: You must specify one of the following: internet_gateway, node, network_interface, vpc_peering_connection. """ params = {'Action': 'ReplaceRoute', 'RouteTableId': route_table.id, 'DestinationCidrBlock': cidr} if internet_gateway: params['GatewayId'] = internet_gateway.id if node: params['InstanceId'] = node.id if network_interface: params['NetworkInterfaceId'] = network_interface.id if vpc_peering_connection: params['VpcPeeringConnectionId'] = vpc_peering_connection.id res = self.connection.request(self.path, params=params).object return self._get_boolean(res) def ex_modify_volume(self, volume, parameters): """ Modify volume parameters. 
A list of valid parameters can be found at https://goo.gl/N0rPEQ :param volume: Volume instance :type volume: :class:`Volume` :param parameters: Dictionary with updated volume parameters :type parameters: ``dict`` :return: Volume modification status object :rtype: :class:`VolumeModification """ parameters = parameters or {} volume_type = parameters.get('VolumeType') if volume_type and volume_type not in VALID_VOLUME_TYPES: raise ValueError('Invalid volume type specified: %s' % volume_type) parameters.update({'Action': 'ModifyVolume', 'VolumeId': volume.id}) response = self.connection.request(self.path, params=parameters.copy()).object return self._to_volume_modification(response.findall( fixxpath(xpath='volumeModification', namespace=NAMESPACE))[0]) def ex_describe_volumes_modifications(self, dry_run=False, volume_ids=None, filters=None): """ Describes one or more of your volume modifications. :param dry_run: dry_run :type dry_run: ``bool`` :param volume_ids: The volume_ids so that the response includes information for only said volumes :type volume_ids: ``dict`` :param filters: The filters so that the response includes information for only certain volumes :type filters: ``dict`` :return: List of volume modification status objects :rtype: ``list`` of :class:`VolumeModification """ params = {'Action': 'DescribeVolumesModifications'} if dry_run: params.update({'DryRun': dry_run}) if volume_ids: params.update(self._pathlist('VolumeId', volume_ids)) if filters: params.update(self._build_filters(filters)) response = self.connection.request(self.path, params=params).object return self._to_volume_modifications(response) def _ex_connection_class_kwargs(self): kwargs = super(BaseEC2NodeDriver, self)._ex_connection_class_kwargs() if hasattr(self, 'token') and self.token is not None: kwargs['token'] = self.token # Force signature_version 4 for tokens or auth breaks kwargs['signature_version'] = '4' else: kwargs['signature_version'] = self.signature_version return kwargs def 
_to_nodes(self, object, xpath): return [self._to_node(el) for el in object.findall(fixxpath(xpath=xpath, namespace=NAMESPACE))] def _to_node(self, element): try: state = self.NODE_STATE_MAP[findattr(element=element, xpath="instanceState/name", namespace=NAMESPACE) ] except KeyError: state = NodeState.UNKNOWN created = parse_date(findtext(element=element, xpath='launchTime', namespace=NAMESPACE)) instance_id = findtext(element=element, xpath='instanceId', namespace=NAMESPACE) public_ip = findtext(element=element, xpath='ipAddress', namespace=NAMESPACE) public_ips = [public_ip] if public_ip else [] private_ip = findtext(element=element, xpath='privateIpAddress', namespace=NAMESPACE) private_ips = [private_ip] if private_ip else [] product_codes = [] for p in findall(element=element, xpath="productCodesSet/item/productCode", namespace=NAMESPACE): product_codes.append(p) # Get our tags tags = self._get_resource_tags(element) name = tags.get('Name', instance_id) # Get our extra dictionary extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['node']) # Add additional properties to our extra dictionary extra['block_device_mapping'] = self._to_instance_device_mappings( element) extra['groups'] = self._get_security_groups(element) extra['network_interfaces'] = self._to_interfaces(element) extra['product_codes'] = product_codes extra['tags'] = tags return Node(id=instance_id, name=name, state=state, public_ips=public_ips, private_ips=private_ips, driver=self.connection.driver, created_at=created, extra=extra) def _to_images(self, object): return [self._to_image(el) for el in object.findall( fixxpath(xpath='imagesSet/item', namespace=NAMESPACE)) ] def _to_image(self, element): id = findtext(element=element, xpath='imageId', namespace=NAMESPACE) name = findtext(element=element, xpath='name', namespace=NAMESPACE) # Build block device mapping block_device_mapping = self._to_device_mappings(element) billing_products = [] for p in findall(element=element, 
xpath="billingProducts/item/billingProduct", namespace=NAMESPACE): billing_products.append(p.text) # Get our tags tags = self._get_resource_tags(element) # Get our extra dictionary extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['image']) # Add our tags and block device mapping extra['tags'] = tags extra['block_device_mapping'] = block_device_mapping extra['billing_products'] = billing_products return NodeImage(id=id, name=name, driver=self, extra=extra) def _to_volume(self, element, name=None): """ Parse the XML element and return a StorageVolume object. :param name: An optional name for the volume. If not provided then either tag with a key "Name" or volume ID will be used (which ever is available first in that order). :type name: ``str`` :rtype: :class:`StorageVolume` """ volId = findtext(element=element, xpath='volumeId', namespace=NAMESPACE) size = findtext(element=element, xpath='size', namespace=NAMESPACE) raw_state = findtext(element=element, xpath='status', namespace=NAMESPACE) state = self.VOLUME_STATE_MAP.get(raw_state, StorageVolumeState.UNKNOWN) # Get our tags tags = self._get_resource_tags(element) # If name was not passed into the method then # fall back then use the volume id name = name if name else tags.get('Name', volId) # Get our extra dictionary extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume']) extra['tags'] = tags return StorageVolume(id=volId, name=name, size=int(size), driver=self, state=state, extra=extra) def _to_volume_modifications(self, object): return [self._to_volume_modification(el) for el in object.findall( fixxpath(xpath='volumeModificationSet/item', namespace=NAMESPACE)) ] def _to_volume_modification(self, element): """ Parse the XML element and return a StorageVolume object. 
:rtype: :class:`EC2VolumeModification` """ params = self._get_extra_dict(element, VOLUME_MODIFICATION_ATTRIBUTE_MAP) return EC2VolumeModification(**params) def _to_snapshots(self, response): return [self._to_snapshot(el) for el in response.findall( fixxpath(xpath='snapshotSet/item', namespace=NAMESPACE)) ] def _to_snapshot(self, element, name=None): snapId = findtext(element=element, xpath='snapshotId', namespace=NAMESPACE) size = findtext(element=element, xpath='volumeSize', namespace=NAMESPACE) created = parse_date(findtext(element=element, xpath='startTime', namespace=NAMESPACE)) # Get our tags tags = self._get_resource_tags(element) # If name was not passed into the method then # fall back then use the snapshot id name = name if name else tags.get('Name', snapId) # Get our extra dictionary extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['snapshot']) # Add tags and name to the extra dict extra['tags'] = tags extra['name'] = name # state state = self.SNAPSHOT_STATE_MAP.get( extra["state"], VolumeSnapshotState.UNKNOWN ) return VolumeSnapshot(snapId, size=int(size), driver=self, extra=extra, created=created, state=state, name=name) def _to_import_snapshot_task(self, element): status = findtext(element=element, xpath='importSnapshotTaskSet/item/' 'snapshotTaskDetail/status', namespace=NAMESPACE) if status != 'completed': snapshotId = None else: xpath = 'importSnapshotTaskSet/item/snapshotTaskDetail/snapshotId' snapshotId = findtext(element=element, xpath=xpath, namespace=NAMESPACE) return EC2ImportSnapshotTask(status, snapshotId=snapshotId) def _to_key_pairs(self, elems): key_pairs = [self._to_key_pair(elem=elem) for elem in elems] return key_pairs def _to_key_pair(self, elem): name = findtext(element=elem, xpath='keyName', namespace=NAMESPACE) fingerprint = findtext(element=elem, xpath='keyFingerprint', namespace=NAMESPACE).strip() private_key = findtext(element=elem, xpath='keyMaterial', namespace=NAMESPACE) key_pair = KeyPair(name=name, 
public_key=None, fingerprint=fingerprint, private_key=private_key, driver=self) return key_pair def _to_security_groups(self, response): return [self._to_security_group(el) for el in response.findall( fixxpath(xpath='securityGroupInfo/item', namespace=NAMESPACE)) ] def _to_security_group(self, element): # security group id sg_id = findtext(element=element, xpath='groupId', namespace=NAMESPACE) # security group name name = findtext(element=element, xpath='groupName', namespace=NAMESPACE) # Get our tags tags = self._get_resource_tags(element) # Get our extra dictionary extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['security_group']) # Add tags to the extra dict extra['tags'] = tags # Get ingress rules ingress_rules = self._to_security_group_rules( element, 'ipPermissions/item' ) # Get egress rules egress_rules = self._to_security_group_rules( element, 'ipPermissionsEgress/item' ) return EC2SecurityGroup(sg_id, name, ingress_rules, egress_rules, extra=extra) def _to_security_group_rules(self, element, xpath): return [self._to_security_group_rule(el) for el in element.findall( fixxpath(xpath=xpath, namespace=NAMESPACE)) ] def _to_security_group_rule(self, element): """ Parse the XML element and return a SecurityGroup object. 
:rtype: :class:`EC2SecurityGroup` """ rule = {} rule['protocol'] = findtext(element=element, xpath='ipProtocol', namespace=NAMESPACE) rule['from_port'] = findtext(element=element, xpath='fromPort', namespace=NAMESPACE) rule['to_port'] = findtext(element=element, xpath='toPort', namespace=NAMESPACE) # get security groups elements = element.findall(fixxpath( xpath='groups/item', namespace=NAMESPACE )) rule['group_pairs'] = [] for element in elements: item = { 'user_id': findtext( element=element, xpath='userId', namespace=NAMESPACE), 'group_id': findtext( element=element, xpath='groupId', namespace=NAMESPACE), 'group_name': findtext( element=element, xpath='groupName', namespace=NAMESPACE) } rule['group_pairs'].append(item) # get ip ranges elements = element.findall(fixxpath( xpath='ipRanges/item', namespace=NAMESPACE )) rule['cidr_ips'] = [ findtext( element=element, xpath='cidrIp', namespace=NAMESPACE ) for element in elements] return rule def _to_networks(self, response): return [self._to_network(el) for el in response.findall( fixxpath(xpath='vpcSet/item', namespace=NAMESPACE)) ] def _to_network(self, element, name=None): # Get the network id vpc_id = findtext(element=element, xpath='vpcId', namespace=NAMESPACE) # Get our tags tags = self._get_resource_tags(element) # Set our name if the Name key/value if available # If we don't get anything back then use the vpc_id name = name if name else tags.get('Name', vpc_id) cidr_block = findtext(element=element, xpath='cidrBlock', namespace=NAMESPACE) # Get our extra dictionary extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['network']) # Add tags to the extra dict extra['tags'] = tags return EC2Network(vpc_id, name, cidr_block, extra=extra) def _to_addresses(self, response, only_associated): """ Builds a list of dictionaries containing elastic IP properties. :param only_associated: If true, return only those addresses that are associated with an instance. If false, return all addresses. 
:type only_associated: ``bool`` :rtype: ``list`` of :class:`ElasticIP` """ addresses = [] for el in response.findall(fixxpath(xpath='addressesSet/item', namespace=NAMESPACE)): addr = self._to_address(el, only_associated) if addr is not None: addresses.append(addr) return addresses def _to_address(self, element, only_associated): instance_id = findtext(element=element, xpath='instanceId', namespace=NAMESPACE) public_ip = findtext(element=element, xpath='publicIp', namespace=NAMESPACE) domain = findtext(element=element, xpath='domain', namespace=NAMESPACE) # Build our extra dict extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['elastic_ip']) # Return NoneType if only associated IPs are requested if only_associated and not instance_id: return None return ElasticIP(public_ip, domain, instance_id, extra=extra) def _to_placement_groups(self, response): return [self._to_placement_group(el) for el in response.findall( fixxpath(xpath='placementGroupSet/item', namespace=NAMESPACE))] def _to_placement_group(self, element): name = findtext(element=element, xpath='groupName', namespace=NAMESPACE) state = findtext(element=element, xpath='state', namespace=NAMESPACE) strategy = findtext(element=element, xpath='strategy', namespace=NAMESPACE) return EC2PlacementGroup(name, state, strategy) def _to_subnets(self, response): return [self._to_subnet(el) for el in response.findall( fixxpath(xpath='subnetSet/item', namespace=NAMESPACE)) ] def _to_subnet(self, element, name=None): # Get the subnet ID subnet_id = findtext(element=element, xpath='subnetId', namespace=NAMESPACE) # Get our tags tags = self._get_resource_tags(element) # If we don't get anything back then use the subnet_id name = name if name else tags.get('Name', subnet_id) state = findtext(element=element, xpath='state', namespace=NAMESPACE) # Get our extra dictionary extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['subnet']) # Also include our tags extra['tags'] = tags return 
EC2NetworkSubnet(subnet_id, name, state, extra=extra) def _to_interfaces(self, response): return [self._to_interface(el) for el in response.findall( fixxpath(xpath='networkInterfaceSet/item', namespace=NAMESPACE)) ] def _to_interface(self, element, name=None): """ Parse the XML element and return an EC2NetworkInterface object. :param name: An optional name for the interface. If not provided then either tag with a key "Name" or the interface ID will be used (whichever is available first in that order). :type name: ``str`` :rtype: :class: `EC2NetworkInterface` """ interface_id = findtext(element=element, xpath='networkInterfaceId', namespace=NAMESPACE) state = findtext(element=element, xpath='status', namespace=NAMESPACE) # Get tags tags = self._get_resource_tags(element) name = name if name else tags.get('Name', interface_id) # Build security groups groups = self._get_security_groups(element) # Build private IPs priv_ips = [] for item in findall(element=element, xpath='privateIpAddressesSet/item', namespace=NAMESPACE): priv_ips.append({'private_ip': findtext(element=item, xpath='privateIpAddress', namespace=NAMESPACE), 'private_dns': findtext(element=item, xpath='privateDnsName', namespace=NAMESPACE), 'primary': findtext(element=item, xpath='primary', namespace=NAMESPACE)}) # Build our attachment dictionary which we will add into extra later attributes_map = \ RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface_attachment'] attachment = self._get_extra_dict(element, attributes_map) # Build our extra dict attributes_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface'] extra = self._get_extra_dict(element, attributes_map) # Include our previously built items as well extra['tags'] = tags extra['attachment'] = attachment extra['private_ips'] = priv_ips extra['groups'] = groups return EC2NetworkInterface(interface_id, name, state, extra=extra) def _to_reserved_nodes(self, object, xpath): return [self._to_reserved_node(el) for el in object.findall(fixxpath(xpath=xpath, 
namespace=NAMESPACE))] def _to_reserved_node(self, element): """ Build an EC2ReservedNode object using the reserved instance properties. Information on these properties can be found at http://goo.gl/ulXCC7. """ # Get our extra dictionary extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['reserved_node']) try: size = [size for size in self.list_sizes() if size.id == extra['instance_type']][0] except IndexError: size = None return EC2ReservedNode(id=findtext(element=element, xpath='reservedInstancesId', namespace=NAMESPACE), state=findattr(element=element, xpath='state', namespace=NAMESPACE), driver=self, size=size, extra=extra) def _to_device_mappings(self, object): return [self._to_device_mapping(el) for el in object.findall( fixxpath(xpath='blockDeviceMapping/item', namespace=NAMESPACE)) ] def _to_device_mapping(self, element): """ Parse the XML element and return a dictionary of device properties. Additional information can be found at http://goo.gl/GjWYBf. @note: EBS volumes do not have a virtual name. Only ephemeral disks use this property. :rtype: ``dict`` """ mapping = {} mapping['device_name'] = findattr(element=element, xpath='deviceName', namespace=NAMESPACE) mapping['virtual_name'] = findattr(element=element, xpath='virtualName', namespace=NAMESPACE) # If virtual name does not exist then this is an EBS volume. # Build the EBS dictionary leveraging the _get_extra_dict method. if mapping['virtual_name'] is None: mapping['ebs'] = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['ebs_volume']) return mapping def _to_instance_device_mappings(self, object): return [self._to_instance_device_mapping(el) for el in object.findall( fixxpath(xpath='blockDeviceMapping/item', namespace=NAMESPACE)) ] def _to_instance_device_mapping(self, element): """ Parse the XML element and return a dictionary of device properties. Additional information can be found at https://goo.gl/OGK88a. 
:rtype: ``dict`` """ mapping = {} mapping['device_name'] = findattr(element=element, xpath='deviceName', namespace=NAMESPACE) mapping['ebs'] = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['ebs_instance_block_device']) return mapping def _to_internet_gateways(self, object, xpath): return [self._to_internet_gateway(el) for el in object.findall(fixxpath(xpath=xpath, namespace=NAMESPACE))] def _to_internet_gateway(self, element, name=None): id = findtext(element=element, xpath='internetGatewayId', namespace=NAMESPACE) vpc_id = findtext(element=element, xpath='attachmentSet/item/vpcId', namespace=NAMESPACE) state = findtext(element=element, xpath='attachmentSet/item/state', namespace=NAMESPACE) # If there's no attachment state, let's # set it to available if not state: state = 'available' # Get our tags tags = self._get_resource_tags(element) # If name was not passed into the method then # fall back then use the gateway id name = name if name else tags.get('Name', id) return VPCInternetGateway(id=id, name=name, vpc_id=vpc_id, state=state, driver=self.connection.driver, extra={'tags': tags}) def _to_route_tables(self, response): return [self._to_route_table(el) for el in response.findall( fixxpath(xpath='routeTableSet/item', namespace=NAMESPACE)) ] def _to_route_table(self, element, name=None): # route table id route_table_id = findtext(element=element, xpath='routeTableId', namespace=NAMESPACE) # Get our tags tags = self._get_resource_tags(element) # Get our extra dictionary extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['route_table']) # Add tags to the extra dict extra['tags'] = tags # Get routes routes = self._to_routes(element, 'routeSet/item') # Get subnet associations subnet_associations = self._to_subnet_associations( element, 'associationSet/item') # Get propagating routes virtual private gateways (VGW) IDs propagating_gateway_ids = [] for el in element.findall(fixxpath(xpath='propagatingVgwSet/item', namespace=NAMESPACE)): 
propagating_gateway_ids.append(findtext(element=el, xpath='gatewayId', namespace=NAMESPACE)) name = name if name else tags.get('Name', id) return EC2RouteTable(route_table_id, name, routes, subnet_associations, propagating_gateway_ids, extra=extra) def _to_routes(self, element, xpath): return [self._to_route(el) for el in element.findall( fixxpath(xpath=xpath, namespace=NAMESPACE)) ] def _to_route(self, element): """ Parse the XML element and return a route object :rtype: :class: `EC2Route` """ destination_cidr = findtext(element=element, xpath='destinationCidrBlock', namespace=NAMESPACE) gateway_id = findtext(element=element, xpath='gatewayId', namespace=NAMESPACE) instance_id = findtext(element=element, xpath='instanceId', namespace=NAMESPACE) owner_id = findtext(element=element, xpath='instanceOwnerId', namespace=NAMESPACE) interface_id = findtext(element=element, xpath='networkInterfaceId', namespace=NAMESPACE) state = findtext(element=element, xpath='state', namespace=NAMESPACE) origin = findtext(element=element, xpath='origin', namespace=NAMESPACE) vpc_peering_connection_id = findtext(element=element, xpath='vpcPeeringConnectionId', namespace=NAMESPACE) return EC2Route(destination_cidr, gateway_id, instance_id, owner_id, interface_id, state, origin, vpc_peering_connection_id) def _to_subnet_associations(self, element, xpath): return [self._to_subnet_association(el) for el in element.findall( fixxpath(xpath=xpath, namespace=NAMESPACE)) ] def _to_subnet_association(self, element): """ Parse the XML element and return a route table association object :rtype: :class: `EC2SubnetAssociation` """ association_id = findtext(element=element, xpath='routeTableAssociationId', namespace=NAMESPACE) route_table_id = findtext(element=element, xpath='routeTableId', namespace=NAMESPACE) subnet_id = findtext(element=element, xpath='subnetId', namespace=NAMESPACE) main = findtext(element=element, xpath='main', namespace=NAMESPACE) main = True if main else False return 
EC2SubnetAssociation(association_id, route_table_id, subnet_id, main) def _pathlist(self, key, arr): """ Converts a key and an array of values into AWS query param format. """ params = {} i = 0 for value in arr: i += 1 params['%s.%s' % (key, i)] = value return params def _get_boolean(self, element): tag = '{%s}%s' % (NAMESPACE, 'return') return element.findtext(tag) == 'true' def _get_terminate_boolean(self, element): status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name')) return any([term_status == status for term_status in ('shutting-down', 'terminated')]) def _add_instance_filter(self, params, node): """ Add instance filter to the provided params dictionary. """ filters = {'instance-id': node.id} params.update(self._build_filters(filters)) return params def _get_state_boolean(self, element): """ Checks for the instances's state """ state = findall(element=element, xpath='instancesSet/item/currentState/name', namespace=NAMESPACE)[0].text return state in ('stopping', 'pending', 'starting') def _get_extra_dict(self, element, mapping): """ Extract attributes from the element based on rules provided in the mapping dictionary. :param element: Element to parse the values from. :type element: xml.etree.ElementTree.Element. :param mapping: Dictionary with the extra layout :type node: :class:`Node` :rtype: ``dict`` """ extra = {} for attribute, values in mapping.items(): transform_func = values['transform_func'] value = findattr(element=element, xpath=values['xpath'], namespace=NAMESPACE) if value is not None: extra[attribute] = transform_func(value) else: extra[attribute] = None return extra def _get_resource_tags(self, element): """ Parse tags from the provided element and return a dictionary with key/value pairs. 
:rtype: ``dict`` """ tags = {} # Get our tag set by parsing the element tag_set = findall(element=element, xpath='tagSet/item', namespace=NAMESPACE) for tag in tag_set: key = findtext(element=tag, xpath='key', namespace=NAMESPACE) value = findtext(element=tag, xpath='value', namespace=NAMESPACE) tags[key] = value return tags def _get_block_device_mapping_params(self, block_device_mapping): """ Return a list of dictionaries with query parameters for a valid block device mapping. :param mapping: List of dictionaries with the drive layout :type mapping: ``list`` or ``dict`` :return: Dictionary representation of the drive mapping :rtype: ``dict`` """ if not isinstance(block_device_mapping, (list, tuple)): raise AttributeError( 'block_device_mapping not list or tuple') params = {} for idx, mapping in enumerate(block_device_mapping): idx += 1 # We want 1-based indexes if not isinstance(mapping, dict): raise AttributeError( 'mapping %s in block_device_mapping ' 'not a dict' % mapping) for k, v in mapping.items(): if not isinstance(v, dict): params['BlockDeviceMapping.%d.%s' % (idx, k)] = str(v) else: for key, value in v.items(): params['BlockDeviceMapping.%d.%s.%s' % (idx, k, key)] = str(value) return params def _get_billing_product_params(self, billing_products): """ Return a list of dictionaries with valid param for billing product. :param billing_product: List of billing code values(str) :type billing product: ``list`` :return: Dictionary representation of the billing product codes :rtype: ``dict`` """ if not isinstance(billing_products, (list, tuple)): raise AttributeError( 'billing_products not list or tuple') params = {} for idx, v in enumerate(billing_products): idx += 1 # We want 1-based indexes params['BillingProduct.%d' % (idx)] = str(v) return params def _get_disk_container_params(self, disk_container): """ Return a list of dictionaries with query parameters for a valid disk container. 
:param disk_container: List of dictionaries with disk_container details :type disk_container: ``list`` or ``dict`` :return: Dictionary representation of the disk_container :rtype: ``dict`` """ if not isinstance(disk_container, (list, tuple)): raise AttributeError('disk_container not list or tuple') params = {} for idx, content in enumerate(disk_container): idx += 1 # We want 1-based indexes if not isinstance(content, dict): raise AttributeError( 'content %s in disk_container not a dict' % content) for k, v in content.items(): if not isinstance(v, dict): params['DiskContainer.%s' % (k)] = str(v) else: for key, value in v.items(): params['DiskContainer.%s.%s' % (k, key)] = str(value) return params def _get_client_data_params(self, client_data): """ Return a dictionary with query parameters for a valid client data. :param client_data: List of dictionaries with the disk upload details :type client_data: ``dict`` :return: Dictionary representation of the client data :rtype: ``dict`` """ if not isinstance(client_data, (list, tuple)): raise AttributeError('client_data not list or tuple') params = {} for idx, content in enumerate(client_data): idx += 1 # We want 1-based indexes if not isinstance(content, dict): raise AttributeError( 'content %s in client_data' 'not a dict' % content) for k, v in content.items(): params['ClientData.%s' % (k)] = str(v) return params def _get_common_security_group_params(self, group_id, protocol, from_port, to_port, cidr_ips, group_pairs): """ Return a dictionary with common query parameters which are used when operating on security groups. 
        :rtype: ``dict``
        """
        # All rules are expressed as the first (and only) IpPermissions
        # entry; list-valued inputs are flattened to 1-based indexed keys.
        params = {'GroupId': group_id,
                  'IpPermissions.1.IpProtocol': protocol,
                  'IpPermissions.1.FromPort': from_port,
                  'IpPermissions.1.ToPort': to_port}

        if cidr_ips is not None:
            ip_ranges = {}
            for index, cidr_ip in enumerate(cidr_ips):
                index += 1  # We want 1-based indexes

                ip_ranges['IpPermissions.1.IpRanges.%s.CidrIp'
                          % (index)] = cidr_ip

            params.update(ip_ranges)

        if group_pairs is not None:
            user_groups = {}
            for index, group_pair in enumerate(group_pairs):
                index += 1  # We want 1-based indexes

                # Each pair may carry any of group_id / group_name / user_id.
                if 'group_id' in group_pair.keys():
                    user_groups['IpPermissions.1.Groups.%s.GroupId'
                                % (index)] = group_pair['group_id']

                if 'group_name' in group_pair.keys():
                    user_groups['IpPermissions.1.Groups.%s.GroupName'
                                % (index)] = group_pair['group_name']

                if 'user_id' in group_pair.keys():
                    user_groups['IpPermissions.1.Groups.%s.UserId'
                                % (index)] = group_pair['user_id']

            params.update(user_groups)

        return params

    def _get_security_groups(self, element):
        """
        Parse security groups from the provided element and return a
        list of security groups with the id and name key/value pairs.

        :rtype: ``list`` of ``dict``
        """
        groups = []

        for item in findall(element=element,
                            xpath='groupSet/item',
                            namespace=NAMESPACE):
            groups.append({
                'group_id': findtext(element=item,
                                     xpath='groupId',
                                     namespace=NAMESPACE),
                'group_name': findtext(element=item,
                                       xpath='groupName',
                                       namespace=NAMESPACE)
            })

        return groups

    def _build_filters(self, filters):
        """
        Return a dictionary with filter query parameters which are used when
        listing networks, security groups, etc.

        :param filters: Dict of filter names and filter values
        :type filters: ``dict``

        :rtype: ``dict``
        """
        filter_entries = {}

        for filter_idx, filter_data in enumerate(filters.items()):
            filter_idx += 1  # We want 1-based indexes
            filter_name, filter_values = filter_data
            filter_key = 'Filter.%s.Name' % (filter_idx)
            filter_entries[filter_key] = filter_name

            # A scalar value becomes Filter.N.Value.1; a list fans out to
            # Filter.N.Value.1 .. Filter.N.Value.M.
            if isinstance(filter_values, list):
                for value_idx, value in enumerate(filter_values):
                    value_idx += 1  # We want 1-based indexes
                    value_key = 'Filter.%s.Value.%s' % (filter_idx,
                                                        value_idx)
                    filter_entries[value_key] = value
            else:
                value_key = 'Filter.%s.Value.1' % (filter_idx)
                filter_entries[value_key] = filter_values

        return filter_entries


class EC2NodeDriver(BaseEC2NodeDriver):
    """
    Amazon EC2 node driver.
    """

    connectionCls = EC2Connection
    type = Provider.EC2
    name = 'Amazon EC2'
    website = 'http://aws.amazon.com/ec2/'
    path = '/'

    NODE_STATE_MAP = {
        'pending': NodeState.PENDING,
        'running': NodeState.RUNNING,
        'shutting-down': NodeState.UNKNOWN,
        'terminated': NodeState.TERMINATED,
        'stopped': NodeState.STOPPED
    }

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us-east-1', token=None, **kwargs):
        # Region-pinned subclasses set self._region; it takes precedence
        # over the region argument.
        if hasattr(self, '_region'):
            region = self._region

        valid_regions = self.list_regions()
        if region not in valid_regions:
            raise ValueError('Invalid region: %s' % (region))

        details = REGION_DETAILS[region]
        self.region_name = region
        self.token = token
        self.api_name = details['api_name']
        self.country = details['country']
        # Per-region signature version, falling back to the library default.
        self.signature_version = details.get('signature_version',
                                             DEFAULT_SIGNATURE_VERSION)

        host = host or details['endpoint']

        super(EC2NodeDriver, self).__init__(key=key, secret=secret,
                                            secure=secure, host=host,
                                            port=port, **kwargs)

    @classmethod
    def list_regions(cls):
        return VALID_EC2_REGIONS


class IdempotentParamError(LibcloudError):
    """
    Request used the same client token as a previous,
    but non-identical request.
    """

    def __str__(self):
        return repr(self.value)


class EucConnection(EC2Connection):
    """
    Connection class for Eucalyptus
    """

    host = None


class EucNodeDriver(BaseEC2NodeDriver):
    """
    Driver class for Eucalyptus
    """

    name = 'Eucalyptus'
    website = 'http://www.eucalyptus.com/'
    api_name = 'ec2_us_east'
    region_name = 'us-east-1'
    connectionCls = EucConnection
    signature_version = '2'

    def __init__(self, key, secret=None, secure=True, host=None,
                 path=None, port=None, api_version=DEFAULT_EUCA_API_VERSION):
        """
        @inherits: :class:`EC2NodeDriver.__init__`

        :param path: The host where the API can be reached.
        :type path: ``str``

        :param api_version: The API version to extend support for
                            Eucalyptus proprietary API calls
        :type api_version: ``str``
        """
        super(EucNodeDriver, self).__init__(key, secret, secure, host, port)

        if path is None:
            path = '/services/Eucalyptus'

        self.path = path
        # Eucalyptus-specific XML namespace, versioned by api_version.
        self.EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (api_version)

    def list_locations(self):
        raise NotImplementedError(
            'list_locations not implemented for this driver')

    def _to_sizes(self, response):
        return [self._to_size(el) for el in response.findall(
            fixxpath(xpath='instanceTypeDetails/item',
                     namespace=self.EUCA_NAMESPACE))]

    def _to_size(self, el):
        name = findtext(element=el,
                        xpath='name',
                        namespace=self.EUCA_NAMESPACE)
        cpu = findtext(element=el,
                       xpath='cpu',
                       namespace=self.EUCA_NAMESPACE)
        disk = findtext(element=el,
                        xpath='disk',
                        namespace=self.EUCA_NAMESPACE)
        memory = findtext(element=el,
                          xpath='memory',
                          namespace=self.EUCA_NAMESPACE)

        return NodeSize(id=name,
                        name=name,
                        ram=int(memory),
                        disk=int(disk),
                        bandwidth=None,
                        price=None,
                        driver=EucNodeDriver,
                        extra={
                            'cpu': int(cpu)
                        })

    def list_sizes(self):
        """
        Lists available nodes sizes.

        :rtype: ``list`` of :class:`NodeSize`
        """
        params = {'Action': 'DescribeInstanceTypes'}
        response = self.connection.request(self.path, params=params).object

        return self._to_sizes(response)

    def _add_instance_filter(self, params, node):
        """
        Eucalyptus driver doesn't support filtering on instance id so this is
        a no-op.
        """
        pass


class NimbusConnection(EC2Connection):
    """
    Connection class for Nimbus
    """

    host = None


class NimbusNodeDriver(BaseEC2NodeDriver):
    """
    Driver class for Nimbus
    """

    type = Provider.NIMBUS
    name = 'Nimbus'
    website = 'http://www.nimbusproject.org/'
    country = 'Private'
    api_name = 'nimbus'
    region_name = 'nimbus'
    friendly_name = 'Nimbus Private Cloud'
    connectionCls = NimbusConnection
    signature_version = '2'

    def ex_describe_addresses(self, nodes):
        """
        Nimbus doesn't support elastic IPs, so this is a pass-through.

        @inherits: :class:`EC2NodeDriver.ex_describe_addresses`
        """
        nodes_elastic_ip_mappings = {}
        for node in nodes:
            # empty list per node
            nodes_elastic_ip_mappings[node.id] = []
        return nodes_elastic_ip_mappings

    def ex_create_tags(self, resource, tags):
        """
        Nimbus doesn't support creating tags, so this is a pass-through.

        @inherits: :class:`EC2NodeDriver.ex_create_tags`
        """
        pass


class OutscaleConnection(EC2Connection):
    """
    Connection class for Outscale
    """

    version = DEFAULT_OUTSCALE_API_VERSION
    host = None


class OutscaleNodeDriver(BaseEC2NodeDriver):
    """
    Base Outscale FCU node driver.

    Outscale per provider driver classes inherit from it.
""" connectionCls = OutscaleConnection name = 'Outscale' website = 'http://www.outscale.com' path = '/' signature_version = '2' NODE_STATE_MAP = { 'pending': NodeState.PENDING, 'running': NodeState.RUNNING, 'shutting-down': NodeState.UNKNOWN, 'terminated': NodeState.TERMINATED, 'stopped': NodeState.STOPPED } def __init__(self, key, secret=None, secure=True, host=None, port=None, region='us-east-1', region_details=None, **kwargs): if hasattr(self, '_region'): region = self._region if region_details is None: raise ValueError('Invalid region_details argument') if region not in region_details.keys(): raise ValueError('Invalid region: %s' % (region)) self.region_name = region self.region_details = region_details details = self.region_details[region] self.api_name = details['api_name'] self.country = details['country'] self.connectionCls.host = details['endpoint'] self._not_implemented_msg =\ 'This method is not supported in the Outscale driver' super(BaseEC2NodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, **kwargs) def create_node(self, **kwargs): """ Creates a new Outscale node. The ex_iamprofile keyword is not supported. @inherits: :class:`BaseEC2NodeDriver.create_node` :keyword ex_keyname: The name of the key pair :type ex_keyname: ``str`` :keyword ex_userdata: The user data :type ex_userdata: ``str`` :keyword ex_security_groups: A list of names of security groups to assign to the node. :type ex_security_groups: ``list`` :keyword ex_metadata: The Key/Value metadata to associate with a node. :type ex_metadata: ``dict`` :keyword ex_mincount: The minimum number of nodes to launch :type ex_mincount: ``int`` :keyword ex_maxcount: The maximum number of nodes to launch :type ex_maxcount: ``int`` :keyword ex_clienttoken: A unique identifier to ensure idempotency :type ex_clienttoken: ``str`` :keyword ex_blockdevicemappings: ``list`` of ``dict`` block device mappings. 
:type ex_blockdevicemappings: ``list`` of ``dict`` :keyword ex_ebs_optimized: EBS-Optimized if True :type ex_ebs_optimized: ``bool`` """ if 'ex_iamprofile' in kwargs: raise NotImplementedError("ex_iamprofile not implemented") return super(OutscaleNodeDriver, self).create_node(**kwargs) def ex_create_network(self, cidr_block, name=None): """ Creates a network/VPC. Outscale does not support instance_tenancy. :param cidr_block: The CIDR block assigned to the network :type cidr_block: ``str`` :param name: An optional name for the network :type name: ``str`` :return: Dictionary of network properties :rtype: ``dict`` """ return super(OutscaleNodeDriver, self).ex_create_network(cidr_block, name=name) def ex_modify_instance_attribute(self, node, disable_api_termination=None, ebs_optimized=None, group_id=None, source_dest_check=None, user_data=None, instance_type=None): """ Modifies node attributes. Ouscale supports the following attributes: 'DisableApiTermination.Value', 'EbsOptimized', 'GroupId.n', 'SourceDestCheck.Value', 'UserData.Value', 'InstanceType.Value' :param node: Node instance :type node: :class:`Node` :param attributes: A dictionary with node attributes :type attributes: ``dict`` :return: True on success, False otherwise. 
:rtype: ``bool`` """ attributes = {} if disable_api_termination is not None: attributes['DisableApiTermination.Value'] = disable_api_termination if ebs_optimized is not None: attributes['EbsOptimized'] = ebs_optimized if group_id is not None: attributes['GroupId.n'] = group_id if source_dest_check is not None: attributes['SourceDestCheck.Value'] = source_dest_check if user_data is not None: attributes['UserData.Value'] = user_data if instance_type is not None: attributes['InstanceType.Value'] = instance_type return super(OutscaleNodeDriver, self).ex_modify_instance_attribute( node, attributes) def ex_register_image(self, name, description=None, architecture=None, root_device_name=None, block_device_mapping=None): """ Registers a Machine Image based off of an EBS-backed instance. Can also be used to create images from snapshots. Outscale does not support image_location, kernel_id and ramdisk_id. :param name: The name for the AMI being registered :type name: ``str`` :param description: The description of the AMI (optional) :type description: ``str`` :param architecture: The architecture of the AMI (i386/x86_64) (optional) :type architecture: ``str`` :param root_device_name: The device name for the root device Required if registering an EBS-backed AMI :type root_device_name: ``str`` :param block_device_mapping: A dictionary of the disk layout (optional) :type block_device_mapping: ``dict`` :rtype: :class:`NodeImage` """ return super(OutscaleNodeDriver, self).ex_register_image( name, description=description, architecture=architecture, root_device_name=root_device_name, block_device_mapping=block_device_mapping) def ex_copy_image(self, source_region, image, name=None, description=None): """ Outscale does not support copying images. @inherits: :class:`EC2NodeDriver.ex_copy_image` """ raise NotImplementedError(self._not_implemented_msg) def ex_get_limits(self): """ Outscale does not support getting limits. 
@inherits: :class:`EC2NodeDriver.ex_get_limits` """ raise NotImplementedError(self._not_implemented_msg) def ex_create_network_interface(self, subnet, name=None, description=None, private_ip_address=None): """ Outscale does not support creating a network interface within a VPC. @inherits: :class:`EC2NodeDriver.ex_create_network_interface` """ raise NotImplementedError(self._not_implemented_msg) def ex_delete_network_interface(self, network_interface): """ Outscale does not support deleting a network interface within a VPC. @inherits: :class:`EC2NodeDriver.ex_delete_network_interface` """ raise NotImplementedError(self._not_implemented_msg) def ex_attach_network_interface_to_node(self, network_interface, node, device_index): """ Outscale does not support attaching a network interface. @inherits: :class:`EC2NodeDriver.ex_attach_network_interface_to_node` """ raise NotImplementedError(self._not_implemented_msg) def ex_detach_network_interface(self, attachment_id, force=False): """ Outscale does not support detaching a network interface @inherits: :class:`EC2NodeDriver.ex_detach_network_interface` """ raise NotImplementedError(self._not_implemented_msg) def list_sizes(self, location=None): """ Lists available nodes sizes. This overrides the EC2 default method in order to use Outscale information or data. :rtype: ``list`` of :class:`NodeSize` """ available_types =\ self.region_details[self.region_name]['instance_types'] sizes = [] for instance_type in available_types: attributes = OUTSCALE_INSTANCE_TYPES[instance_type] attributes = copy.deepcopy(attributes) price = self._get_size_price(size_id=instance_type) attributes.update({'price': price}) sizes.append(NodeSize(driver=self, **attributes)) return sizes def ex_modify_instance_keypair(self, instance_id, key_name=None): """ Modifies the keypair associated with a specified instance. Once the modification is done, you must restart the instance. 
        :param      instance_id: The ID of the instance
        :type       instance_id: ``string``

        :param      key_name: The name of the keypair
        :type       key_name: ``string``
        """
        params = {'Action': 'ModifyInstanceKeypair'}

        params.update({'instanceId': instance_id})
        if key_name is not None:
            params.update({'keyName': key_name})

        response = self.connection.request(self.path, params=params,
                                           method='GET').object
        return (findtext(element=response, xpath='return',
                         namespace=OUTSCALE_NAMESPACE) == 'true')

    def _to_quota(self, elem):
        """
        Parse a quota response element into a dict mapping each reference
        to its list of quota entries.
        """
        quota = {}
        for reference_quota_item in findall(element=elem,
                                            xpath='referenceQuotaSet/item',
                                            namespace=OUTSCALE_NAMESPACE):
            reference = findtext(element=reference_quota_item,
                                 xpath='reference',
                                 namespace=OUTSCALE_NAMESPACE)
            quota_set = []
            for quota_item in findall(element=reference_quota_item,
                                      xpath='quotaSet/item',
                                      namespace=OUTSCALE_NAMESPACE):
                # All values below are the raw strings from the XML.
                ownerId = findtext(element=quota_item,
                                   xpath='ownerId',
                                   namespace=OUTSCALE_NAMESPACE)
                name = findtext(element=quota_item,
                                xpath='name',
                                namespace=OUTSCALE_NAMESPACE)
                displayName = findtext(element=quota_item,
                                       xpath='displayName',
                                       namespace=OUTSCALE_NAMESPACE)
                description = findtext(element=quota_item,
                                       xpath='description',
                                       namespace=OUTSCALE_NAMESPACE)
                groupName = findtext(element=quota_item,
                                     xpath='groupName',
                                     namespace=OUTSCALE_NAMESPACE)
                maxQuotaValue = findtext(element=quota_item,
                                         xpath='maxQuotaValue',
                                         namespace=OUTSCALE_NAMESPACE)
                usedQuotaValue = findtext(element=quota_item,
                                          xpath='usedQuotaValue',
                                          namespace=OUTSCALE_NAMESPACE)
                quota_set.append({'ownerId': ownerId,
                                  'name': name,
                                  'displayName': displayName,
                                  'description': description,
                                  'groupName': groupName,
                                  'maxQuotaValue': maxQuotaValue,
                                  'usedQuotaValue': usedQuotaValue})
            quota[reference] = quota_set

        return quota

    def ex_describe_quotas(self, dry_run=False, filters=None,
                           max_results=None, marker=None):
        """
        Describes one or more of your quotas.

        :param      dry_run: dry_run
        :type       dry_run: ``bool``

        :param      filters: The filters so that the response returned
                    includes information for certain quotas only.
        :type       filters: ``dict``

        :param      max_results: The maximum number of items that can
                    be returned in a single page (by default, 100)
        :type       max_results: ``int``

        :param      marker: Set quota marker
        :type       marker: ``string``

        :return:    (is_truncated, quota) tuple
        :rtype:     ``(bool, dict)``
        """
        if filters:
            raise NotImplementedError(
                'quota filters are not implemented')

        if marker:
            raise NotImplementedError(
                'quota marker is not implemented')

        params = {'Action': 'DescribeQuotas'}

        if dry_run:
            params.update({'DryRun': dry_run})

        if max_results:
            params.update({'MaxResults': max_results})

        response = self.connection.request(self.path, params=params,
                                           method='GET').object
        quota = self._to_quota(response)

        # NOTE(review): findtext returns the raw XML string here, so despite
        # the ``bool`` in the docstring this is e.g. 'true'/'false' — confirm
        # with callers before changing.
        is_truncated = findtext(element=response, xpath='isTruncated',
                                namespace=OUTSCALE_NAMESPACE)

        return is_truncated, quota

    def _to_product_type(self, elem):
        # Flatten a single product type element into a plain dict.
        productTypeId = findtext(element=elem,
                                 xpath='productTypeId',
                                 namespace=OUTSCALE_NAMESPACE)
        description = findtext(element=elem,
                               xpath='description',
                               namespace=OUTSCALE_NAMESPACE)

        return {'productTypeId': productTypeId, 'description': description}

    def ex_get_product_type(self, image_id, snapshot_id=None):
        """
        Gets the product type of a specified OMI or snapshot.

        :param      image_id: The ID of the OMI
        :type       image_id: ``string``

        :param      snapshot_id: The ID of the snapshot
        :type       snapshot_id: ``string``

        :return:    A product type
        :rtype:     ``dict``
        """
        params = {'Action': 'GetProductType'}

        params.update({'ImageId': image_id})
        if snapshot_id is not None:
            params.update({'SnapshotId': snapshot_id})

        response = self.connection.request(self.path, params=params,
                                           method='GET').object

        product_type = self._to_product_type(response)

        return product_type

    def _to_product_types(self, elem):
        # Flatten every productTypeSet/item into a list of plain dicts.
        product_types = []
        for product_types_item in findall(element=elem,
                                          xpath='productTypeSet/item',
                                          namespace=OUTSCALE_NAMESPACE):
            productTypeId = findtext(element=product_types_item,
                                     xpath='productTypeId',
                                     namespace=OUTSCALE_NAMESPACE)
            description = findtext(element=product_types_item,
                                   xpath='description',
                                   namespace=OUTSCALE_NAMESPACE)
            product_types.append({'productTypeId': productTypeId,
                                  'description': description})

        return product_types

    def ex_describe_product_types(self, filters=None):
        """
        Describes product types.

        :param      filters: The filters so that the list returned includes
                    information for certain quotas only.
        :type       filters: ``dict``

        :return:    A product types list
        :rtype:     ``list``
        """
        params = {'Action': 'DescribeProductTypes'}

        if filters:
            params.update(self._build_filters(filters))

        response = self.connection.request(self.path, params=params,
                                           method='GET').object

        product_types = self._to_product_types(response)

        return product_types

    def _to_instance_types(self, elem):
        # Flatten every instanceTypeSet/item into a list of plain dicts;
        # values are the raw strings from the XML.
        instance_types = []
        for instance_types_item in findall(element=elem,
                                           xpath='instanceTypeSet/item',
                                           namespace=OUTSCALE_NAMESPACE):
            name = findtext(element=instance_types_item,
                            xpath='name',
                            namespace=OUTSCALE_NAMESPACE)
            vcpu = findtext(element=instance_types_item,
                            xpath='vcpu',
                            namespace=OUTSCALE_NAMESPACE)
            memory = findtext(element=instance_types_item,
                              xpath='memory',
                              namespace=OUTSCALE_NAMESPACE)
            storageSize = findtext(element=instance_types_item,
                                   xpath='storageSize',
                                   namespace=OUTSCALE_NAMESPACE)
            storageCount = findtext(element=instance_types_item,
                                    xpath='storageCount',
                                    namespace=OUTSCALE_NAMESPACE)
            maxIpAddresses = findtext(element=instance_types_item,
                                      xpath='maxIpAddresses',
                                      namespace=OUTSCALE_NAMESPACE)
            ebsOptimizedAvailable = findtext(element=instance_types_item,
                                             xpath='ebsOptimizedAvailable',
                                             namespace=OUTSCALE_NAMESPACE)

            d = {'name': name, 'vcpu': vcpu, 'memory': memory,
                 'storageSize': storageSize, 'storageCount': storageCount,
                 'maxIpAddresses': maxIpAddresses,
                 'ebsOptimizedAvailable': ebsOptimizedAvailable}

            instance_types.append(d)

        return instance_types

    def ex_describe_instance_types(self, filters=None):
        """
        Describes instance types.
:param filters: The filters so that the list returned includes information for instance types only :type filters: ``dict`` :return: A instance types list :rtype: ``list`` """ params = {'Action': 'DescribeInstanceTypes'} if filters: params.update(self._build_filters(filters)) response = self.connection.request(self.path, params=params, method='GET').object instance_types = self._to_instance_types(response) return instance_types class OutscaleSASNodeDriver(OutscaleNodeDriver): """ Outscale SAS node driver """ name = 'Outscale SAS' type = Provider.OUTSCALE_SAS def __init__(self, key, secret=None, secure=True, host=None, port=None, region='us-east-1', region_details=None, **kwargs): super(OutscaleSASNodeDriver, self).__init__( key=key, secret=secret, secure=secure, host=host, port=port, region=region, region_details=OUTSCALE_SAS_REGION_DETAILS, **kwargs) class OutscaleINCNodeDriver(OutscaleNodeDriver): """ Outscale INC node driver """ name = 'Outscale INC' type = Provider.OUTSCALE_INC def __init__(self, key, secret=None, secure=True, host=None, port=None, region='us-east-1', region_details=None, **kwargs): super(OutscaleINCNodeDriver, self).__init__( key=key, secret=secret, secure=secure, host=host, port=port, region=region, region_details=OUTSCALE_INC_REGION_DETAILS, **kwargs) apache-libcloud-2.2.1/libcloud/compute/drivers/openstack.py0000664000175000017500000025513013153541406023716 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenStack driver """ from libcloud.common.exceptions import BaseHTTPError from libcloud.utils.iso8601 import parse_date try: import simplejson as json except ImportError: import json import warnings import base64 from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.utils.py3 import next from libcloud.utils.py3 import urlparse from libcloud.common.openstack import OpenStackBaseConnection from libcloud.common.openstack import OpenStackDriverMixin from libcloud.common.openstack import OpenStackException from libcloud.common.openstack import OpenStackResponse from libcloud.utils.networking import is_public_subnet from libcloud.compute.base import NodeSize, NodeImage from libcloud.compute.base import (NodeDriver, Node, NodeLocation, StorageVolume, VolumeSnapshot) from libcloud.compute.base import KeyPair from libcloud.compute.types import NodeState, StorageVolumeState, Provider, \ VolumeSnapshotState from libcloud.pricing import get_size_price from libcloud.utils.xml import findall from libcloud.utils.py3 import ET __all__ = [ 'OpenStack_1_0_Response', 'OpenStack_1_0_Connection', 'OpenStack_1_0_NodeDriver', 'OpenStack_1_0_SharedIpGroup', 'OpenStack_1_0_NodeIpAddresses', 'OpenStack_1_1_Response', 'OpenStack_1_1_Connection', 'OpenStack_1_1_NodeDriver', 'OpenStack_1_1_FloatingIpPool', 'OpenStack_1_1_FloatingIpAddress', 'OpenStackNodeDriver' ] ATOM_NAMESPACE = "http://www.w3.org/2005/Atom" DEFAULT_API_VERSION = '1.1' class OpenStackComputeConnection(OpenStackBaseConnection): # default config for http://devstack.org/ service_type = 'compute' 
    # NOTE(review): these two attributes are the tail of the preceding
    # connection class, whose ``class`` statement starts above this view.
    service_name = 'nova'
    service_region = 'RegionOne'


class OpenStackNodeDriver(NodeDriver, OpenStackDriverMixin):
    """
    Base OpenStack node driver. Should not be used directly.
    """
    api_name = 'openstack'
    name = 'OpenStack'
    website = 'http://openstack.org/'

    # Maps nova server "status" values to libcloud NodeState constants.
    NODE_STATE_MAP = {
        'BUILD': NodeState.PENDING,
        'REBUILD': NodeState.PENDING,
        'ACTIVE': NodeState.RUNNING,
        'SUSPENDED': NodeState.STOPPED,
        'SHUTOFF': NodeState.STOPPED,
        'DELETED': NodeState.TERMINATED,
        'QUEUE_RESIZE': NodeState.PENDING,
        'PREP_RESIZE': NodeState.PENDING,
        'VERIFY_RESIZE': NodeState.RUNNING,
        'PASSWORD': NodeState.PENDING,
        'RESCUE': NodeState.PENDING,
        'REBOOT': NodeState.REBOOTING,
        'HARD_REBOOT': NodeState.REBOOTING,
        'SHARE_IP': NodeState.PENDING,
        'SHARE_IP_NO_CONFIG': NodeState.PENDING,
        'DELETE_IP': NodeState.PENDING,
        'ERROR': NodeState.ERROR,
        'UNKNOWN': NodeState.UNKNOWN
    }

    # http://developer.openstack.org/api-ref-blockstorage-v2.html#volumes-v2
    VOLUME_STATE_MAP = {
        'creating': StorageVolumeState.CREATING,
        'available': StorageVolumeState.AVAILABLE,
        'attaching': StorageVolumeState.ATTACHING,
        'in-use': StorageVolumeState.INUSE,
        'deleting': StorageVolumeState.DELETING,
        'error': StorageVolumeState.ERROR,
        'error_deleting': StorageVolumeState.ERROR,
        'backing-up': StorageVolumeState.BACKUP,
        'restoring-backup': StorageVolumeState.BACKUP,
        'error_restoring': StorageVolumeState.ERROR,
        'error_extending': StorageVolumeState.ERROR,
    }

    # http://developer.openstack.org/api-ref-blockstorage-v2.html#ext-backups-v2
    SNAPSHOT_STATE_MAP = {
        'creating': VolumeSnapshotState.CREATING,
        'available': VolumeSnapshotState.AVAILABLE,
        'deleting': VolumeSnapshotState.DELETING,
        'error': VolumeSnapshotState.ERROR,
        'restoring': VolumeSnapshotState.RESTORING,
        'error_restoring': VolumeSnapshotState.ERROR
    }

    def __new__(cls, key, secret=None, secure=True, host=None, port=None,
                api_version=DEFAULT_API_VERSION, **kwargs):
        # Factory dispatch: instantiating the base class returns the
        # concrete driver matching the requested API version.
        if cls is OpenStackNodeDriver:
            if api_version == '1.0':
                cls = OpenStack_1_0_NodeDriver
            elif api_version == '1.1':
                cls = OpenStack_1_1_NodeDriver
            elif api_version in ['2.0', '2.1', '2.2']:
                cls = OpenStack_2_NodeDriver
            else:
                raise NotImplementedError(
                    "No OpenStackNodeDriver found for API version %s" %
                    (api_version))
        return super(OpenStackNodeDriver, cls).__new__(cls)

    def __init__(self, *args, **kwargs):
        # The mixin consumes the ex_force_* connection kwargs before the
        # regular NodeDriver initialisation runs.
        OpenStackDriverMixin.__init__(self, **kwargs)
        super(OpenStackNodeDriver, self).__init__(*args, **kwargs)

    def destroy_node(self, node):
        uri = '/servers/%s' % (node.id)
        resp = self.connection.request(uri, method='DELETE')
        # The OpenStack and Rackspace documentation both say this API will
        # return a 204, but in-fact, everyone everywhere agrees it actually
        # returns a 202, so we are going to accept either, and someday,
        # someone will fix either the implementation or the documentation to
        # agree.
        return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)

    def reboot_node(self, node):
        # The default reboot is HARD; use ex_soft_reboot_node for SOFT.
        return self._reboot_node(node, reboot_type='HARD')

    def list_nodes(self, ex_all_tenants=False):
        """
        List the nodes in a tenant

        :param ex_all_tenants: List nodes for all the tenants. Note: Your
                               user must have admin privileges for this
                               functionality to work.
        :type ex_all_tenants: ``bool``
        """
        params = {}
        if ex_all_tenants:
            params = {'all_tenants': 1}
        return self._to_nodes(
            self.connection.request('/servers/detail', params=params).object)

    def create_volume(self, size, name, location=None, snapshot=None,
                      ex_volume_type=None):
        """
        Create a new volume.

        :param size: Size of volume in gigabytes (required)
        :type size: ``int``

        :param name: Name of the volume to be created
        :type name: ``str``

        :param location: Which data center to create a volume in. If
                         empty, undefined behavior will be selected.
                         (optional)
        :type location: :class:`.NodeLocation`

        :param snapshot: Snapshot from which to create the new
                         volume. (optional)
        :type snapshot: :class:`.VolumeSnapshot`

        :param ex_volume_type: What kind of volume to create.
                               (optional)
        :type ex_volume_type: ``str``

        :return: The newly created volume.
        :rtype: :class:`StorageVolume`
        """
        volume = {
            'display_name': name,
            'display_description': name,
            'size': size,
            'metadata': {
                'contents': name,
            },
        }

        if ex_volume_type:
            volume['volume_type'] = ex_volume_type

        if location:
            # NOTE(review): the location object itself is placed in the
            # request body here; the docstring says NodeLocation, but the
            # API expects an availability-zone name — confirm what callers
            # actually pass before changing.
            volume['availability_zone'] = location

        if snapshot:
            volume['snapshot_id'] = snapshot.id

        resp = self.connection.request('/os-volumes',
                                       method='POST',
                                       data={'volume': volume})
        return self._to_volume(resp.object)

    def destroy_volume(self, volume):
        # success() is True for any 2xx response.
        return self.connection.request('/os-volumes/%s' % volume.id,
                                       method='DELETE').success()

    def attach_volume(self, node, volume, device="auto"):
        # when "auto" or None is provided for device, openstack will let
        # the guest OS pick the next available device (fi. /dev/vdb)
        return self.connection.request(
            '/servers/%s/os-volume_attachments' % node.id,
            method='POST',
            data={
                'volumeAttachment': {
                    'volumeId': volume.id,
                    'device': device,
                }
            }).success()

    def detach_volume(self, volume, ex_node=None):
        # when ex_node is not provided, volume is detached from all nodes
        failed_nodes = []
        for attachment in volume.extra['attachments']:
            if not ex_node or ex_node.id == attachment['serverId']:
                response = self.connection.request(
                    '/servers/%s/os-volume_attachments/%s' %
                    (attachment['serverId'], attachment['id']),
                    method='DELETE')
                if not response.success():
                    failed_nodes.append(attachment['serverId'])
        if failed_nodes:
            raise OpenStackException(
                'detach_volume failed for nodes with id: %s' %
                ', '.join(failed_nodes), 500, self
            )
        return True

    def list_volumes(self):
        return self._to_volumes(
            self.connection.request('/os-volumes').object)

    def ex_get_volume(self, volumeId):
        return self._to_volume(
            self.connection.request('/os-volumes/%s' % volumeId).object)

    def list_images(self, location=None, ex_only_active=True):
        """
        Lists all active images

        @inherits: :class:`NodeDriver.list_images`

        :param ex_only_active: True if list only active
        :type ex_only_active: ``bool``
        """
        return self._to_images(
            self.connection.request('/images/detail').object, ex_only_active)

    def get_image(self, image_id):
        """
        Get an image based on an image_id

        @inherits: :class:`NodeDriver.get_image`

        :param image_id: Image identifier
        :type image_id: ``str``

        :return: A NodeImage object
        :rtype: :class:`NodeImage`
        """
        return self._to_image(self.connection.request(
            '/images/%s' % (image_id,)).object['image'])

    def list_sizes(self, location=None):
        return self._to_sizes(
            self.connection.request('/flavors/detail').object)

    def list_locations(self):
        # OpenStack does not expose data centers through this API;
        # return a single placeholder location.
        return [NodeLocation(0, '', '', self)]

    def _ex_connection_class_kwargs(self):
        # Forward the ex_force_* values collected by OpenStackDriverMixin
        # to the connection class.
        return self.openstack_connection_kwargs()

    def ex_get_node_details(self, node_id):
        """
        Lists details of the specified server.

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :rtype: :class:`Node`
        """
        # @TODO: Remove this if in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        uri = '/servers/%s' % (node_id)
        try:
            resp = self.connection.request(uri, method='GET')
        except BaseHTTPError as e:
            # A missing server yields None rather than an exception.
            if e.code == httplib.NOT_FOUND:
                return None
            raise

        return self._to_node_from_obj(resp.object)

    def ex_soft_reboot_node(self, node):
        """
        Soft reboots the specified server

        :param node: node
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        return self._reboot_node(node, reboot_type='SOFT')

    def ex_hard_reboot_node(self, node):
        """
        Hard reboots the specified server

        :param node: node
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        return self._reboot_node(node, reboot_type='HARD')
class OpenStackNodeSize(NodeSize):
    """
    NodeSize class for the OpenStack.org driver.

    Following the example of OpenNebula.org driver
    and following guidelines:
    https://issues.apache.org/jira/browse/LIBCLOUD-119
    """

    def __init__(self, id, name, ram, disk, bandwidth, price, driver,
                 vcpus=None, ephemeral_disk=None, swap=None, extra=None):
        super(OpenStackNodeSize, self).__init__(id=id, name=name, ram=ram,
                                                disk=disk,
                                                bandwidth=bandwidth,
                                                price=price, driver=driver)
        self.vcpus = vcpus                    # number of virtual CPUs
        self.ephemeral_disk = ephemeral_disk  # extra ephemeral disk (GB)
        self.swap = swap                      # swap space (MB)
        self.extra = extra

    def __repr__(self):
        # FIX: the format string was empty, so repr() raised
        # "TypeError: not all arguments converted during string
        # formatting" — restored the descriptive representation.
        return (('<OpenStackNodeSize: id=%s, name=%s, ram=%s, disk=%s, '
                 'bandwidth=%s, price=%s, driver=%s, vcpus=%s,  ...>')
                % (self.id, self.name, self.ram, self.disk, self.bandwidth,
                   self.price, self.driver.name, self.vcpus))


class OpenStack_1_0_Response(OpenStackResponse):
    def __init__(self, *args, **kwargs):
        # done because of a circular reference from
        # NodeDriver -> Connection -> Response
        self.node_driver = OpenStack_1_0_NodeDriver
        super(OpenStack_1_0_Response, self).__init__(*args, **kwargs)


class OpenStack_1_0_Connection(OpenStackComputeConnection):
    # v1.0 of the API speaks XML.
    responseCls = OpenStack_1_0_Response
    default_content_type = 'application/xml; charset=UTF-8'
    accept_format = 'application/xml'
    XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0'
class OpenStack_1_0_NodeDriver(OpenStackNodeDriver):
    """
    OpenStack node driver.

    Extra node attributes:
        - password: root password, available after create.
        - hostId: represents the host your cloud server runs on
        - imageId: id of image
        - flavorId: id of flavor
    """
    connectionCls = OpenStack_1_0_Connection
    type = Provider.OPENSTACK

    features = {'create_node': ['generates_password']}

    def __init__(self, *args, **kwargs):
        # NOTE(review): str() turns an absent value into the string
        # 'None'; downstream code presumably relies on this — confirm
        # before cleaning up.
        self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
                                                    None))
        self.XML_NAMESPACE = self.connectionCls.XML_NAMESPACE
        super(OpenStack_1_0_NodeDriver, self).__init__(*args, **kwargs)

    def _to_images(self, object, ex_only_active):
        # Convert an <images> XML document into NodeImage objects,
        # optionally skipping images whose status is not ACTIVE.
        images = []
        for image in findall(object, 'image', self.XML_NAMESPACE):
            if ex_only_active and image.get('status') != 'ACTIVE':
                continue
            images.append(self._to_image(image))
        return images

    def _to_image(self, element):
        return NodeImage(id=element.get('id'),
                         name=element.get('name'),
                         driver=self.connection.driver,
                         extra={'updated': element.get('updated'),
                                'created': element.get('created'),
                                'status': element.get('status'),
                                'serverId': element.get('serverId'),
                                'progress': element.get('progress'),
                                'minDisk': element.get('minDisk'),
                                'minRam': element.get('minRam')
                                }
                         )

    def _change_password_or_name(self, node, name=None, password=None):
        # Shared PUT /servers/<id> helper used by ex_set_password and
        # ex_set_server_name; both changes go through the same element.
        uri = '/servers/%s' % (node.id)

        if not name:
            name = node.name

        body = {'xmlns': self.XML_NAMESPACE,
                'name': name}

        if password is not None:
            body['adminPass'] = password

        server_elm = ET.Element('server', body)

        resp = self.connection.request(
            uri, method='PUT', data=ET.tostring(server_elm))

        if resp.status == httplib.NO_CONTENT and password is not None:
            node.extra['password'] = password

        return resp.status == httplib.NO_CONTENT

    def create_node(self, **kwargs):
        """
        Create a new node

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_metadata: Key/Value metadata to associate with a node
        :type ex_metadata: ``dict``

        :keyword ex_files: File Path => File contents to create on
                           the node
        :type ex_files: ``dict``

        :keyword ex_shared_ip_group_id: The server is launched into
            that shared IP group
        :type ex_shared_ip_group_id: ``str``
        """
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']

        attributes = {'xmlns': self.XML_NAMESPACE,
                      'name': name,
                      'imageId': str(image.id),
                      'flavorId': str(size.id)}

        if 'ex_shared_ip_group' in kwargs:
            # Deprecate this. Be explicit and call the variable
            # ex_shared_ip_group_id since user needs to pass in the id, not
            # the name.
            warnings.warn('ex_shared_ip_group argument is deprecated.'
                          ' Please use ex_shared_ip_group_id')

        if 'ex_shared_ip_group_id' in kwargs:
            shared_ip_group_id = kwargs['ex_shared_ip_group_id']
            attributes['sharedIpGroupId'] = shared_ip_group_id

        server_elm = ET.Element('server', attributes)

        metadata_elm = self._metadata_to_xml(kwargs.get("ex_metadata", {}))
        if metadata_elm:
            server_elm.append(metadata_elm)

        files_elm = self._files_to_xml(kwargs.get("ex_files", {}))
        if files_elm:
            server_elm.append(files_elm)

        resp = self.connection.request("/servers",
                                       method='POST',
                                       data=ET.tostring(server_elm))
        return self._to_node(resp.object)

    def ex_set_password(self, node, password):
        """
        Sets the Node's root password.

        This will reboot the instance to complete the operation.

        :class:`Node.extra['password']` will be set to the new value if the
        operation was successful.

        :param node: node to set password
        :type node: :class:`Node`

        :param password: new password.
        :type password: ``str``

        :rtype: ``bool``
        """
        return self._change_password_or_name(node, password=password)

    def ex_set_server_name(self, node, name):
        """
        Sets the Node's name.

        This will reboot the instance to complete the operation.

        :param node: node to set name
        :type node: :class:`Node`

        :param name: new name
        :type name: ``str``

        :rtype: ``bool``
        """
        return self._change_password_or_name(node, name=name)

    def ex_resize(self, node, size):
        """
        Change an existing server flavor / scale the server up or down.

        :param node: node to resize.
        :type node: :class:`Node`

        :param size: new size.
        :type size: :class:`NodeSize`

        :rtype: ``bool``
        """
        elm = ET.Element(
            'resize',
            {'xmlns': self.XML_NAMESPACE,
             'flavorId': str(size.id)}
        )

        resp = self.connection.request("/servers/%s/action" % (node.id),
                                       method='POST',
                                       data=ET.tostring(elm))
        return resp.status == httplib.ACCEPTED

    def ex_confirm_resize(self, node):
        """
        Confirm a resize request which is currently in progress. If a resize
        request is not explicitly confirmed or reverted it's automatically
        confirmed after 24 hours.

        For more info refer to the API documentation: http://goo.gl/zjFI1

        :param node: node for which the resize request will be confirmed.
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        elm = ET.Element(
            'confirmResize',
            {'xmlns': self.XML_NAMESPACE},
        )

        resp = self.connection.request("/servers/%s/action" % (node.id),
                                       method='POST',
                                       data=ET.tostring(elm))
        return resp.status == httplib.NO_CONTENT

    def ex_revert_resize(self, node):
        """
        Revert a resize request which is currently in progress.
        All resizes are automatically confirmed after 24 hours if they have
        not already been confirmed explicitly or reverted.

        For more info refer to the API documentation: http://goo.gl/AizBu

        :param node: node for which the resize request will be reverted.
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        elm = ET.Element(
            'revertResize',
            {'xmlns': self.XML_NAMESPACE}
        )

        resp = self.connection.request("/servers/%s/action" % (node.id),
                                       method='POST',
                                       data=ET.tostring(elm))
        return resp.status == httplib.NO_CONTENT

    def ex_rebuild(self, node_id, image_id):
        """
        Rebuilds the specified server.

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :param image_id: ID of the image which should be used
        :type image_id: ``str``

        :rtype: ``bool``
        """
        # @TODO: Remove those ifs in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        if isinstance(image_id, NodeImage):
            image_id = image_id.id

        elm = ET.Element(
            'rebuild',
            {'xmlns': self.XML_NAMESPACE,
             'imageId': image_id}
        )

        resp = self.connection.request("/servers/%s/action" % node_id,
                                       method='POST',
                                       data=ET.tostring(elm))
        return resp.status == httplib.ACCEPTED

    def ex_create_ip_group(self, group_name, node_id=None):
        """
        Creates a shared IP group.

        :param group_name: group name which should be used
        :type group_name: ``str``

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :rtype: ``bool``
        """
        # @TODO: Remove this if in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        group_elm = ET.Element(
            'sharedIpGroup',
            {'xmlns': self.XML_NAMESPACE,
             'name': group_name}
        )

        if node_id:
            ET.SubElement(
                group_elm,
                'server',
                {'id': node_id}
            )

        resp = self.connection.request('/shared_ip_groups',
                                       method='POST',
                                       data=ET.tostring(group_elm))
        return self._to_shared_ip_group(resp.object)

    def ex_list_ip_groups(self, details=False):
        """
        Lists IDs and names for shared IP groups.
        If details lists all details for shared IP groups.

        :param details: True if details is required
        :type details: ``bool``

        :rtype: ``list`` of :class:`OpenStack_1_0_SharedIpGroup`
        """
        uri = '/shared_ip_groups/detail' if details else '/shared_ip_groups'
        resp = self.connection.request(uri, method='GET')
        groups = findall(resp.object, 'sharedIpGroup',
                         self.XML_NAMESPACE)
        return [self._to_shared_ip_group(el) for el in groups]

    def ex_delete_ip_group(self, group_id):
        """
        Deletes the specified shared IP group.

        :param group_id: group id which should be used
        :type group_id: ``str``

        :rtype: ``bool``
        """
        uri = '/shared_ip_groups/%s' % group_id
        resp = self.connection.request(uri, method='DELETE')
        return resp.status == httplib.NO_CONTENT

    def ex_share_ip(self, group_id, node_id, ip, configure_node=True):
        """
        Shares an IP address to the specified server.

        :param group_id: group id which should be used
        :type group_id: ``str``

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :param ip: ip which should be used
        :type ip: ``str``

        :param configure_node: configure node
        :type configure_node: ``bool``

        :rtype: ``bool``
        """
        # @TODO: Remove this if in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        if configure_node:
            str_configure = 'true'
        else:
            str_configure = 'false'

        elm = ET.Element(
            'shareIp',
            {'xmlns': self.XML_NAMESPACE,
             'sharedIpGroupId': group_id,
             'configureServer': str_configure},
        )

        uri = '/servers/%s/ips/public/%s' % (node_id, ip)

        resp = self.connection.request(uri,
                                       method='PUT',
                                       data=ET.tostring(elm))
        return resp.status == httplib.ACCEPTED

    def ex_unshare_ip(self, node_id, ip):
        """
        Removes a shared IP address from the specified server.

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :param ip: ip which should be used
        :type ip: ``str``

        :rtype: ``bool``
        """
        # @TODO: Remove this if in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        uri = '/servers/%s/ips/public/%s' % (node_id, ip)

        resp = self.connection.request(uri, method='DELETE')
        return resp.status == httplib.ACCEPTED

    def ex_list_ip_addresses(self, node_id):
        """
        List all server addresses.

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :rtype: :class:`OpenStack_1_0_NodeIpAddresses`
        """
        # @TODO: Remove this if in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        uri = '/servers/%s/ips' % node_id
        resp = self.connection.request(uri, method='GET')
        return self._to_ip_addresses(resp.object)

    def _metadata_to_xml(self, metadata):
        # Build the <metadata> element for create_node; None when empty so
        # the caller can skip appending it.
        if len(metadata) == 0:
            return None

        metadata_elm = ET.Element('metadata')
        for k, v in list(metadata.items()):
            meta_elm = ET.SubElement(metadata_elm, 'meta', {'key': str(k)})
            meta_elm.text = str(v)

        return metadata_elm

    def _files_to_xml(self, files):
        # Build the <personality> element; file contents are
        # base64-encoded per the API contract.
        if len(files) == 0:
            return None

        personality_elm = ET.Element('personality')
        for k, v in list(files.items()):
            file_elm = ET.SubElement(personality_elm,
                                     'file',
                                     {'path': str(k)})
            file_elm.text = base64.b64encode(b(v))

        return personality_elm

    def _reboot_node(self, node, reboot_type='SOFT'):
        resp = self._node_action(node, ['reboot', ('type', reboot_type)])
        return resp.status == httplib.ACCEPTED

    def _node_action(self, node, body):
        # A list body is rendered into a one-element XML document:
        # first item is the tag name, the rest are (attr, value) pairs.
        if isinstance(body, list):
            attr = ' '.join(['%s="%s"' % (item[0], item[1])
                             for item in body[1:]])
            body = '<%s xmlns="%s" %s/>' % (body[0], self.XML_NAMESPACE, attr)
        uri = '/servers/%s/action' % (node.id)
        resp = self.connection.request(uri, method='POST', data=body)
        return resp

    def _to_nodes(self, object):
        node_elements = findall(object, 'server', self.XML_NAMESPACE)
        return [self._to_node(el) for el in node_elements]

    def _to_node_from_obj(self, obj):
        return self._to_node(findall(obj, 'server', self.XML_NAMESPACE)[0])

    def _to_node(self, el):
        def get_ips(el):
            return [ip.get('addr') for ip in el]

        def get_meta_dict(el):
            d = {}
            for meta in el:
                d[meta.get('key')] = meta.text
            return d

        public_ip = get_ips(findall(el, 'addresses/public/ip',
                                    self.XML_NAMESPACE))
        private_ip = get_ips(findall(el, 'addresses/private/ip',
                                     self.XML_NAMESPACE))
        metadata = get_meta_dict(findall(el, 'metadata/meta',
                                         self.XML_NAMESPACE))

        n = Node(id=el.get('id'),
                 name=el.get('name'),
                 state=self.NODE_STATE_MAP.get(
                     el.get('status'), NodeState.UNKNOWN),
                 public_ips=public_ip,
                 private_ips=private_ip,
                 driver=self.connection.driver,
                 extra={
                     'password': el.get('adminPass'),
                     'hostId': el.get('hostId'),
                     'imageId': el.get('imageId'),
                     'flavorId': el.get('flavorId'),
                     'uri': "https://%s%s/servers/%s" % (
                         self.connection.host,
                         self.connection.request_path, el.get('id')),
                     'service_name': self.connection.get_service_name(),
                     'metadata': metadata})
        return n

    def _to_sizes(self, object):
        elements = findall(object, 'flavor', self.XML_NAMESPACE)
        return [self._to_size(el) for el in elements]

    def _to_size(self, el):
        vcpus = int(el.get('vcpus')) if el.get('vcpus', None) else None
        return OpenStackNodeSize(id=el.get('id'),
                                 name=el.get('name'),
                                 ram=int(el.get('ram')),
                                 disk=int(el.get('disk')),
                                 # XXX: needs hardcode
                                 vcpus=vcpus,
                                 bandwidth=None,
                                 # Hardcoded
                                 price=self._get_size_price(el.get('id')),
                                 driver=self.connection.driver)

    def ex_limits(self):
        """
        Extra call to get account's limits, such as
        rates (for example amount of POST requests per day)
        and absolute limits like total amount of available
        RAM to be used by servers.

        :return: dict with keys 'rate' and 'absolute'
        :rtype: ``dict``
        """
        def _to_rate(el):
            rate = {}
            for item in list(el.items()):
                rate[item[0]] = item[1]

            return rate

        def _to_absolute(el):
            return {el.get('name'): el.get('value')}

        limits = self.connection.request("/limits").object
        rate = [_to_rate(el) for el in findall(limits, 'rate/limit',
                                               self.XML_NAMESPACE)]
        absolute = {}
        for item in findall(limits, 'absolute/limit',
                            self.XML_NAMESPACE):
            absolute.update(_to_absolute(item))

        return {"rate": rate, "absolute": absolute}

    def create_image(self, node, name, description=None, reboot=True):
        """Create an image for node.

        @inherits: :class:`NodeDriver.create_image`

        :param node: node to use as a base for image
        :type node: :class:`Node`

        :param name: name for new image
        :type name: ``str``

        :rtype: :class:`NodeImage`
        """
        image_elm = ET.Element(
            'image',
            {'xmlns': self.XML_NAMESPACE,
             'name': name,
             'serverId': node.id}
        )

        return self._to_image(
            self.connection.request("/images", method="POST",
                                    data=ET.tostring(image_elm)).object)

    def delete_image(self, image):
        """Delete an image for node.

        @inherits: :class:`NodeDriver.delete_image`

        :param image: the image to be deleted
        :type image: :class:`NodeImage`

        :rtype: ``bool``
        """
        uri = '/images/%s' % image.id
        resp = self.connection.request(uri, method='DELETE')
        return resp.status == httplib.NO_CONTENT

    def _to_shared_ip_group(self, el):
        servers_el = findall(el, 'servers', self.XML_NAMESPACE)
        if servers_el:
            servers = [s.get('id')
                       for s in findall(servers_el[0], 'server',
                                        self.XML_NAMESPACE)]
        else:
            servers = None
        return OpenStack_1_0_SharedIpGroup(id=el.get('id'),
                                           name=el.get('name'),
                                           servers=servers)

    def _to_ip_addresses(self, el):
        public_ips = [ip.get('addr') for ip in findall(
            findall(el, 'public', self.XML_NAMESPACE)[0],
            'ip', self.XML_NAMESPACE)]
        private_ips = [ip.get('addr') for ip in findall(
            findall(el, 'private', self.XML_NAMESPACE)[0],
            'ip', self.XML_NAMESPACE)]

        return OpenStack_1_0_NodeIpAddresses(public_ips, private_ips)

    def _get_size_price(self, size_id):
        # Missing pricing data is treated as free rather than an error.
        try:
            return get_size_price(driver_type='compute',
                                  driver_name=self.api_name,
                                  size_id=size_id)
        except KeyError:
            return 0.0


class OpenStack_1_0_SharedIpGroup(object):
    """
    Shared IP group info.
    """

    def __init__(self, id, name, servers=None):
        self.id = str(id)
        self.name = name
        self.servers = servers
""" def __init__(self, public_addresses, private_addresses): self.public_addresses = public_addresses self.private_addresses = private_addresses class OpenStack_1_1_Response(OpenStackResponse): def __init__(self, *args, **kwargs): # done because of a circular reference from # NodeDriver -> Connection -> Response self.node_driver = OpenStack_1_1_NodeDriver super(OpenStack_1_1_Response, self).__init__(*args, **kwargs) class OpenStackNetwork(object): """ A Virtual Network. """ def __init__(self, id, name, cidr, driver, extra=None): self.id = str(id) self.name = name self.cidr = cidr self.driver = driver self.extra = extra or {} def __repr__(self): return '' % (self.id, self.name, self.cidr,) class OpenStackSecurityGroup(object): """ A Security Group. """ def __init__(self, id, tenant_id, name, description, driver, rules=None, extra=None): """ Constructor. :keyword id: Group id. :type id: ``str`` :keyword tenant_id: Owner of the security group. :type tenant_id: ``str`` :keyword name: Human-readable name for the security group. Might not be unique. :type name: ``str`` :keyword description: Human-readable description of a security group. :type description: ``str`` :keyword rules: Rules associated with this group. :type rules: ``list`` of :class:`OpenStackSecurityGroupRule` :keyword extra: Extra attributes associated with this group. :type extra: ``dict`` """ self.id = id self.tenant_id = tenant_id self.name = name self.description = description self.driver = driver self.rules = rules or [] self.extra = extra or {} def __repr__(self): return ('' % (self.id, self.tenant_id, self.name, self.description)) class OpenStackSecurityGroupRule(object): """ A Rule of a Security Group. """ def __init__(self, id, parent_group_id, ip_protocol, from_port, to_port, driver, ip_range=None, group=None, tenant_id=None, extra=None): """ Constructor. :keyword id: Rule id. :type id: ``str`` :keyword parent_group_id: ID of the parent security group. 
class OpenStackSecurityGroupRule(object):
    """
    A Rule of a Security Group.
    """

    def __init__(self, id, parent_group_id, ip_protocol, from_port, to_port,
                 driver, ip_range=None, group=None, tenant_id=None,
                 extra=None):
        """
        Constructor.

        :keyword id: Rule id.
        :type id: ``str``

        :keyword parent_group_id: ID of the parent security group.
        :type parent_group_id: ``str``

        :keyword ip_protocol: IP Protocol (icmp, tcp, udp, etc).
        :type ip_protocol: ``str``

        :keyword from_port: Port at start of range.
        :type from_port: ``int``

        :keyword to_port: Port at end of range.
        :type to_port: ``int``

        :keyword ip_range: CIDR for address range.
        :type ip_range: ``str``

        :keyword group: Name of a source security group to apply to rule.
        :type group: ``str``

        :keyword tenant_id: Owner of the security group.
        :type tenant_id: ``str``

        :keyword extra: Extra attributes associated with this rule.
        :type extra: ``dict``
        """
        self.id = id
        self.parent_group_id = parent_group_id
        self.ip_protocol = ip_protocol
        self.from_port = from_port
        self.to_port = to_port
        self.driver = driver
        # A rule targets either a CIDR range or a source security group;
        # whichever is unused keeps its empty default.
        self.ip_range = ''
        self.group = {}

        if group is None:
            self.ip_range = ip_range
        else:
            self.group = {'name': group, 'tenant_id': tenant_id}

        self.tenant_id = tenant_id
        self.extra = extra or {}

    def __repr__(self):
        # FIX: the format string was empty, so repr() raised
        # "TypeError: not all arguments converted" — restored it.
        return ('<OpenStackSecurityGroupRule id=%s parent_group_id=%s '
                'ip_protocol=%s from_port=%s to_port=%s>' % (
                    self.id, self.parent_group_id, self.ip_protocol,
                    self.from_port, self.to_port))


class OpenStackKeyPair(object):
    """
    A KeyPair.
    """

    def __init__(self, name, fingerprint, public_key, driver,
                 private_key=None, extra=None):
        """
        Constructor.

        :keyword name: Name of the KeyPair.
        :type name: ``str``

        :keyword fingerprint: Fingerprint of the KeyPair
        :type fingerprint: ``str``

        :keyword public_key: Public key in OpenSSH format.
        :type public_key: ``str``

        :keyword private_key: Private key in PEM format.
        :type private_key: ``str``

        :keyword extra: Extra attributes associated with this KeyPair.
        :type extra: ``dict``
        """
        self.name = name
        self.fingerprint = fingerprint
        self.public_key = public_key
        self.private_key = private_key
        self.driver = driver
        self.extra = extra or {}

    def __repr__(self):
        # FIX: the format string was empty, so repr() raised
        # "TypeError: not all arguments converted" — restored it.
        return ('<OpenStackKeyPair name=%s fingerprint=%s public_key=%s '
                '...>' % (self.name, self.fingerprint, self.public_key))
class OpenStack_1_1_Connection(OpenStackComputeConnection):
    # v1.1 of the API speaks JSON.
    responseCls = OpenStack_1_1_Response
    accept_format = 'application/json'
    default_content_type = 'application/json; charset=UTF-8'

    def encode_data(self, data):
        return json.dumps(data)


class OpenStack_1_1_NodeDriver(OpenStackNodeDriver):
    """
    OpenStack node driver.
    """
    connectionCls = OpenStack_1_1_Connection
    type = Provider.OPENSTACK

    features = {"create_node": ["generates_password"]}
    _networks_url_prefix = '/os-networks'

    def __init__(self, *args, **kwargs):
        # NOTE(review): str() turns an absent value into the string
        # 'None'; kept for backwards compatibility.
        self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
                                                    None))
        super(OpenStack_1_1_NodeDriver, self).__init__(*args, **kwargs)

    def create_node(self, **kwargs):
        """Create a new node

        @inherits:  :class:`NodeDriver.create_node`

        :keyword    ex_keyname:  The name of the key pair
        :type       ex_keyname:  ``str``

        :keyword    ex_userdata: String containing user data
                                 see
                                 https://help.ubuntu.com/community/CloudInit
        :type       ex_userdata: ``str``

        :keyword    ex_config_drive: Enable config drive
                                     see
                                     http://docs.openstack.org/grizzly/openstack-compute/admin/content/config-drive.html
        :type       ex_config_drive: ``bool``

        :keyword    ex_security_groups: List of security groups to assign to
                                        the node
        :type       ex_security_groups: ``list`` of
                                        :class:`OpenStackSecurityGroup`

        :keyword    ex_metadata: Key/Value metadata to associate with a node
        :type       ex_metadata: ``dict``

        :keyword    ex_files:   File Path => File contents to create on
                                the no  de
        :type       ex_files:   ``dict``

        :keyword    networks: The server is launched into a set of Networks.
        :type       networks: ``list`` of :class:`OpenStackNetwork`

        :keyword    ex_disk_config: Name of the disk configuration.
                                    Can be either ``AUTO`` or ``MANUAL``.
        :type       ex_disk_config: ``str``

        :keyword    ex_config_drive: If True enables metadata injection in a
                                     server through a configuration drive.
        :type       ex_config_drive: ``bool``

        :keyword    ex_admin_pass: The root password for the node
        :type       ex_admin_pass: ``str``

        :keyword    ex_availability_zone: Nova availability zone for the node
        :type       ex_availability_zone: ``str``
        """
        server_params = self._create_args_to_params(None, **kwargs)

        resp = self.connection.request("/servers",
                                       method='POST',
                                       data={'server': server_params})

        create_response = resp.object['server']
        # The create response is sparse; fetch the full server document.
        server_resp = self.connection.request(
            '/servers/%s' % create_response['id'])
        server_object = server_resp.object['server']

        # adminPass is not always present
        # http://docs.openstack.org/essex/openstack-compute/admin/
        # content/configuring-compute-API.html#d6e1833
        server_object['adminPass'] = create_response.get('adminPass', None)

        return self._to_node(server_object)

    def _to_images(self, obj, ex_only_active):
        # Convert the JSON image list, optionally skipping non-ACTIVE
        # images.
        images = []
        for image in obj['images']:
            if ex_only_active and image.get('status') != 'ACTIVE':
                continue
            images.append(self._to_image(image))

        return images

    def _to_image(self, api_image):
        server = api_image.get('server', {})
        return NodeImage(
            id=api_image['id'],
            name=api_image['name'],
            driver=self,
            extra=dict(
                updated=api_image['updated'],
                created=api_image['created'],
                status=api_image['status'],
                progress=api_image.get('progress'),
                metadata=api_image.get('metadata'),
                serverId=server.get('id'),
                minDisk=api_image.get('minDisk'),
                minRam=api_image.get('minRam'),
            )
        )

    def _to_nodes(self, obj):
        servers = obj['servers']
        return [self._to_node(server) for server in servers]

    def _to_volumes(self, obj):
        volumes = obj['volumes']
        return [self._to_volume(volume) for volume in volumes]

    def _to_snapshots(self, obj):
        snapshots = obj['snapshots']
        return [self._to_snapshot(snapshot) for snapshot in snapshots]

    def _to_sizes(self, obj):
        flavors = obj['flavors']
        return [self._to_size(flavor) for flavor in flavors]

    def _create_args_to_params(self, node, **kwargs):
        """
        Build the ``server`` request body shared by create_node, ex_rebuild,
        ex_resize and ex_update_node. When a value is absent from kwargs it
        falls back to the corresponding attribute of ``node``.
        """
        server_params = {
            'name': kwargs.get('name'),
            'metadata': kwargs.get('ex_metadata', {}),
            'personality': self._files_to_personality(kwargs.get("ex_files",
                                                                 {}))
        }

        if 'ex_availability_zone' in kwargs:
            server_params['availability_zone'] = kwargs['ex_availability_zone']

        if 'ex_keyname' in kwargs:
            server_params['key_name'] = kwargs['ex_keyname']

        if 'ex_userdata' in kwargs:
            server_params['user_data'] = base64.b64encode(
                b(kwargs['ex_userdata'])).decode('ascii')

        if 'ex_disk_config' in kwargs:
            server_params['OS-DCF:diskConfig'] = kwargs['ex_disk_config']

        # FIX: 'ex_config_drive' was previously handled twice; the second
        # assignment (str(...)) always overwrote the first, so only the
        # effective one is kept.
        if 'ex_config_drive' in kwargs:
            server_params['config_drive'] = str(kwargs['ex_config_drive'])

        if 'ex_admin_pass' in kwargs:
            server_params['adminPass'] = kwargs['ex_admin_pass']

        if 'networks' in kwargs:
            networks = kwargs['networks']
            networks = [{'uuid': network.id} for network in networks]
            server_params['networks'] = networks

        if 'ex_security_groups' in kwargs:
            server_params['security_groups'] = []
            for security_group in kwargs['ex_security_groups']:
                name = security_group.name
                server_params['security_groups'].append({'name': name})

        if 'ex_blockdevicemappings' in kwargs:
            server_params['block_device_mapping_v2'] = \
                kwargs['ex_blockdevicemappings']

        if 'name' in kwargs:
            server_params['name'] = kwargs.get('name')
        else:
            server_params['name'] = node.name

        if 'image' in kwargs:
            server_params['imageRef'] = kwargs.get('image').id
        else:
            server_params['imageRef'] = node.extra.get('imageId')

        if 'size' in kwargs:
            server_params['flavorRef'] = kwargs.get('size').id
        else:
            server_params['flavorRef'] = node.extra.get('flavorId')

        return server_params

    def _files_to_personality(self, files):
        # File contents are base64-encoded per the API contract.
        rv = []

        for k, v in list(files.items()):
            rv.append({'path': k, 'contents': base64.b64encode(b(v))})

        return rv

    def _reboot_node(self, node, reboot_type='SOFT'):
        resp = self._node_action(node, 'reboot', type=reboot_type)
        return resp.status == httplib.ACCEPTED
    def ex_set_password(self, node, password):
        """
        Changes the administrator password for a specified server.

        :param node: Node to rebuild.
        :type node: :class:`Node`

        :param password: The administrator password.
        :type password: ``str``

        :rtype: ``bool``
        """
        resp = self._node_action(node, 'changePassword', adminPass=password)
        node.extra['password'] = password
        return resp.status == httplib.ACCEPTED

    def ex_rebuild(self, node, image, **kwargs):
        """
        Rebuild a Node.

        :param node: Node to rebuild.
        :type node: :class:`Node`

        :param image: New image to use.
        :type image: :class:`NodeImage`

        :keyword ex_metadata: Key/Value metadata to associate with a node
        :type ex_metadata: ``dict``

        :keyword ex_files: File Path => File contents to create on
                           the node
        :type ex_files: ``dict``

        :keyword ex_keyname: Name of existing public key to inject into
                             instance
        :type ex_keyname: ``str``

        :keyword ex_userdata: String containing user data
                              see
                              https://help.ubuntu.com/community/CloudInit
        :type ex_userdata: ``str``

        :keyword ex_security_groups: List of security groups to assign to
                                     the node
        :type ex_security_groups: ``list`` of
                                  :class:`OpenStackSecurityGroup`

        :keyword ex_disk_config: Name of the disk configuration.
                                 Can be either ``AUTO`` or ``MANUAL``.
        :type ex_disk_config: ``str``

        :keyword ex_config_drive: If True enables metadata injection in a
                                  server through a configuration drive.
        :type ex_config_drive: ``bool``

        :rtype: ``bool``
        """
        server_params = self._create_args_to_params(node, image=image,
                                                    **kwargs)
        resp = self._node_action(node, 'rebuild', **server_params)
        return resp.status == httplib.ACCEPTED

    def ex_resize(self, node, size):
        """
        Change a node size.

        :param node: Node to resize.
        :type node: :class:`Node`

        :type size: :class:`NodeSize`
        :param size: New size to use.

        :rtype: ``bool``
        """
        server_params = self._create_args_to_params(node, size=size)
        resp = self._node_action(node, 'resize', **server_params)
        return resp.status == httplib.ACCEPTED

    def ex_confirm_resize(self, node):
        """
        Confirms a pending resize action.

        :param node: Node to resize.
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        resp = self._node_action(node, 'confirmResize')
        return resp.status == httplib.NO_CONTENT

    def ex_revert_resize(self, node):
        """
        Cancels and reverts a pending resize action.

        :param node: Node to resize.
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        resp = self._node_action(node, 'revertResize')
        # NOTE(review): confirmResize checks for 204 while revertResize
        # checks for 202 — this asymmetry matches the code as written;
        # confirm against the deployed API before unifying.
        return resp.status == httplib.ACCEPTED

    def create_image(self, node, name, metadata=None):
        """
        Creates a new image.

        :param node: Node
        :type node: :class:`Node`

        :param name: The name for the new image.
        :type name: ``str``

        :param metadata: Key and value pairs for metadata.
        :type metadata: ``dict``

        :rtype: :class:`NodeImage`
        """
        optional_params = {}
        if metadata:
            optional_params['metadata'] = metadata
        resp = self._node_action(node, 'createImage', name=name,
                                 **optional_params)
        # The new image id is only available in the Location header.
        image_id = self._extract_image_id_from_url(resp.headers['location'])
        return self.get_image(image_id=image_id)

    def ex_set_server_name(self, node, name):
        """
        Sets the Node's name.

        :param node: Node
        :type node: :class:`Node`

        :param name: The name of the server.
        :type name: ``str``

        :rtype: :class:`Node`
        """
        return self._update_node(node, name=name)

    def ex_get_metadata(self, node):
        """
        Get a Node's metadata.

        :param node: Node
        :type node: :class:`Node`

        :return: Key/Value metadata associated with node.
        :rtype: ``dict``
        """
        return self.connection.request(
            '/servers/%s/metadata' % (node.id,),
            method='GET',).object['metadata']

    def ex_set_metadata(self, node, metadata):
        """
        Sets the Node's metadata.

        :param node: Node
        :type node: :class:`Node`

        :param metadata: Key/Value metadata to associate with a node
        :type metadata: ``dict``

        :rtype: ``dict``
        """
        return self.connection.request(
            '/servers/%s/metadata' % (node.id,), method='PUT',
            data={'metadata': metadata}
        ).object['metadata']

    def ex_update_node(self, node, **node_updates):
        """
        Update the Node's editable attributes.  The OpenStack API currently
        supports editing name and IPv4/IPv6 access addresses.

        The driver currently only supports updating the node name.

        :param node: Node
        :type node: :class:`Node`

        :keyword name: New name for the server
        :type name: ``str``

        :rtype: :class:`Node`
        """
        potential_data = self._create_args_to_params(node, **node_updates)
        updates = {'name': potential_data['name']}
        return self._update_node(node, **updates)

    def _to_networks(self, obj):
        networks = obj['networks']
        return [self._to_network(network) for network in networks]

    def _to_network(self, obj):
        # The nova-network API calls the human-readable name 'label'.
        return OpenStackNetwork(id=obj['id'],
                                name=obj['label'],
                                cidr=obj.get('cidr', None),
                                driver=self)

    def ex_list_networks(self):
        """
        Get a list of Networks that are available.

        :rtype: ``list`` of :class:`OpenStackNetwork`
        """
        response = self.connection.request(self._networks_url_prefix).object
        return self._to_networks(response)

    def ex_create_network(self, name, cidr):
        """
        Create a new Network

        :param name: Name of network which should be used
        :type name: ``str``

        :param cidr: cidr of network which should be used
        :type cidr: ``str``

        :rtype: :class:`OpenStackNetwork`
        """
        data = {'network': {'cidr': cidr, 'label': name}}
        response = self.connection.request(self._networks_url_prefix,
                                           method='POST', data=data).object
        return self._to_network(response['network'])

    def ex_delete_network(self, network):
        """
        Get a list of NodeNetorks that are available.

        :param network: Network which should be used
        :type network: :class:`OpenStackNetwork`

        :rtype: ``bool``
        """
        resp = self.connection.request('%s/%s' % (self._networks_url_prefix,
                                                  network.id),
                                       method='DELETE')
        return resp.status == httplib.ACCEPTED

    def ex_get_console_output(self, node, length=None):
        """
        Get console output

        :param node: node
        :type node: :class:`Node`

        :param length: Optional number of lines to fetch from the
                       console log
        :type length: ``int``

        :return: Dictionary with the output
        :rtype: ``dict``
        """
        data = {
            "os-getConsoleOutput": {
                "length": length
            }
        }

        resp = self.connection.request('/servers/%s/action' % node.id,
                                       method='POST', data=data).object
        return resp

    def ex_list_snapshots(self):
        return self._to_snapshots(
            self.connection.request('/os-snapshots').object)

    def list_volume_snapshots(self, volume):
        # Filter the full snapshot list down to those of the given volume.
        return [snapshot for snapshot in self.ex_list_snapshots()
                if snapshot.extra['volume_id'] == volume.id]

    # NOTE(review): create_volume_snapshot continues beyond this chunk of
    # the file; only its signature and part of the docstring are visible
    # here, so it is reproduced verbatim and left unmodified.
    def create_volume_snapshot(self, volume, name=None, ex_description=None,
                               ex_force=True):
        """
        Create snapshot from volume

        :param volume: Instance of `StorageVolume`
        :type volume: `StorageVolume`

        :param name: Name of snapshot (optional)
        :type name: `str` | `NoneType`

        :param ex_description: Description of the snapshot (optional)
        :type ex_description: `str` | `NoneType`

        :param ex_force: Specifies if we create a snapshot that is not in
                         state `available`. For example `in-use`. Defaults
                         to True.
(optional) :type ex_force: `bool` :rtype: :class:`VolumeSnapshot` """ data = {'snapshot': {'volume_id': volume.id, 'force': ex_force}} if name is not None: data['snapshot']['display_name'] = name if ex_description is not None: data['snapshot']['display_description'] = ex_description return self._to_snapshot(self.connection.request('/os-snapshots', method='POST', data=data).object) def destroy_volume_snapshot(self, snapshot): resp = self.connection.request('/os-snapshots/%s' % snapshot.id, method='DELETE') return resp.status == httplib.NO_CONTENT def ex_create_snapshot(self, volume, name, description=None, force=False): """ Create a snapshot based off of a volume. :param volume: volume :type volume: :class:`StorageVolume` :keyword name: New name for the volume snapshot :type name: ``str`` :keyword description: Description of the snapshot (optional) :type description: ``str`` :keyword force: Whether to force creation (optional) :type force: ``bool`` :rtype: :class:`VolumeSnapshot` """ warnings.warn('This method has been deprecated in favor of the ' 'create_volume_snapshot method') return self.create_volume_snapshot(volume, name, ex_description=description, ex_force=force) def ex_delete_snapshot(self, snapshot): """ Delete a VolumeSnapshot :param snapshot: snapshot :type snapshot: :class:`VolumeSnapshot` :rtype: ``bool`` """ warnings.warn('This method has been deprecated in favor of the ' 'destroy_volume_snapshot method') return self.destroy_volume_snapshot(snapshot) def _to_security_group_rules(self, obj): return [self._to_security_group_rule(security_group_rule) for security_group_rule in obj] def _to_security_group_rule(self, obj): ip_range = group = tenant_id = None if obj['group'] == {}: ip_range = obj['ip_range'].get('cidr', None) else: group = obj['group'].get('name', None) tenant_id = obj['group'].get('tenant_id', None) return OpenStackSecurityGroupRule( id=obj['id'], parent_group_id=obj['parent_group_id'], ip_protocol=obj['ip_protocol'], 
from_port=obj['from_port'], to_port=obj['to_port'], driver=self, ip_range=ip_range, group=group, tenant_id=tenant_id) def _to_security_groups(self, obj): security_groups = obj['security_groups'] return [self._to_security_group(security_group) for security_group in security_groups] def _to_security_group(self, obj): rules = self._to_security_group_rules(obj.get('rules', [])) return OpenStackSecurityGroup(id=obj['id'], tenant_id=obj['tenant_id'], name=obj['name'], description=obj.get('description', ''), rules=rules, driver=self) def ex_list_security_groups(self): """ Get a list of Security Groups that are available. :rtype: ``list`` of :class:`OpenStackSecurityGroup` """ return self._to_security_groups( self.connection.request('/os-security-groups').object) def ex_get_node_security_groups(self, node): """ Get Security Groups of the specified server. :rtype: ``list`` of :class:`OpenStackSecurityGroup` """ return self._to_security_groups( self.connection.request('/servers/%s/os-security-groups' % (node.id)).object) def ex_create_security_group(self, name, description): """ Create a new Security Group :param name: Name of the new Security Group :type name: ``str`` :param description: Description of the new Security Group :type description: ``str`` :rtype: :class:`OpenStackSecurityGroup` """ return self._to_security_group(self.connection.request( '/os-security-groups', method='POST', data={'security_group': {'name': name, 'description': description}} ).object['security_group']) def ex_delete_security_group(self, security_group): """ Delete a Security Group. 
:param security_group: Security Group should be deleted :type security_group: :class:`OpenStackSecurityGroup` :rtype: ``bool`` """ resp = self.connection.request('/os-security-groups/%s' % (security_group.id), method='DELETE') return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED) def ex_create_security_group_rule(self, security_group, ip_protocol, from_port, to_port, cidr=None, source_security_group=None): """ Create a new Rule in a Security Group :param security_group: Security Group in which to add the rule :type security_group: :class:`OpenStackSecurityGroup` :param ip_protocol: Protocol to which this rule applies Examples: tcp, udp, ... :type ip_protocol: ``str`` :param from_port: First port of the port range :type from_port: ``int`` :param to_port: Last port of the port range :type to_port: ``int`` :param cidr: CIDR notation of the source IP range for this rule :type cidr: ``str`` :param source_security_group: Existing Security Group to use as the source (instead of CIDR) :type source_security_group: L{OpenStackSecurityGroup :rtype: :class:`OpenStackSecurityGroupRule` """ source_security_group_id = None if type(source_security_group) == OpenStackSecurityGroup: source_security_group_id = source_security_group.id return self._to_security_group_rule(self.connection.request( '/os-security-group-rules', method='POST', data={'security_group_rule': { 'ip_protocol': ip_protocol, 'from_port': from_port, 'to_port': to_port, 'cidr': cidr, 'group_id': source_security_group_id, 'parent_group_id': security_group.id}} ).object['security_group_rule']) def ex_delete_security_group_rule(self, rule): """ Delete a Rule from a Security Group. 
:param rule: Rule should be deleted :type rule: :class:`OpenStackSecurityGroupRule` :rtype: ``bool`` """ resp = self.connection.request('/os-security-group-rules/%s' % (rule.id), method='DELETE') return resp.status == httplib.NO_CONTENT def _to_key_pairs(self, obj): key_pairs = obj['keypairs'] key_pairs = [self._to_key_pair(key_pair['keypair']) for key_pair in key_pairs] return key_pairs def _to_key_pair(self, obj): key_pair = KeyPair(name=obj['name'], fingerprint=obj['fingerprint'], public_key=obj['public_key'], private_key=obj.get('private_key', None), driver=self) return key_pair def list_key_pairs(self): response = self.connection.request('/os-keypairs') key_pairs = self._to_key_pairs(response.object) return key_pairs def get_key_pair(self, name): self.connection.set_context({'key_pair_name': name}) response = self.connection.request('/os-keypairs/%s' % (name)) key_pair = self._to_key_pair(response.object['keypair']) return key_pair def create_key_pair(self, name): data = {'keypair': {'name': name}} response = self.connection.request('/os-keypairs', method='POST', data=data) key_pair = self._to_key_pair(response.object['keypair']) return key_pair def import_key_pair_from_string(self, name, key_material): data = {'keypair': {'name': name, 'public_key': key_material}} response = self.connection.request('/os-keypairs', method='POST', data=data) key_pair = self._to_key_pair(response.object['keypair']) return key_pair def delete_key_pair(self, key_pair): """ Delete a KeyPair. :param keypair: KeyPair to delete :type keypair: :class:`OpenStackKeyPair` :rtype: ``bool`` """ response = self.connection.request('/os-keypairs/%s' % (key_pair.name), method='DELETE') return response.status == httplib.ACCEPTED def ex_list_keypairs(self): """ Get a list of KeyPairs that are available. 
:rtype: ``list`` of :class:`OpenStackKeyPair` """ warnings.warn('This method has been deprecated in favor of ' 'list_key_pairs method') return self.list_key_pairs() def ex_create_keypair(self, name): """ Create a new KeyPair :param name: Name of the new KeyPair :type name: ``str`` :rtype: :class:`OpenStackKeyPair` """ warnings.warn('This method has been deprecated in favor of ' 'create_key_pair method') return self.create_key_pair(name=name) def ex_import_keypair(self, name, keyfile): """ Import a KeyPair from a file :param name: Name of the new KeyPair :type name: ``str`` :param keyfile: Path to the public key file (in OpenSSH format) :type keyfile: ``str`` :rtype: :class:`OpenStackKeyPair` """ warnings.warn('This method has been deprecated in favor of ' 'import_key_pair_from_file method') return self.import_key_pair_from_file(name=name, key_file_path=keyfile) def ex_import_keypair_from_string(self, name, key_material): """ Import a KeyPair from a string :param name: Name of the new KeyPair :type name: ``str`` :param key_material: Public key (in OpenSSH format) :type key_material: ``str`` :rtype: :class:`OpenStackKeyPair` """ warnings.warn('This method has been deprecated in favor of ' 'import_key_pair_from_string method') return self.import_key_pair_from_string(name=name, key_material=key_material) def ex_delete_keypair(self, keypair): """ Delete a KeyPair. 
:param keypair: KeyPair to delete :type keypair: :class:`OpenStackKeyPair` :rtype: ``bool`` """ warnings.warn('This method has been deprecated in favor of ' 'delete_key_pair method') return self.delete_key_pair(key_pair=keypair) def ex_get_size(self, size_id): """ Get a NodeSize :param size_id: ID of the size which should be used :type size_id: ``str`` :rtype: :class:`NodeSize` """ return self._to_size(self.connection.request( '/flavors/%s' % (size_id,)) .object['flavor']) def get_image(self, image_id): """ Get a NodeImage @inherits: :class:`NodeDriver.get_image` :param image_id: ID of the image which should be used :type image_id: ``str`` :rtype: :class:`NodeImage` """ return self._to_image(self.connection.request( '/images/%s' % (image_id,)).object['image']) def delete_image(self, image): """ Delete a NodeImage @inherits: :class:`NodeDriver.delete_image` :param image: image witch should be used :type image: :class:`NodeImage` :rtype: ``bool`` """ resp = self.connection.request('/images/%s' % (image.id,), method='DELETE') return resp.status == httplib.NO_CONTENT def _node_action(self, node, action, **params): params = params or None return self.connection.request('/servers/%s/action' % (node.id,), method='POST', data={action: params}) def _update_node(self, node, **node_updates): """ Updates the editable attributes of a server, which currently include its name and IPv4/IPv6 access addresses. 
""" return self._to_node( self.connection.request( '/servers/%s' % (node.id,), method='PUT', data={'server': node_updates} ).object['server'] ) def _to_node_from_obj(self, obj): return self._to_node(obj['server']) def _to_node(self, api_node): public_networks_labels = ['public', 'internet'] public_ips, private_ips = [], [] for label, values in api_node['addresses'].items(): for value in values: ip = value['addr'] is_public_ip = False try: is_public_ip = is_public_subnet(ip) except: # IPv6 # Openstack Icehouse sets 'OS-EXT-IPS:type' to 'floating' # for public and 'fixed' for private explicit_ip_type = value.get('OS-EXT-IPS:type', None) if label in public_networks_labels: is_public_ip = True elif explicit_ip_type == 'floating': is_public_ip = True elif explicit_ip_type == 'fixed': is_public_ip = False if is_public_ip: public_ips.append(ip) else: private_ips.append(ip) # Sometimes 'image' attribute is not present if the node is in an error # state image = api_node.get('image', None) image_id = image.get('id', None) if image else None config_drive = api_node.get("config_drive", False) volumes_attached = api_node.get('os-extended-volumes:volumes_attached') created = parse_date(api_node["created"]) return Node( id=api_node['id'], name=api_node['name'], state=self.NODE_STATE_MAP.get(api_node['status'], NodeState.UNKNOWN), public_ips=public_ips, private_ips=private_ips, created_at=created, driver=self, extra=dict( addresses=api_node['addresses'], hostId=api_node['hostId'], access_ip=api_node.get('accessIPv4'), access_ipv6=api_node.get('accessIPv6', None), # Docs says "tenantId", but actual is "tenant_id". *sigh* # Best handle both. 
tenantId=api_node.get('tenant_id') or api_node['tenantId'], userId=api_node.get('user_id', None), imageId=image_id, flavorId=api_node['flavor']['id'], uri=next(link['href'] for link in api_node['links'] if link['rel'] == 'self'), service_name=self.connection.get_service_name(), metadata=api_node['metadata'], password=api_node.get('adminPass', None), created=api_node['created'], updated=api_node['updated'], key_name=api_node.get('key_name', None), disk_config=api_node.get('OS-DCF:diskConfig', None), config_drive=config_drive, availability_zone=api_node.get('OS-EXT-AZ:availability_zone'), volumes_attached=volumes_attached, task_state=api_node.get("OS-EXT-STS:task_state", None), vm_state=api_node.get("OS-EXT-STS:vm_state", None), power_state=api_node.get("OS-EXT-STS:power_state", None), progress=api_node.get("progress", None), fault=api_node.get('fault') ), ) def _to_volume(self, api_node): if 'volume' in api_node: api_node = api_node['volume'] state = self.VOLUME_STATE_MAP.get(api_node['status'], StorageVolumeState.UNKNOWN) return StorageVolume( id=api_node['id'], name=api_node['displayName'], size=api_node['size'], state=state, driver=self, extra={ 'description': api_node['displayDescription'], 'attachments': [att for att in api_node['attachments'] if att], # TODO: remove in 1.18.0 'state': api_node.get('status', None), 'snapshot_id': api_node.get('snapshotId', None), 'location': api_node.get('availabilityZone', None), 'volume_type': api_node.get('volumeType', None), 'metadata': api_node.get('metadata', None), 'created_at': api_node.get('createdAt', None) } ) def _to_snapshot(self, data): if 'snapshot' in data: data = data['snapshot'] volume_id = data.get('volume_id', data.get('volumeId', None)) display_name = data.get('display_name', data.get('displayName', None)) created_at = data.get('created_at', data.get('createdAt', None)) description = data.get('display_description', data.get('displayDescription', None)) status = data.get('status', None) extra = {'volume_id': 
volume_id, 'name': display_name, 'created': created_at, 'description': description, 'status': status} state = self.SNAPSHOT_STATE_MAP.get( status, VolumeSnapshotState.UNKNOWN ) try: created_dt = parse_date(created_at) except ValueError: created_dt = None snapshot = VolumeSnapshot(id=data['id'], driver=self, size=data['size'], extra=extra, created=created_dt, state=state, name=display_name) return snapshot def _to_size(self, api_flavor, price=None, bandwidth=None): # if provider-specific subclasses can get better values for # price/bandwidth, then can pass them in when they super(). if not price: price = self._get_size_price(str(api_flavor['id'])) extra = api_flavor.get('OS-FLV-WITH-EXT-SPECS:extra_specs', {}) return OpenStackNodeSize( id=api_flavor['id'], name=api_flavor['name'], ram=api_flavor['ram'], disk=api_flavor['disk'], vcpus=api_flavor['vcpus'], ephemeral_disk=api_flavor.get('OS-FLV-EXT-DATA:ephemeral', None), swap=api_flavor['swap'], extra=extra, bandwidth=bandwidth, price=price, driver=self, ) def _get_size_price(self, size_id): try: return get_size_price( driver_type='compute', driver_name=self.api_name, size_id=size_id, ) except KeyError: return(0.0) def _extract_image_id_from_url(self, location_header): path = urlparse.urlparse(location_header).path image_id = path.split('/')[-1] return image_id def ex_rescue(self, node, password=None): # Requires Rescue Mode extension """ Rescue a node :param node: node :type node: :class:`Node` :param password: password :type password: ``str`` :rtype: :class:`Node` """ if password: resp = self._node_action(node, 'rescue', adminPass=password) else: resp = self._node_action(node, 'rescue') password = json.loads(resp.body)['adminPass'] node.extra['password'] = password return node def ex_unrescue(self, node): """ Unrescue a node :param node: node :type node: :class:`Node` :rtype: ``bool`` """ resp = self._node_action(node, 'unrescue') return resp.status == httplib.ACCEPTED def _to_floating_ip_pools(self, obj): 
pool_elements = obj['floating_ip_pools'] return [self._to_floating_ip_pool(pool) for pool in pool_elements] def _to_floating_ip_pool(self, obj): return OpenStack_1_1_FloatingIpPool(obj['name'], self.connection) def ex_list_floating_ip_pools(self): """ List available floating IP pools :rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpPool` """ return self._to_floating_ip_pools( self.connection.request('/os-floating-ip-pools').object) def _to_floating_ips(self, obj): ip_elements = obj['floating_ips'] return [self._to_floating_ip(ip) for ip in ip_elements] def _to_floating_ip(self, obj): return OpenStack_1_1_FloatingIpAddress(id=obj['id'], ip_address=obj['ip'], pool=None, node_id=obj['instance_id'], driver=self) def ex_list_floating_ips(self): """ List floating IPs :rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpAddress` """ return self._to_floating_ips( self.connection.request('/os-floating-ips').object) def ex_get_floating_ip(self, ip): """ Get specified floating IP :param ip: floating IP to get :type ip: ``str`` :rtype: :class:`OpenStack_1_1_FloatingIpAddress` """ floating_ips = self.ex_list_floating_ips() ip_obj, = [x for x in floating_ips if x.ip_address == ip] return ip_obj def ex_create_floating_ip(self, ip_pool=None): """ Create new floating IP. The ip_pool attribute is optional only if your infrastructure has only one IP pool available. 
:param ip_pool: name of the floating IP pool :type ip_pool: ``str`` :rtype: :class:`OpenStack_1_1_FloatingIpAddress` """ data = {'pool': ip_pool} if ip_pool is not None else {} resp = self.connection.request('/os-floating-ips', method='POST', data=data) data = resp.object['floating_ip'] id = data['id'] ip_address = data['ip'] return OpenStack_1_1_FloatingIpAddress(id=id, ip_address=ip_address, pool=None, node_id=None, driver=self) def ex_delete_floating_ip(self, ip): """ Delete specified floating IP :param ip: floating IP to remove :type ip: :class:`OpenStack_1_1_FloatingIpAddress` :rtype: ``bool`` """ resp = self.connection.request('/os-floating-ips/%s' % ip.id, method='DELETE') return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED) def ex_attach_floating_ip_to_node(self, node, ip): """ Attach the floating IP to the node :param node: node :type node: :class:`Node` :param ip: floating IP to attach :type ip: ``str`` or :class:`OpenStack_1_1_FloatingIpAddress` :rtype: ``bool`` """ address = ip.ip_address if hasattr(ip, 'ip_address') else ip data = { 'addFloatingIp': {'address': address} } resp = self.connection.request('/servers/%s/action' % node.id, method='POST', data=data) return resp.status == httplib.ACCEPTED def ex_detach_floating_ip_from_node(self, node, ip): """ Detach the floating IP from the node :param node: node :type node: :class:`Node` :param ip: floating IP to remove :type ip: ``str`` or :class:`OpenStack_1_1_FloatingIpAddress` :rtype: ``bool`` """ address = ip.ip_address if hasattr(ip, 'ip_address') else ip data = { 'removeFloatingIp': {'address': address} } resp = self.connection.request('/servers/%s/action' % node.id, method='POST', data=data) return resp.status == httplib.ACCEPTED def ex_get_metadata_for_node(self, node): """ Return the metadata associated with the node. :param node: Node instance :type node: :class:`Node` :return: A dictionary or other mapping of strings to strings, associating tag names with tag values. 
:type tags: ``dict`` """ return node.extra['metadata'] def ex_pause_node(self, node): return self._post_simple_node_action(node, 'pause') def ex_unpause_node(self, node): return self._post_simple_node_action(node, 'unpause') def ex_stop_node(self, node): return self._post_simple_node_action(node, 'os-stop') def ex_start_node(self, node): return self._post_simple_node_action(node, 'os-start') def ex_suspend_node(self, node): return self._post_simple_node_action(node, 'suspend') def ex_resume_node(self, node): return self._post_simple_node_action(node, 'resume') def _post_simple_node_action(self, node, action): """ Post a simple, data-less action to the OS node action endpoint :param `Node` node: :param str action: the action to call :return `bool`: a boolean that indicates success """ uri = '/servers/{node_id}/action'.format(node_id=node.id) resp = self.connection.request(uri, method='POST', data={action: None}) return resp.status == httplib.ACCEPTED class OpenStack_2_Connection(OpenStackComputeConnection): responseCls = OpenStack_1_1_Response accept_format = 'application/json' default_content_type = 'application/json; charset=UTF-8' def encode_data(self, data): return json.dumps(data) class OpenStack_2_NodeDriver(OpenStack_1_1_NodeDriver): """ OpenStack node driver. """ connectionCls = OpenStack_2_Connection type = Provider.OPENSTACK features = {"create_node": ["generates_password"]} _networks_url_prefix = '/os-networks' def __init__(self, *args, **kwargs): self._ex_force_api_version = str(kwargs.pop('ex_force_api_version', None)) if 'ex_force_auth_version' not in kwargs: kwargs['ex_force_auth_version'] = '3.x_password' super(OpenStack_2_NodeDriver, self).__init__(*args, **kwargs) class OpenStack_1_1_FloatingIpPool(object): """ Floating IP Pool info. 
""" def __init__(self, name, connection): self.name = name self.connection = connection def list_floating_ips(self): """ List floating IPs in the pool :rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpAddress` """ return self._to_floating_ips( self.connection.request('/os-floating-ips').object) def _to_floating_ips(self, obj): ip_elements = obj['floating_ips'] return [self._to_floating_ip(ip) for ip in ip_elements] def _to_floating_ip(self, obj): return OpenStack_1_1_FloatingIpAddress(id=obj['id'], ip_address=obj['ip'], pool=self, node_id=obj['instance_id'], driver=self.connection.driver) def get_floating_ip(self, ip): """ Get specified floating IP from the pool :param ip: floating IP to get :type ip: ``str`` :rtype: :class:`OpenStack_1_1_FloatingIpAddress` """ ip_obj, = [x for x in self.list_floating_ips() if x.ip_address == ip] return ip_obj def create_floating_ip(self): """ Create new floating IP in the pool :rtype: :class:`OpenStack_1_1_FloatingIpAddress` """ resp = self.connection.request('/os-floating-ips', method='POST', data={'pool': self.name}) data = resp.object['floating_ip'] id = data['id'] ip_address = data['ip'] return OpenStack_1_1_FloatingIpAddress(id=id, ip_address=ip_address, pool=self, node_id=None, driver=self.connection.driver) def delete_floating_ip(self, ip): """ Delete specified floating IP from the pool :param ip: floating IP to remove :type ip::class:`OpenStack_1_1_FloatingIpAddress` :rtype: ``bool`` """ resp = self.connection.request('/os-floating-ips/%s' % ip.id, method='DELETE') return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED) def __repr__(self): return ('' % self.name) class OpenStack_1_1_FloatingIpAddress(object): """ Floating IP info. 
""" def __init__(self, id, ip_address, pool, node_id=None, driver=None): self.id = str(id) self.ip_address = ip_address self.pool = pool self.node_id = node_id self.driver = driver def delete(self): """ Delete this floating IP :rtype: ``bool`` """ if self.pool is not None: return self.pool.delete_floating_ip(self) elif self.driver is not None: return self.driver.ex_delete_floating_ip(self) def __repr__(self): return ('' % (self.id, self.ip_address, self.pool, self.driver)) apache-libcloud-2.2.1/libcloud/compute/drivers/skalicloud.py0000664000175000017500000000554512701023453024057 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ skalicloud Driver """ from libcloud.compute.types import Provider from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection # API end-points API_ENDPOINTS = { 'my-1': { 'name': 'Malaysia, Kuala Lumpur', 'country': 'Malaysia', 'host': 'api.sdg-my.skalicloud.com' } } # Default API end-point for the base connection class. 
DEFAULT_ENDPOINT = 'my-1' # Retrieved from http://www.skalicloud.com/cloud-api/ STANDARD_DRIVES = { '90aa51f2-15c0-4cff-81ee-e93aa20b9468': { 'uuid': '90aa51f2-15c0-4cff-81ee-e93aa20b9468', 'description': 'CentOS 5.5 -64bit', 'size_gunzipped': '1GB', 'supports_deployment': True, }, 'c144d7a7-e24b-48ab-954b-6b6ec514ed6f': { 'uuid': 'c144d7a7-e24b-48ab-954b-6b6ec514ed6f', 'description': 'Debian 5 -64bit', 'size_gunzipped': '1GB', 'supports_deployment': True, }, '3051699a-a536-4220-aeb5-67f2ec101a09': { 'uuid': '3051699a-a536-4220-aeb5-67f2ec101a09', 'description': 'Ubuntu Server 10.10 -64bit', 'size_gunzipped': '1GB', 'supports_deployment': True, }, '11c4c922-5ff8-4094-b06c-eb8ffaec1ea9': { 'uuid': '11c4c922-5ff8-4094-b06c-eb8ffaec1ea9', 'description': 'Windows 2008R2 Web Edition', 'size_gunzipped': '13GB', 'supports_deployment': False, }, '93bf390e-4f46-4252-a8bc-9d6d80e3f955': { 'uuid': '93bf390e-4f46-4252-a8bc-9d6d80e3f955', 'description': 'Windows Server 2008R2 Standard', 'size_gunzipped': '13GB', 'supports_deployment': False, } } class SkaliCloudConnection(ElasticStackBaseConnection): host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] class SkaliCloudNodeDriver(ElasticStackBaseNodeDriver): type = Provider.SKALICLOUD api_name = 'skalicloud' name = 'skalicloud' website = 'http://www.skalicloud.com/' connectionCls = SkaliCloudConnection features = {"create_node": ["generates_password"]} _standard_drives = STANDARD_DRIVES apache-libcloud-2.2.1/libcloud/compute/drivers/packet.py0000664000175000017500000002237212701023453023171 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Packet Driver
"""

from libcloud.utils.py3 import httplib

from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.compute.types import Provider, NodeState, InvalidCredsError
from libcloud.compute.base import NodeDriver, Node
from libcloud.compute.base import NodeImage, NodeSize, NodeLocation
from libcloud.compute.base import KeyPair

PACKET_ENDPOINT = "api.packet.net"


class PacketResponse(JsonResponse):
    # HTTP status codes that are considered a successful response.
    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_error(self):
        """
        Parse an error body returned by the Packet API.

        :raises InvalidCredsError: on HTTP 401 (bad/expired token).
        :return: Error string built from the JSON ``message`` field, or the
                 raw parsed body when no ``message`` key is present.
        """
        if self.status == httplib.UNAUTHORIZED:
            body = self.parse_body()
            raise InvalidCredsError(body['message'])
        else:
            body = self.parse_body()
            if 'message' in body:
                error = '%s (code: %s)' % (body['message'], self.status)
            else:
                error = body
            return error

    def success(self):
        # A response is successful iff its status is in the whitelist above.
        return self.status in self.valid_response_codes


class PacketConnection(ConnectionKey):
    """
    Connection class for the Packet driver.
    """

    host = PACKET_ENDPOINT
    responseCls = PacketResponse

    def add_default_headers(self, headers):
        """
        Add headers that are necessary for every request
        """
        headers['Content-Type'] = 'application/json'
        headers['X-Auth-Token'] = self.key
        headers['X-Consumer-Token'] = \
            'kcrhMn7hwG8Ceo2hAhGFa2qpxLBvVHxEjS9ue8iqmsNkeeB2iQgMq4dNc1893pYu'
        return headers


class PacketNodeDriver(NodeDriver):
    """
    Packet NodeDriver
    """

    connectionCls = PacketConnection
    type = Provider.PACKET
    name = 'Packet'
    website = 'http://www.packet.net/'

    # Maps Packet device states onto libcloud NodeState values.
    NODE_STATE_MAP = {'queued': NodeState.PENDING,
                      'provisioning': NodeState.PENDING,
                      'rebuilding': NodeState.PENDING,
                      'powering_on': NodeState.REBOOTING,
                      'powering_off': NodeState.REBOOTING,
                      'rebooting': NodeState.REBOOTING,
                      'inactive': NodeState.STOPPED,
                      'deleted': NodeState.TERMINATED,
                      'deprovisioning': NodeState.TERMINATED,
                      'failed': NodeState.ERROR,
                      'active': NodeState.RUNNING}

    def list_nodes(self, ex_project_id):
        """
        List nodes (devices) belonging to the given project.

        :param ex_project_id: Packet project id (required).
        :type ex_project_id: ``str``

        :rtype: ``list`` of :class:`Node`
        """
        data = self.connection.request('/projects/%s/devices' %
                                       (ex_project_id),
                                       params={'include': 'plan'}
                                       ).object['devices']
        return list(map(self._to_node, data))

    def list_locations(self):
        """List Packet facilities as :class:`NodeLocation` objects."""
        data = self.connection.request('/facilities')\
            .object['facilities']
        return list(map(self._to_location, data))

    def list_images(self):
        """List available operating systems as :class:`NodeImage` objects."""
        data = self.connection.request('/operating-systems')\
            .object['operating_systems']
        return list(map(self._to_image, data))

    def list_sizes(self):
        """List hardware plans as :class:`NodeSize` objects."""
        data = self.connection.request('/plans').object['plans']
        return list(map(self._to_size, data))

    def create_node(self, name, size, image, location, ex_project_id):
        """
        Create a node.

        :return: The newly created node.
        :rtype: :class:`Node`
        """
        params = {'hostname': name, 'plan': size.id,
                  'operating_system': image.id, 'facility': location.id,
                  'include': 'plan', 'billing_cycle': 'hourly'}

        data = self.connection.request('/projects/%s/devices' %
                                       (ex_project_id),
                                       params=params, method='POST')

        status = data.object.get('status', 'OK')
        if status == 'ERROR':
            message = data.object.get('message', None)
            error_message = data.object.get('error_message', message)
            raise ValueError('Failed to create node: %s' % (error_message))
        return self._to_node(data=data.object)

    def reboot_node(self, node):
        """Issue a reboot action against the device."""
        params = {'type': 'reboot'}
        res = self.connection.request('/devices/%s/actions' % (node.id),
                                      params=params,
                                      method='POST')
        return res.status == httplib.OK

    def destroy_node(self, node):
        """Delete the device permanently."""
        res = self.connection.request('/devices/%s' % (node.id),
                                      method='DELETE')
        return res.status == httplib.OK

    def list_key_pairs(self):
        """
        List all the available SSH keys.

        :return: Available SSH keys.
        :rtype: ``list`` of :class:`.KeyPair` objects
        """
        data = self.connection.request('/ssh-keys').object['ssh_keys']
        return list(map(self._to_key_pairs, data))

    def create_key_pair(self, name, public_key):
        """
        Create a new SSH key.

        :param name: Key name (required)
        :type name: ``str``

        :param public_key: Valid public key string (required)
        :type public_key: ``str``
        """
        params = {'label': name, 'key': public_key}
        data = self.connection.request('/ssh-keys', method='POST',
                                       params=params).object
        return self._to_key_pairs(data)

    def delete_key_pair(self, key):
        """
        Delete an existing SSH key.

        :param key: SSH key (required)
        :type key: :class:`KeyPair`
        """
        key_id = key.name
        res = self.connection.request('/ssh-keys/%s' % (key_id),
                                      method='DELETE')
        return res.status == httplib.NO_CONTENT

    def _to_node(self, data):
        """Convert a raw device dict from the API into a :class:`Node`."""
        extra_keys = ['created_at', 'updated_at', 'userdata', 'billing_cycle',
                      'locked']
        if 'state' in data:
            state = self.NODE_STATE_MAP.get(data['state'], NodeState.UNKNOWN)
        else:
            state = NodeState.UNKNOWN

        # FIX: default these before the conditional assignments below.
        # Previously a response missing 'ip_addresses', 'operating_system'
        # or 'plan' raised UnboundLocalError when building the Node.
        ips = {'public': [], 'private': []}
        image = None
        size = None

        if 'ip_addresses' in data and data['ip_addresses'] is not None:
            ips = self._parse_ips(data['ip_addresses'])

        if 'operating_system' in data and data['operating_system'] is not None:
            image = self._to_image(data['operating_system'])

        if 'plan' in data and data['plan'] is not None:
            size = self._to_size(data['plan'])

        extra = {}
        for key in extra_keys:
            if key in data:
                extra[key] = data[key]

        node = Node(id=data['id'], name=data['hostname'], state=state,
                    image=image, size=size,
                    public_ips=ips['public'], private_ips=ips['private'],
                    extra=extra, driver=self)
        return node

    def _to_image(self, data):
        """Convert an operating-system dict into a :class:`NodeImage`."""
        extra = {'distro': data['distro'], 'version': data['version']}
        return NodeImage(id=data['slug'], name=data['name'], extra=extra,
                         driver=self)

    def _to_location(self, data):
        """Convert a facility dict into a :class:`NodeLocation`."""
        return NodeLocation(id=data['code'], name=data['name'], country=None,
                            driver=self)

    def _to_size(self, data):
        """Convert a plan dict into a :class:`NodeSize` (ram in MB, disk
        in GB, price per hour)."""
        extra = {'description': data['description'], 'line': data['line']}

        ram = data['specs']['memory']['total'].lower()
        if 'mb' in ram:
            ram = int(ram.replace('mb', ''))
        elif 'gb' in ram:
            ram = int(ram.replace('gb', '')) * 1024

        disk = 0
        for disks in data['specs']['drives']:
            disk += disks['count'] * int(disks['size'].replace('GB', ''))

        price = data['pricing']['hourly']

        return NodeSize(id=data['slug'], name=data['name'], ram=ram,
                        disk=disk, bandwidth=0, price=price,
                        extra=extra, driver=self)

    def _to_key_pairs(self, data):
        """Convert an ssh-key dict into a :class:`KeyPair`.

        NOTE: the KeyPair ``name`` carries the Packet key *id* (a UUID),
        matching what ``delete_key_pair`` expects.
        """
        extra = {'label': data['label'],
                 'created_at': data['created_at'],
                 'updated_at': data['updated_at']}
        return KeyPair(name=data['id'],
                       fingerprint=data['fingerprint'],
                       public_key=data['key'],
                       private_key=None,
                       driver=self,
                       extra=extra)

    def _parse_ips(self, data):
        """Split raw ip_addresses entries into public/private lists."""
        public_ips = []
        private_ips = []
        for address in data:
            if 'address' in address and address['address'] is not None:
                if 'public' in address and address['public'] is True:
                    public_ips.append(address['address'])
                else:
                    private_ips.append(address['address'])
        return {'public': public_ips, 'private': private_ips}
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Internet Solutions Driver """ from libcloud.compute.providers import Provider from libcloud.common.dimensiondata import (DimensionDataConnection, API_ENDPOINTS) from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver DEFAULT_REGION = 'is-af' class InternetSolutionsNodeDriver(DimensionDataNodeDriver): """ InternetSolutions node driver, based on Dimension Data driver """ selected_region = None connectionCls = DimensionDataConnection name = 'InternetSolutions' website = 'http://www.is.co.za/' type = Provider.INTERNETSOLUTIONS features = {'create_node': ['password']} api_version = 1.0 def __init__(self, key, secret=None, secure=True, host=None, port=None, api_version=None, region=DEFAULT_REGION, **kwargs): if region not in API_ENDPOINTS: raise ValueError('Invalid region: %s' % (region)) self.selected_region = API_ENDPOINTS[region] super(InternetSolutionsNodeDriver, self).__init__( key=key, secret=secret, secure=secure, host=host, port=port, api_version=api_version, region=region, **kwargs) apache-libcloud-2.2.1/libcloud/compute/drivers/dummy.py0000664000175000017500000002545412701023453023061 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Dummy Driver @note: This driver is out of date """ import uuid import socket import struct from libcloud.common.base import ConnectionKey from libcloud.compute.base import NodeImage, NodeSize, Node from libcloud.compute.base import NodeDriver, NodeLocation from libcloud.compute.base import KeyPair from libcloud.compute.types import Provider, NodeState class DummyConnection(ConnectionKey): """ Dummy connection class """ def connect(self, host=None, port=None): pass class DummyNodeDriver(NodeDriver): """ Dummy node driver This is a fake driver which appears to always create or destroy nodes successfully. >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> node=driver.create_node() >>> node.public_ips[0] '127.0.0.3' >>> node.name 'dummy-3' If the credentials you give convert to an integer then the next node to be created will be one higher. Each time you create a node you will get a different IP address. >>> driver = DummyNodeDriver(22) >>> node=driver.create_node() >>> node.name 'dummy-23' """ name = "Dummy Node Provider" website = 'http://example.com' type = Provider.DUMMY def __init__(self, creds): """ :param creds: Credentials :type creds: ``str`` :rtype: ``None`` """ self.creds = creds try: num = int(creds) except ValueError: num = None if num: self.nl = [] startip = _ip_to_int('127.0.0.1') for i in range(num): ip = _int_to_ip(startip + i) self.nl.append( Node(id=i, name='dummy-%d' % (i), state=NodeState.RUNNING, public_ips=[ip], private_ips=[], driver=self, extra={'foo': 'bar'}) ) else: self.nl = [ Node(id=1, name='dummy-1', state=NodeState.RUNNING, public_ips=['127.0.0.1'], private_ips=[], driver=self, extra={'foo': 'bar'}), Node(id=2, name='dummy-2', state=NodeState.RUNNING, public_ips=['127.0.0.1'], private_ips=[], driver=self, extra={'foo': 'bar'}), ] self.connection = DummyConnection(self.creds) def get_uuid(self, unique_field=None): """ :param unique_field: Unique field :type unique_field: ``bool`` :rtype: 
:class:`UUID` """ return str(uuid.uuid4()) def list_nodes(self): """ List the nodes known to a particular driver; There are two default nodes created at the beginning >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> node_list=driver.list_nodes() >>> sorted([node.name for node in node_list ]) ['dummy-1', 'dummy-2'] each item in the list returned is a node object from which you can carry out any node actions you wish >>> node_list[0].reboot() True As more nodes are added, list_nodes will return them >>> node=driver.create_node() >>> node.size.id 's1' >>> node.image.id 'i2' >>> sorted([n.name for n in driver.list_nodes()]) ['dummy-1', 'dummy-2', 'dummy-3'] @inherits: :class:`NodeDriver.list_nodes` """ return self.nl def reboot_node(self, node): """ Sets the node state to rebooting; in this dummy driver always returns True as if the reboot had been successful. >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> node=driver.create_node() >>> from libcloud.compute.types import NodeState >>> node.state == NodeState.RUNNING True >>> node.state == NodeState.REBOOTING False >>> driver.reboot_node(node) True >>> node.state == NodeState.REBOOTING True Please note, dummy nodes never recover from the reboot. @inherits: :class:`NodeDriver.reboot_node` """ node.state = NodeState.REBOOTING return True def destroy_node(self, node): """ Sets the node state to terminated and removes it from the node list >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> from libcloud.compute.types import NodeState >>> node = [node for node in driver.list_nodes() if ... 
node.name == 'dummy-1'][0] >>> node.state == NodeState.RUNNING True >>> driver.destroy_node(node) True >>> node.state == NodeState.RUNNING False >>> [n for n in driver.list_nodes() if n.name == 'dummy-1'] [] @inherits: :class:`NodeDriver.destroy_node` """ node.state = NodeState.TERMINATED self.nl.remove(node) return True def list_images(self, location=None): """ Returns a list of images as a cloud provider might have >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> sorted([image.name for image in driver.list_images()]) ['Slackware 4', 'Ubuntu 9.04', 'Ubuntu 9.10'] @inherits: :class:`NodeDriver.list_images` """ return [ NodeImage(id=1, name="Ubuntu 9.10", driver=self), NodeImage(id=2, name="Ubuntu 9.04", driver=self), NodeImage(id=3, name="Slackware 4", driver=self), ] def list_sizes(self, location=None): """ Returns a list of node sizes as a cloud provider might have >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> sorted([size.ram for size in driver.list_sizes()]) [128, 512, 4096, 8192] @inherits: :class:`NodeDriver.list_images` """ return [ NodeSize(id=1, name="Small", ram=128, disk=4, bandwidth=500, price=4, driver=self), NodeSize(id=2, name="Medium", ram=512, disk=16, bandwidth=1500, price=8, driver=self), NodeSize(id=3, name="Big", ram=4096, disk=32, bandwidth=2500, price=32, driver=self), NodeSize(id=4, name="XXL Big", ram=4096 * 2, disk=32 * 4, bandwidth=2500 * 3, price=32 * 2, driver=self), ] def list_locations(self): """ Returns a list of locations of nodes >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> sorted([loc.name + " in " + loc.country for loc in ... 
driver.list_locations()]) ['Island Datacenter in FJ', 'London Loft in GB', "Paul's Room in US"] @inherits: :class:`NodeDriver.list_locations` """ return [ NodeLocation(id=1, name="Paul's Room", country='US', driver=self), NodeLocation(id=2, name="London Loft", country='GB', driver=self), NodeLocation(id=3, name="Island Datacenter", country='FJ', driver=self), ] def create_node(self, **kwargs): """ Creates a dummy node; the node id is equal to the number of nodes in the node list >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> sorted([node.name for node in driver.list_nodes()]) ['dummy-1', 'dummy-2'] >>> nodeA = driver.create_node() >>> sorted([node.name for node in driver.list_nodes()]) ['dummy-1', 'dummy-2', 'dummy-3'] >>> driver.create_node().name 'dummy-4' >>> driver.destroy_node(nodeA) True >>> sorted([node.name for node in driver.list_nodes()]) ['dummy-1', 'dummy-2', 'dummy-4'] @inherits: :class:`NodeDriver.create_node` """ l = len(self.nl) + 1 n = Node(id=l, name='dummy-%d' % l, state=NodeState.RUNNING, public_ips=['127.0.0.%d' % l], private_ips=[], driver=self, size=NodeSize(id='s1', name='foo', ram=2048, disk=160, bandwidth=None, price=0.0, driver=self), image=NodeImage(id='i2', name='image', driver=self), extra={'foo': 'bar'}) self.nl.append(n) return n def import_key_pair_from_string(self, name, key_material): key_pair = KeyPair(name=name, public_key=key_material, fingerprint='fingerprint', private_key='private_key', driver=self) return key_pair def _ip_to_int(ip): return socket.htonl(struct.unpack('I', socket.inet_aton(ip))[0]) def _int_to_ip(ip): return socket.inet_ntoa(struct.pack('I', socket.ntohl(ip))) if __name__ == "__main__": import doctest doctest.testmod() apache-libcloud-2.2.1/libcloud/compute/drivers/softlayer.py0000664000175000017500000004074613153541406023744 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
# See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Softlayer driver
"""

import time

try:
    from Crypto.PublicKey import RSA
    crypto = True
except ImportError:
    crypto = False

from libcloud.common.softlayer import SoftLayerConnection, SoftLayerException
from libcloud.compute.types import Provider, NodeState
from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, \
    NodeImage, KeyPair
from libcloud.compute.types import KeyPairDoesNotExistError

DEFAULT_DOMAIN = 'example.com'
DEFAULT_CPU_SIZE = 1
DEFAULT_RAM_SIZE = 2048
DEFAULT_DISK_SIZE = 100

DATACENTERS = {
    'hou02': {'country': 'US'},
    'sea01': {'country': 'US', 'name': 'Seattle - West Coast U.S.'},
    'wdc01': {'country': 'US', 'name': 'Washington, DC - East Coast U.S.'},
    'dal01': {'country': 'US'},
    'dal02': {'country': 'US'},
    'dal04': {'country': 'US'},
    'dal05': {'country': 'US', 'name': 'Dallas - Central U.S.'},
    'dal06': {'country': 'US'},
    'dal07': {'country': 'US'},
    'sjc01': {'country': 'US', 'name': 'San Jose - West Coast U.S.'},
    'sng01': {'country': 'SG', 'name': 'Singapore - Southeast Asia'},
    'ams01': {'country': 'NL', 'name': 'Amsterdam - Western Europe'},
    'tok02': {'country': 'JP', 'name': 'Tokyo - Japan'},
}

NODE_STATE_MAP = {
    'RUNNING': NodeState.RUNNING,
    'HALTED': NodeState.UNKNOWN,
    'PAUSED': NodeState.UNKNOWN,
    'INITIATING': NodeState.PENDING
}

SL_BASE_TEMPLATES = [
    {
        'name': '1 CPU, 1GB ram, 25GB',
        'ram': 1024,
        'disk': 25,
        'cpus': 1,
    }, {
        'name': '1 CPU, 1GB ram, 100GB',
        'ram': 1024,
        'disk': 100,
        'cpus': 1,
    }, {
        'name': '1 CPU, 2GB ram, 100GB',
        'ram': 2 * 1024,
        'disk': 100,
        'cpus': 1,
    }, {
        'name': '1 CPU, 4GB ram, 100GB',
        'ram': 4 * 1024,
        'disk': 100,
        'cpus': 1,
    }, {
        'name': '2 CPU, 2GB ram, 100GB',
        'ram': 2 * 1024,
        'disk': 100,
        'cpus': 2,
    }, {
        'name': '2 CPU, 4GB ram, 100GB',
        'ram': 4 * 1024,
        'disk': 100,
        'cpus': 2,
    }, {
        'name': '2 CPU, 8GB ram, 100GB',
        'ram': 8 * 1024,
        'disk': 100,
        'cpus': 2,
    }, {
        'name': '4 CPU, 4GB ram, 100GB',
        'ram': 4 * 1024,
        'disk': 100,
        'cpus': 4,
    }, {
        'name': '4 CPU, 8GB ram, 100GB',
        'ram': 8 * 1024,
        'disk': 100,
        'cpus': 4,
    }, {
        'name': '6 CPU, 4GB ram, 100GB',
        'ram': 4 * 1024,
        'disk': 100,
        'cpus': 6,
    }, {
        'name': '6 CPU, 8GB ram, 100GB',
        'ram': 8 * 1024,
        'disk': 100,
        'cpus': 6,
    }, {
        'name': '8 CPU, 8GB ram, 100GB',
        'ram': 8 * 1024,
        'disk': 100,
        'cpus': 8,
    }, {
        'name': '8 CPU, 16GB ram, 100GB',
        'ram': 16 * 1024,
        'disk': 100,
        'cpus': 8,
    }]

SL_TEMPLATES = {}
for i, template in enumerate(SL_BASE_TEMPLATES):
    # Add local disk templates
    local = template.copy()
    local['local_disk'] = True
    SL_TEMPLATES[i] = local


class SoftLayerNodeDriver(NodeDriver):
    """
    SoftLayer node driver

    Extra node attributes:
        - password: root password
        - hourlyRecurringFee: hourly price (if applicable)
        - recurringFee      : flat rate    (if applicable)
        - recurringMonths   : The number of months in which the recurringFee
         will be incurred.
    """
    connectionCls = SoftLayerConnection
    name = 'SoftLayer'
    website = 'http://www.softlayer.com/'
    type = Provider.SOFTLAYER

    features = {'create_node': ['generates_password', 'ssh_key']}
    api_name = 'softlayer'

    def _to_node(self, host):
        """Convert a raw virtual-guest dict into a :class:`Node`."""
        try:
            password = \
                host['operatingSystem']['passwords'][0]['password']
        except (IndexError, KeyError):
            password = None

        hourlyRecurringFee = host.get('billingItem', {}).get(
            'hourlyRecurringFee', 0)
        recurringFee = host.get('billingItem', {}).get('recurringFee', 0)
        recurringMonths = host.get('billingItem', {}).get('recurringMonths', 0)
        createDate = host.get('createDate', None)

        # When machine is launching it gets state halted
        # we change this to pending
        state = NODE_STATE_MAP.get(host['powerState']['keyName'],
                                   NodeState.UNKNOWN)

        if not password and state == NodeState.UNKNOWN:
            state = NODE_STATE_MAP['INITIATING']

        public_ips = []
        private_ips = []

        if 'primaryIpAddress' in host:
            public_ips.append(host['primaryIpAddress'])

        if 'primaryBackendIpAddress' in host:
            private_ips.append(host['primaryBackendIpAddress'])

        image = host.get('operatingSystem', {}).get('softwareLicense', {}) \
                    .get('softwareDescription', {}) \
                    .get('longDescription', None)

        return Node(
            id=host['id'],
            name=host['fullyQualifiedDomainName'],
            state=state,
            public_ips=public_ips,
            private_ips=private_ips,
            driver=self,
            extra={
                'hostname': host['hostname'],
                'fullyQualifiedDomainName': host['fullyQualifiedDomainName'],
                'password': password,
                'maxCpu': host.get('maxCpu', None),
                'datacenter': host.get('datacenter', {}).get('longName', None),
                'maxMemory': host.get('maxMemory', None),
                'image': image,
                'hourlyRecurringFee': hourlyRecurringFee,
                'recurringFee': recurringFee,
                'recurringMonths': recurringMonths,
                'created': createDate,
            }
        )

    def destroy_node(self, node):
        """Delete the virtual guest; always returns ``True`` if the API
        call does not raise."""
        self.connection.request(
            'SoftLayer_Virtual_Guest', 'deleteObject', id=node.id
        )
        return True

    def reboot_node(self, node):
        """Perform a soft reboot of the virtual guest."""
        self.connection.request(
            'SoftLayer_Virtual_Guest', 'rebootSoft', id=node.id
        )
        return True

    def ex_stop_node(self, node):
        """Power the virtual guest off."""
        self.connection.request(
            'SoftLayer_Virtual_Guest', 'powerOff', id=node.id
        )
        return True

    def ex_start_node(self, node):
        """Power the virtual guest on."""
        self.connection.request(
            'SoftLayer_Virtual_Guest', 'powerOn', id=node.id
        )
        return True

    def _get_order_information(self, node_id, timeout=1200, check_interval=5):
        """
        Poll the API until the guest has a ``provisionDate``.

        :raises SoftLayerException: if ``timeout`` seconds elapse first.
        """
        mask = {
            'billingItem': '',
            'powerState': '',
            'operatingSystem': {'passwords': ''},
            'provisionDate': '',
        }

        for i in range(0, timeout, check_interval):
            res = self.connection.request(
                'SoftLayer_Virtual_Guest',
                'getObject',
                id=node_id,
                object_mask=mask
            ).object

            if res.get('provisionDate', None):
                return res

            time.sleep(check_interval)

        raise SoftLayerException('Timeout on getting node details')

    def create_node(self, **kwargs):
        """Create a new SoftLayer node

        @inherits: :class:`NodeDriver.create_node`

        :keyword    ex_domain: e.g. libcloud.org
        :type       ex_domain: ``str``
        :keyword    ex_cpus: e.g. 2
        :type       ex_cpus: ``int``
        :keyword    ex_disk: e.g. 100
        :type       ex_disk: ``int``
        :keyword    ex_ram: e.g. 2048
        :type       ex_ram: ``int``
        :keyword    ex_bandwidth: e.g. 100
        :type       ex_bandwidth: ``int``
        :keyword    ex_local_disk: e.g. True
        :type       ex_local_disk: ``bool``
        :keyword    ex_datacenter: e.g. Dal05
        :type       ex_datacenter: ``str``
        :keyword    ex_os: e.g. UBUNTU_LATEST
        :type       ex_os: ``str``
        :keyword    ex_keyname: The name of the key pair
        :type       ex_keyname: ``str``
        """
        name = kwargs['name']
        os = 'DEBIAN_LATEST'
        if 'ex_os' in kwargs:
            os = kwargs['ex_os']
        elif 'image' in kwargs:
            os = kwargs['image'].id

        size = kwargs.get('size', NodeSize(id=123, name='Custom', ram=None,
                                           disk=None, bandwidth=None,
                                           price=None,
                                           driver=self.connection.driver))
        ex_size_data = SL_TEMPLATES.get(int(size.id)) or {}
        # plan keys are ints
        cpu_count = kwargs.get('ex_cpus') or ex_size_data.get('cpus') or \
            DEFAULT_CPU_SIZE
        ram = kwargs.get('ex_ram') or ex_size_data.get('ram') or \
            DEFAULT_RAM_SIZE
        bandwidth = kwargs.get('ex_bandwidth') or size.bandwidth or 10
        hourly = 'true' if kwargs.get('ex_hourly', True) else 'false'

        local_disk = 'true'
        if ex_size_data.get('local_disk') is False:
            local_disk = 'false'

        if kwargs.get('ex_local_disk') is False:
            local_disk = 'false'

        disk_size = DEFAULT_DISK_SIZE
        if size.disk:
            disk_size = size.disk
        if kwargs.get('ex_disk'):
            disk_size = kwargs.get('ex_disk')

        datacenter = ''
        if 'ex_datacenter' in kwargs:
            datacenter = kwargs['ex_datacenter']
        elif 'location' in kwargs:
            datacenter = kwargs['location'].id

        domain = kwargs.get('ex_domain')
        if domain is None:
            # Fall back to the part after the first dot of the hostname.
            if name.find('.') != -1:
                domain = name[name.find('.') + 1:]
        if domain is None:
            # TODO: domain is a required argument for the SoftLayer API, but
            # it shouldn't be.
            domain = DEFAULT_DOMAIN

        newCCI = {
            'hostname': name,
            'domain': domain,
            'startCpus': cpu_count,
            'maxMemory': ram,
            'networkComponents': [{'maxSpeed': bandwidth}],
            'hourlyBillingFlag': hourly,
            'operatingSystemReferenceCode': os,
            'localDiskFlag': local_disk,
            'blockDevices': [
                {
                    'device': '0',
                    'diskImage': {
                        'capacity': disk_size,
                    }
                }
            ]
        }

        if datacenter:
            newCCI['datacenter'] = {'name': datacenter}

        if 'ex_keyname' in kwargs:
            newCCI['sshKeys'] = [
                {
                    'id': self._key_name_to_id(kwargs['ex_keyname'])
                }
            ]

        res = self.connection.request(
            'SoftLayer_Virtual_Guest', 'createObject', newCCI
        ).object

        node_id = res['id']
        # Block until the order is provisioned so we can return a full Node.
        raw_node = self._get_order_information(node_id)

        return self._to_node(raw_node)

    def list_key_pairs(self):
        """List the account's SSH keys as :class:`KeyPair` objects."""
        result = self.connection.request(
            'SoftLayer_Account', 'getSshKeys'
        ).object
        elems = [x for x in result]
        key_pairs = self._to_key_pairs(elems=elems)
        return key_pairs

    def get_key_pair(self, name):
        """
        Retrieve a single SSH key by its label.

        :raises KeyPairDoesNotExistError: if no key has that label.
        """
        key_id = self._key_name_to_id(name=name)
        result = self.connection.request(
            'SoftLayer_Security_Ssh_Key', 'getObject', id=key_id
        ).object
        return self._to_key_pair(result)

    # TODO: Check this with the libcloud guys,
    # can we create new dependencies?
    def create_key_pair(self, name, ex_size=4096):
        """
        Generate an RSA key locally (via pycrypto) and register the public
        half with SoftLayer.  The generated private key is attached to the
        returned :class:`KeyPair`.

        :raises NotImplementedError: when pycrypto is not installed.
        """
        if crypto is False:
            # FIX: the two adjacent literals previously concatenated to
            # "create_key_pair needsthe pycrypto library" (missing space).
            raise NotImplementedError('create_key_pair needs '
                                      'the pycrypto library')
        key = RSA.generate(ex_size)
        new_key = {
            'key': key.publickey().exportKey('OpenSSH'),
            'label': name,
            'notes': '',
        }
        result = self.connection.request(
            'SoftLayer_Security_Ssh_Key', 'createObject', new_key
        ).object
        result['private'] = key.exportKey('PEM')
        return self._to_key_pair(result)

    def import_key_pair_from_string(self, name, key_material):
        """Register an existing public key string under the given label."""
        new_key = {
            'key': key_material,
            'label': name,
            'notes': '',
        }
        result = self.connection.request(
            'SoftLayer_Security_Ssh_Key', 'createObject', new_key
        ).object

        key_pair = self._to_key_pair(result)
        return key_pair

    def delete_key_pair(self, key_pair):
        """
        Delete an SSH key.

        :param key_pair: the key to delete (a key name/label is also
            accepted for backward compatibility).
        :type key_pair: :class:`KeyPair`
        """
        # FIX: the original passed the KeyPair object itself to
        # _key_name_to_id, which compares it against string labels and can
        # never match, so deletion always raised KeyPairDoesNotExistError.
        name = getattr(key_pair, 'name', key_pair)
        key = self._key_name_to_id(name)
        result = self.connection.request(
            'SoftLayer_Security_Ssh_Key', 'deleteObject', id=key
        ).object
        return result

    def _to_image(self, img):
        """Convert a createObjectOptions OS entry into a :class:`NodeImage`."""
        return NodeImage(
            id=img['template']['operatingSystemReferenceCode'],
            name=img['itemPrice']['item']['description'],
            driver=self.connection.driver
        )

    def list_images(self, location=None):
        """List the operating systems available for new guests."""
        result = self.connection.request(
            'SoftLayer_Virtual_Guest', 'getCreateObjectOptions'
        ).object
        return [self._to_image(i) for i in result['operatingSystems']]

    def get_image(self, image_id):
        """
        Gets an image based on an image_id.

        :param image_id: Image identifier
        :type image_id: ``str``

        :return: A NodeImage object
        :rtype: :class:`NodeImage`
        """
        images = self.list_images()
        images = [image for image in images if
                  image.id == image_id]
        if len(images) < 1:
            raise SoftLayerException('could not find the image with id %s' %
                                     image_id)
        image = images[0]
        return image

    def _to_size(self, id, size):
        """Convert one SL_TEMPLATES entry into a :class:`NodeSize`."""
        return NodeSize(
            id=id,
            name=size['name'],
            ram=size['ram'],
            disk=size['disk'],
            bandwidth=size.get('bandwidth'),
            price=self._get_size_price(str(id)),
            driver=self.connection.driver,
        )

    def list_sizes(self, location=None):
        """List the fixed plan templates defined in ``SL_TEMPLATES``."""
        return [self._to_size(id, s) for id, s in SL_TEMPLATES.items()]

    def _to_loc(self, loc):
        """Convert a datacenter entry into a :class:`NodeLocation`."""
        country = 'UNKNOWN'
        loc_id = loc['template']['datacenter']['name']
        name = loc_id

        if loc_id in DATACENTERS:
            country = DATACENTERS[loc_id]['country']
            name = DATACENTERS[loc_id].get('name', loc_id)
        return NodeLocation(id=loc_id, name=name,
                            country=country, driver=self)

    def list_locations(self):
        """List the datacenters available for new guests."""
        res = self.connection.request(
            'SoftLayer_Virtual_Guest', 'getCreateObjectOptions'
        ).object
        return [self._to_loc(l) for l in res['datacenters']]

    def list_nodes(self):
        """List the account's virtual guests as :class:`Node` objects."""
        mask = {
            'virtualGuests': {
                'powerState': '',
                'hostname': '',
                'maxMemory': '',
                'datacenter': '',
                'operatingSystem': {'passwords': ''},
                'billingItem': '',
            },
        }
        res = self.connection.request(
            'SoftLayer_Account',
            'getVirtualGuests',
            object_mask=mask
        ).object
        return [self._to_node(h) for h in res]

    def _to_key_pairs(self, elems):
        key_pairs = [self._to_key_pair(elem=elem) for elem in elems]
        return key_pairs

    def _to_key_pair(self, elem):
        """Convert a raw ssh-key dict into a :class:`KeyPair`; the numeric
        SoftLayer id is stored in ``extra['id']``."""
        key_pair = KeyPair(name=elem['label'],
                           public_key=elem['key'],
                           fingerprint=elem['fingerprint'],
                           private_key=elem.get('private', None),
                           driver=self,
                           extra={'id': elem['id']})
        return key_pair

    def _key_name_to_id(self, name):
        """
        Resolve a key label to its numeric SoftLayer id.

        :raises KeyPairDoesNotExistError: if no key has that label.
        """
        result = self.connection.request(
            'SoftLayer_Account', 'getSshKeys'
        ).object
        key_id = [x for x in result if x['label'] == name]
        if len(key_id) == 0:
            raise KeyPairDoesNotExistError(name, self)
        else:
            return int(key_id[0]['id'])
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base driver for the providers based on the ElasticStack platform -
http://www.elasticstack.com.
"""

import re
import time
import base64

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b

try:
    import simplejson as json
except ImportError:
    import json

from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.common.types import InvalidCredsError
from libcloud.compute.types import NodeState
from libcloud.compute.base import NodeDriver, NodeSize, Node
from libcloud.compute.base import NodeImage
from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment
from libcloud.compute.deployment import MultiStepDeployment

NODE_STATE_MAP = {
    'active': NodeState.RUNNING,
    'dead': NodeState.TERMINATED,
    'dumped': NodeState.TERMINATED,
}

# Default timeout (in seconds) for the drive imaging process
IMAGING_TIMEOUT = 10 * 60

# ElasticStack doesn't specify special instance types, so I just specified
# some plans based on the other provider offerings.
# # Basically for CPU any value between 500Mhz and 20000Mhz should work, # 256MB to 8192MB for ram and 1GB to 2TB for disk. INSTANCE_TYPES = { 'small': { 'id': 'small', 'name': 'Small instance', 'cpu': 2000, 'memory': 1700, 'disk': 160, 'bandwidth': None, }, 'medium': { 'id': 'medium', 'name': 'Medium instance', 'cpu': 3000, 'memory': 4096, 'disk': 500, 'bandwidth': None, }, 'large': { 'id': 'large', 'name': 'Large instance', 'cpu': 4000, 'memory': 7680, 'disk': 850, 'bandwidth': None, }, 'extra-large': { 'id': 'extra-large', 'name': 'Extra Large instance', 'cpu': 8000, 'memory': 8192, 'disk': 1690, 'bandwidth': None, }, 'high-cpu-medium': { 'id': 'high-cpu-medium', 'name': 'High-CPU Medium instance', 'cpu': 5000, 'memory': 1700, 'disk': 350, 'bandwidth': None, }, 'high-cpu-extra-large': { 'id': 'high-cpu-extra-large', 'name': 'High-CPU Extra Large instance', 'cpu': 20000, 'memory': 7168, 'disk': 1690, 'bandwidth': None, }, } class ElasticStackException(Exception): def __str__(self): return self.args[0] def __repr__(self): return "" % (self.args[0]) class ElasticStackResponse(JsonResponse): def success(self): if self.status == 401: raise InvalidCredsError() return self.status >= 200 and self.status <= 299 def parse_error(self): error_header = self.headers.get('x-elastic-error', '') return 'X-Elastic-Error: %s (%s)' % (error_header, self.body.strip()) class ElasticStackNodeSize(NodeSize): def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver): self.id = id self.name = name self.cpu = cpu self.ram = ram self.disk = disk self.bandwidth = bandwidth self.price = price self.driver = driver def __repr__(self): return (('') % (self.id, self.name, self.cpu, self.ram, self.disk, self.bandwidth, self.price, self.driver.name)) class ElasticStackBaseConnection(ConnectionUserAndKey): """ Base connection class for the ElasticStack driver """ host = None responseCls = ElasticStackResponse def add_default_headers(self, headers): headers['Accept'] = 
'application/json' headers['Content-Type'] = 'application/json' headers['Authorization'] = \ ('Basic %s' % (base64.b64encode(b('%s:%s' % (self.user_id, self.key)))) .decode('utf-8')) return headers class ElasticStackBaseNodeDriver(NodeDriver): website = 'http://www.elasticstack.com' connectionCls = ElasticStackBaseConnection features = {"create_node": ["generates_password"]} def reboot_node(self, node): # Reboots the node response = self.connection.request( action='/servers/%s/reset' % (node.id), method='POST' ) return response.status == 204 def destroy_node(self, node): # Kills the server immediately response = self.connection.request( action='/servers/%s/destroy' % (node.id), method='POST' ) return response.status == 204 def list_images(self, location=None): # Returns a list of available pre-installed system drive images images = [] for key, value in self._standard_drives.items(): image = NodeImage( id=value['uuid'], name=value['description'], driver=self.connection.driver, extra={ 'size_gunzipped': value['size_gunzipped'] } ) images.append(image) return images def list_sizes(self, location=None): sizes = [] for key, value in INSTANCE_TYPES.items(): size = ElasticStackNodeSize( id=value['id'], name=value['name'], cpu=value['cpu'], ram=value['memory'], disk=value['disk'], bandwidth=value['bandwidth'], price=self._get_size_price(size_id=value['id']), driver=self.connection.driver ) sizes.append(size) return sizes def list_nodes(self): # Returns a list of active (running) nodes response = self.connection.request(action='/servers/info').object nodes = [] for data in response: node = self._to_node(data) nodes.append(node) return nodes def create_node(self, **kwargs): """Creates an ElasticStack instance @inherits: :class:`NodeDriver.create_node` :keyword name: String with a name for this new node (required) :type name: ``str`` :keyword smp: Number of virtual processors or None to calculate based on the cpu speed :type smp: ``int`` :keyword nic_model: e1000, rtl8139 or 
virtio (if not specified, e1000 is used) :type nic_model: ``str`` :keyword vnc_password: If set, the same password is also used for SSH access with user toor, otherwise VNC access is disabled and no SSH login is possible. :type vnc_password: ``str`` """ size = kwargs['size'] image = kwargs['image'] smp = kwargs.get('smp', 'auto') nic_model = kwargs.get('nic_model', 'e1000') vnc_password = ssh_password = kwargs.get('vnc_password', None) if nic_model not in ('e1000', 'rtl8139', 'virtio'): raise ElasticStackException('Invalid NIC model specified') # check that drive size is not smaller than pre installed image size # First we create a drive with the specified size drive_data = {} drive_data.update({'name': kwargs['name'], 'size': '%sG' % (kwargs['size'].disk)}) response = self.connection.request(action='/drives/create', data=json.dumps(drive_data), method='POST').object if not response: raise ElasticStackException('Drive creation failed') drive_uuid = response['drive'] # Then we image the selected pre-installed system drive onto it response = self.connection.request( action='/drives/%s/image/%s/gunzip' % (drive_uuid, image.id), method='POST' ) if response.status not in (200, 204): raise ElasticStackException('Drive imaging failed') # We wait until the drive is imaged and then boot up the node # (in most cases, the imaging process shouldn't take longer # than a few minutes) response = self.connection.request( action='/drives/%s/info' % (drive_uuid) ).object imaging_start = time.time() while 'imaging' in response: response = self.connection.request( action='/drives/%s/info' % (drive_uuid) ).object elapsed_time = time.time() - imaging_start if ('imaging' in response and elapsed_time >= IMAGING_TIMEOUT): raise ElasticStackException('Drive imaging timed out') time.sleep(1) node_data = {} node_data.update({'name': kwargs['name'], 'cpu': size.cpu, 'mem': size.ram, 'ide:0:0': drive_uuid, 'boot': 'ide:0:0', 'smp': smp}) node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 
'auto'}) if vnc_password: node_data.update({'vnc': 'auto', 'vnc:password': vnc_password}) response = self.connection.request( action='/servers/create', data=json.dumps(node_data), method='POST' ).object if isinstance(response, list): nodes = [self._to_node(node, ssh_password) for node in response] else: nodes = self._to_node(response, ssh_password) return nodes # Extension methods def ex_set_node_configuration(self, node, **kwargs): """ Changes the configuration of the running server :param node: Node which should be used :type node: :class:`Node` :param kwargs: keyword arguments :type kwargs: ``dict`` :rtype: ``bool`` """ valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$', '^boot$', '^nic:0:model$', '^nic:0:dhcp', '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$', '^vnc:ip$', '^vnc:password$', '^vnc:tls', '^ide:[0-1]:[0-1](:media)?$', '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$') invalid_keys = [] keys = list(kwargs.keys()) for key in keys: matches = False for regex in valid_keys: if re.match(regex, key): matches = True break if not matches: invalid_keys.append(key) if invalid_keys: raise ElasticStackException( 'Invalid configuration key specified: %s' % (',' .join(invalid_keys)) ) response = self.connection.request( action='/servers/%s/set' % (node.id), data=json.dumps(kwargs), method='POST' ) return (response.status == httplib.OK and response.body != '') def deploy_node(self, **kwargs): """ Create a new node, and start deployment. @inherits: :class:`NodeDriver.deploy_node` :keyword enable_root: If true, root password will be set to vnc_password (this will enable SSH access) and default 'toor' account will be deleted. 
:type enable_root: ``bool`` """ image = kwargs['image'] vnc_password = kwargs.get('vnc_password', None) enable_root = kwargs.get('enable_root', False) if not vnc_password: raise ValueError('You need to provide vnc_password argument ' 'if you want to use deployment') if (image in self._standard_drives and not self._standard_drives[image]['supports_deployment']): raise ValueError('Image %s does not support deployment' % (image.id)) if enable_root: script = ("unset HISTFILE;" "echo root:%s | chpasswd;" "sed -i '/^toor.*$/d' /etc/passwd /etc/shadow;" "history -c") % vnc_password root_enable_script = ScriptDeployment(script=script, delete=True) deploy = kwargs.get('deploy', None) if deploy: if (isinstance(deploy, ScriptDeployment) or isinstance(deploy, SSHKeyDeployment)): deployment = MultiStepDeployment([deploy, root_enable_script]) elif isinstance(deploy, MultiStepDeployment): deployment = deploy deployment.add(root_enable_script) else: deployment = root_enable_script kwargs['deploy'] = deployment if not kwargs.get('ssh_username', None): kwargs['ssh_username'] = 'toor' return super(ElasticStackBaseNodeDriver, self).deploy_node(**kwargs) def ex_shutdown_node(self, node): """ Sends the ACPI power-down event :param node: Node which should be used :type node: :class:`Node` :rtype: ``bool`` """ response = self.connection.request( action='/servers/%s/shutdown' % (node.id), method='POST' ) return response.status == 204 def ex_destroy_drive(self, drive_uuid): """ Deletes a drive :param drive_uuid: Drive uuid which should be used :type drive_uuid: ``str`` :rtype: ``bool`` """ response = self.connection.request( action='/drives/%s/destroy' % (drive_uuid), method='POST' ) return response.status == 204 # Helper methods def _to_node(self, data, ssh_password=None): try: state = NODE_STATE_MAP[data['status']] except KeyError: state = NodeState.UNKNOWN if 'nic:0:dhcp:ip' in data: if isinstance(data['nic:0:dhcp:ip'], list): public_ip = data['nic:0:dhcp:ip'] else: public_ip = 
[data['nic:0:dhcp:ip']] else: public_ip = [] extra = {'cpu': data['cpu'], 'mem': data['mem']} if 'started' in data: extra['started'] = data['started'] if 'smp' in data: extra['smp'] = data['smp'] if 'vnc:ip' in data: extra['vnc:ip'] = data['vnc:ip'] if 'vnc:password' in data: extra['vnc:password'] = data['vnc:password'] boot_device = data['boot'] if isinstance(boot_device, list): for device in boot_device: extra[device] = data[device] else: extra[boot_device] = data[boot_device] if ssh_password: extra.update({'password': ssh_password}) node = Node(id=data['server'], name=data['name'], state=state, public_ips=public_ip, private_ips=None, driver=self.connection.driver, extra=extra) return node apache-libcloud-2.2.1/libcloud/compute/drivers/voxel.py0000664000175000017500000002532012701023453023053 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Voxel VoxCloud driver """ import datetime import hashlib from libcloud.utils.py3 import b from libcloud.common.base import XmlResponse, ConnectionUserAndKey from libcloud.common.types import InvalidCredsError from libcloud.compute.providers import Provider from libcloud.compute.types import NodeState from libcloud.compute.base import Node, NodeDriver from libcloud.compute.base import NodeSize, NodeImage, NodeLocation VOXEL_API_HOST = "api.voxel.net" class VoxelResponse(XmlResponse): def __init__(self, response, connection): self.parsed = None super(VoxelResponse, self).__init__(response=response, connection=connection) def parse_body(self): if not self.body: return None if not self.parsed: self.parsed = super(VoxelResponse, self).parse_body() return self.parsed def parse_error(self): err_list = [] if not self.body: return None if not self.parsed: self.parsed = super(VoxelResponse, self).parse_body() for err in self.parsed.findall('err'): code = err.get('code') err_list.append("(%s) %s" % (code, err.get('msg'))) # From voxel docs: # 1: Invalid login or password # 9: Permission denied: user lacks access rights for this method if code == "1" or code == "9": # sucks, but only way to detect # bad authentication tokens so far raise InvalidCredsError(err_list[-1]) return "\n".join(err_list) def success(self): if not self.parsed: self.parsed = super(VoxelResponse, self).parse_body() stat = self.parsed.get('stat') if stat != "ok": return False return True class VoxelConnection(ConnectionUserAndKey): """ Connection class for the Voxel driver """ host = VOXEL_API_HOST responseCls = VoxelResponse def add_default_params(self, params): params = dict([(k, v) for k, v in list(params.items()) if v is not None]) params["key"] = self.user_id params["timestamp"] = datetime.datetime.utcnow().isoformat() + "+0000" keys = list(params.keys()) keys.sort() md5 = hashlib.md5() md5.update(b(self.key)) for key in keys: if params[key]: if not params[key] is None: md5.update(b("%s%s" % (key, 
params[key]))) else: md5.update(b(key)) params['api_sig'] = md5.hexdigest() return params VOXEL_INSTANCE_TYPES = {} RAM_PER_CPU = 2048 NODE_STATE_MAP = { 'IN_PROGRESS': NodeState.PENDING, 'QUEUED': NodeState.PENDING, 'SUCCEEDED': NodeState.RUNNING, 'shutting-down': NodeState.TERMINATED, 'terminated': NodeState.TERMINATED, 'unknown': NodeState.UNKNOWN, } class VoxelNodeDriver(NodeDriver): """ Voxel VoxCLOUD node driver """ connectionCls = VoxelConnection type = Provider.VOXEL name = 'Voxel VoxCLOUD' website = 'http://www.voxel.net/' def _initialize_instance_types(): for cpus in range(1, 14): if cpus == 1: name = "Single CPU" else: name = "%d CPUs" % cpus id = "%dcpu" % cpus ram = cpus * RAM_PER_CPU VOXEL_INSTANCE_TYPES[id] = { 'id': id, 'name': name, 'ram': ram, 'disk': None, 'bandwidth': None, 'price': None} features = {"create_node": [], "list_sizes": ["variable_disk"]} _initialize_instance_types() def list_nodes(self): params = {"method": "voxel.devices.list"} result = self.connection.request('/', params=params).object return self._to_nodes(result) def list_sizes(self, location=None): return [NodeSize(driver=self.connection.driver, **i) for i in list(VOXEL_INSTANCE_TYPES.values())] def list_images(self, location=None): params = {"method": "voxel.images.list"} result = self.connection.request('/', params=params).object return self._to_images(result) def create_node(self, **kwargs): """Create Voxel Node :keyword name: the name to assign the node (mandatory) :type name: ``str`` :keyword image: distribution to deploy :type image: :class:`NodeImage` :keyword size: the plan size to create (mandatory) Requires size.disk (GB) to be set manually :type size: :class:`NodeSize` :keyword location: which datacenter to create the node in :type location: :class:`NodeLocation` :keyword ex_privateip: Backend IP address to assign to node; must be chosen from the customer's private VLAN assignment. 
:type ex_privateip: ``str`` :keyword ex_publicip: Public-facing IP address to assign to node; must be chosen from the customer's public VLAN assignment. :type ex_publicip: ``str`` :keyword ex_rootpass: Password for root access; generated if unset. :type ex_rootpass: ``str`` :keyword ex_consolepass: Password for remote console; generated if unset. :type ex_consolepass: ``str`` :keyword ex_sshuser: Username for SSH access :type ex_sshuser: ``str`` :keyword ex_sshpass: Password for SSH access; generated if unset. :type ex_sshpass: ``str`` :keyword ex_voxel_access: Allow access Voxel administrative access. Defaults to False. :type ex_voxel_access: ``bool`` :rtype: :class:`Node` or ``None`` """ # assert that disk > 0 if not kwargs["size"].disk: raise ValueError("size.disk must be non-zero") # convert voxel_access to string boolean if needed voxel_access = kwargs.get("ex_voxel_access", None) if voxel_access is not None: voxel_access = "true" if voxel_access else "false" params = { 'method': 'voxel.voxcloud.create', 'hostname': kwargs["name"], 'disk_size': int(kwargs["size"].disk), 'facility': kwargs["location"].id, 'image_id': kwargs["image"].id, 'processing_cores': kwargs["size"].ram / RAM_PER_CPU, 'backend_ip': kwargs.get("ex_privateip", None), 'frontend_ip': kwargs.get("ex_publicip", None), 'admin_password': kwargs.get("ex_rootpass", None), 'console_password': kwargs.get("ex_consolepass", None), 'ssh_username': kwargs.get("ex_sshuser", None), 'ssh_password': kwargs.get("ex_sshpass", None), 'voxel_access': voxel_access, } object = self.connection.request('/', params=params).object if self._getstatus(object): return Node( id=object.findtext("device/id"), name=kwargs["name"], state=NODE_STATE_MAP[object.findtext("device/status")], public_ips=kwargs.get("publicip", None), private_ips=kwargs.get("privateip", None), driver=self.connection.driver ) else: return None def reboot_node(self, node): params = {'method': 'voxel.devices.power', 'device_id': node.id, 'power_action': 
'reboot'} return self._getstatus( self.connection.request('/', params=params).object) def destroy_node(self, node): params = {'method': 'voxel.voxcloud.delete', 'device_id': node.id} return self._getstatus( self.connection.request('/', params=params).object) def list_locations(self): params = {"method": "voxel.voxcloud.facilities.list"} result = self.connection.request('/', params=params).object nodes = self._to_locations(result) return nodes def _getstatus(self, element): status = element.attrib["stat"] return status == "ok" def _to_locations(self, object): return [NodeLocation(element.attrib["label"], element.findtext("description"), element.findtext("description"), self) for element in object.findall('facilities/facility')] def _to_nodes(self, object): nodes = [] for element in object.findall('devices/device'): if element.findtext("type") == "Virtual Server": try: state = self.NODE_STATE_MAP[element.attrib['status']] except KeyError: state = NodeState.UNKNOWN public_ip = private_ip = None ipassignments = element.findall("ipassignments/ipassignment") for ip in ipassignments: if ip.attrib["type"] == "frontend": public_ip = ip.text elif ip.attrib["type"] == "backend": private_ip = ip.text nodes.append(Node(id=element.attrib['id'], name=element.attrib['label'], state=state, public_ips=public_ip, private_ips=private_ip, driver=self.connection.driver)) return nodes def _to_images(self, object): images = [] for element in object.findall("images/image"): images.append(NodeImage(id=element.attrib["id"], name=element.attrib["summary"], driver=self.connection.driver)) return images apache-libcloud-2.2.1/libcloud/compute/drivers/libvirt_driver.py0000664000175000017500000003352213153541406024754 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement import re import os import time import platform import subprocess import mimetypes from os.path import join as pjoin from collections import defaultdict from libcloud.utils.py3 import ET from libcloud.compute.base import NodeDriver, Node from libcloud.compute.base import NodeState from libcloud.compute.types import Provider from libcloud.utils.networking import is_public_subnet from libcloud.utils.py3 import ensure_string try: import libvirt have_libvirt = True except ImportError: have_libvirt = False class LibvirtNodeDriver(NodeDriver): """ Libvirt (http://libvirt.org/) node driver. To enable debug mode, set LIBVIR_DEBUG environment variable. """ type = Provider.LIBVIRT name = 'Libvirt' website = 'http://libvirt.org/' NODE_STATE_MAP = { 0: NodeState.TERMINATED, # no state 1: NodeState.RUNNING, # domain is running 2: NodeState.PENDING, # domain is blocked on resource 3: NodeState.TERMINATED, # domain is paused by user 4: NodeState.TERMINATED, # domain is being shut down 5: NodeState.TERMINATED, # domain is shut off 6: NodeState.UNKNOWN, # domain is crashed 7: NodeState.UNKNOWN, # domain is suspended by guest power management } def __init__(self, uri, key=None, secret=None): """ :param uri: Hypervisor URI (e.g. vbox:///session, qemu:///system, etc.). 
:type uri: ``str`` :param key: the username for a remote libvirtd server :type key: ``str`` :param secret: the password for a remote libvirtd server :type key: ``str`` """ if not have_libvirt: raise RuntimeError('Libvirt driver requires \'libvirt\' Python ' + 'package') self._uri = uri self._key = key self._secret = secret if uri is not None and '+tcp' in self._uri: if key is None and secret is None: raise RuntimeError('The remote Libvirt instance requires ' + 'authentication, please set \'key\' and ' + '\'secret\' parameters') auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], self._cred_callback, None] self.connection = libvirt.openAuth(uri, auth, 0) else: self.connection = libvirt.open(uri) if uri is None: self._uri = self.connection.getInfo() def _cred_callback(self, cred, user_data): """ Callback for the authentication scheme, which will provide username and password for the login. Reference: ( http://bit.ly/1U5yyQg ) :param cred: The credentials requested and the return :type cred: ``list`` :param user_data: Custom data provided to the authentication routine :type user_data: ``list`` :rtype: ``int`` """ for credential in cred: if credential[0] == libvirt.VIR_CRED_AUTHNAME: credential[4] = self._key elif credential[0] == libvirt.VIR_CRED_PASSPHRASE: credential[4] = self._secret return 0 def list_nodes(self): domains = self.connection.listAllDomains() nodes = self._to_nodes(domains=domains) return nodes def reboot_node(self, node): domain = self._get_domain_for_node(node=node) return domain.reboot(flags=0) == 0 def destroy_node(self, node): domain = self._get_domain_for_node(node=node) return domain.destroy() == 0 def ex_start_node(self, node): """ Start a stopped node. :param node: Node which should be used :type node: :class:`Node` :rtype: ``bool`` """ domain = self._get_domain_for_node(node=node) return domain.create() == 0 def ex_shutdown_node(self, node): """ Shutdown a running node. 
Note: Usually this will result in sending an ACPI event to the node. :param node: Node which should be used :type node: :class:`Node` :rtype: ``bool`` """ domain = self._get_domain_for_node(node=node) return domain.shutdown() == 0 def ex_suspend_node(self, node): """ Suspend a running node. :param node: Node which should be used :type node: :class:`Node` :rtype: ``bool`` """ domain = self._get_domain_for_node(node=node) return domain.suspend() == 0 def ex_resume_node(self, node): """ Resume a suspended node. :param node: Node which should be used :type node: :class:`Node` :rtype: ``bool`` """ domain = self._get_domain_for_node(node=node) return domain.resume() == 0 def ex_get_node_by_uuid(self, uuid): """ Retrieve Node object for a domain with a provided uuid. :param uuid: Uuid of the domain. :type uuid: ``str`` """ domain = self._get_domain_for_uuid(uuid=uuid) node = self._to_node(domain=domain) return node def ex_get_node_by_name(self, name): """ Retrieve Node object for a domain with a provided name. :param name: Name of the domain. :type name: ``str`` """ domain = self._get_domain_for_name(name=name) node = self._to_node(domain=domain) return node def ex_take_node_screenshot(self, node, directory, screen=0): """ Take a screenshot of a monitoring of a running instance. :param node: Node to take the screenshot of. :type node: :class:`libcloud.compute.base.Node` :param directory: Path where the screenshot will be saved. :type directory: ``str`` :param screen: ID of the monitor to take the screenshot of. :type screen: ``int`` :return: Full path where the screenshot has been saved. 
:rtype: ``str`` """ if not os.path.exists(directory) or not os.path.isdir(directory): raise ValueError('Invalid value for directory argument') domain = self._get_domain_for_node(node=node) stream = self.connection.newStream() mime_type = domain.screenshot(stream=stream, screen=0) extensions = mimetypes.guess_all_extensions(type=mime_type) if extensions: extension = extensions[0] else: extension = '.png' name = 'screenshot-%s%s' % (int(time.time()), extension) file_path = pjoin(directory, name) with open(file_path, 'wb') as fp: def write(stream, buf, opaque): fp.write(buf) stream.recvAll(write, None) try: stream.finish() except Exception: # Finish is not supported by all backends pass return file_path def ex_get_hypervisor_hostname(self): """ Return a system hostname on which the hypervisor is running. """ hostname = self.connection.getHostname() return hostname def ex_get_hypervisor_sysinfo(self): """ Retrieve hypervisor system information. :rtype: ``dict`` """ xml = self.connection.getSysinfo() etree = ET.XML(xml) attributes = ['bios', 'system', 'processor', 'memory_device'] sysinfo = {} for attribute in attributes: element = etree.find(attribute) entries = self._get_entries(element=element) sysinfo[attribute] = entries return sysinfo def _to_nodes(self, domains): nodes = [self._to_node(domain=domain) for domain in domains] return nodes def _to_node(self, domain): state, max_mem, memory, vcpu_count, used_cpu_time = domain.info() state = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN) public_ips, private_ips = [], [] ip_addresses = self._get_ip_addresses_for_domain(domain) for ip_address in ip_addresses: if is_public_subnet(ip_address): public_ips.append(ip_address) else: private_ips.append(ip_address) extra = {'uuid': domain.UUIDString(), 'os_type': domain.OSType(), 'types': self.connection.getType(), 'used_memory': memory / 1024, 'vcpu_count': vcpu_count, 'used_cpu_time': used_cpu_time} node = Node(id=domain.ID(), name=domain.name(), state=state, 
public_ips=public_ips, private_ips=private_ips, driver=self, extra=extra) node._uuid = domain.UUIDString() # we want to use a custom UUID return node def _get_ip_addresses_for_domain(self, domain): """ Retrieve IP addresses for the provided domain. Note: This functionality is currently only supported on Linux and only works if this code is run on the same machine as the VMs run on. :return: IP addresses for the provided domain. :rtype: ``list`` """ result = [] if platform.system() != 'Linux': # Only Linux is supported atm return result if '///' not in self._uri: # Only local libvirtd is supported atm return result mac_addresses = self._get_mac_addresses_for_domain(domain=domain) arp_table = {} try: cmd = ['arp', '-an'] child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, _ = child.communicate() arp_table = self._parse_ip_table_arp(arp_output=stdout) except OSError as e: if e.errno == 2: cmd = ['ip', 'neigh'] child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, _ = child.communicate() arp_table = self._parse_ip_table_neigh(arp_output=stdout) for mac_address in mac_addresses: if mac_address in arp_table: ip_addresses = arp_table[mac_address] result.extend(ip_addresses) return result def _get_mac_addresses_for_domain(self, domain): """ Parses network interface MAC addresses from the provided domain. """ xml = domain.XMLDesc() etree = ET.XML(xml) elems = etree.findall("devices/interface[@type='network']/mac") result = [] for elem in elems: mac_address = elem.get('address') result.append(mac_address) return result def _get_domain_for_node(self, node): """ Return libvirt domain object for the provided node. """ domain = self.connection.lookupByUUIDString(node.uuid) return domain def _get_domain_for_uuid(self, uuid): """ Return libvirt domain object for the provided uuid. 
""" domain = self.connection.lookupByUUIDString(uuid) return domain def _get_domain_for_name(self, name): """ Return libvirt domain object for the provided name. """ domain = self.connection.lookupByName(name) return domain def _get_entries(self, element): """ Parse entries dictionary. :rtype: ``dict`` """ elements = element.findall('entry') result = {} for element in elements: name = element.get('name') value = element.text result[name] = value return result def _parse_ip_table_arp(self, arp_output): """ Sets up the regexp for parsing out IP addresses from the 'arp -an' command and pass it along to the parser function. :return: Dictionary from the parsing funtion :rtype: ``dict`` """ arp_regex = re.compile('.*?\((.*?)\) at (.*?)\s+') return self._parse_mac_addr_table(arp_output, arp_regex) def _parse_ip_table_neigh(self, ip_output): """ Sets up the regexp for parsing out IP addresses from the 'ip neighbor' command and pass it along to the parser function. :return: Dictionary from the parsing function :rtype: ``dict`` """ ip_regex = re.compile('(.*?)\s+.*lladdr\s+(.*?)\s+') return self._parse_mac_addr_table(ip_output, ip_regex) def _parse_mac_addr_table(self, cmd_output, mac_regex): """ Parse the command output and return a dictionary which maps mac address to an IP address. :return: Dictionary which maps mac address to IP address. :rtype: ``dict`` """ lines = ensure_string(cmd_output).split('\n') arp_table = defaultdict(list) for line in lines: match = mac_regex.match(line) if not match: continue groups = match.groups() ip_address = groups[0] mac_address = groups[1] arp_table[mac_address].append(ip_address) return arp_table apache-libcloud-2.2.1/libcloud/compute/drivers/brightbox.py0000664000175000017500000002522612701023453023713 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Brightbox Driver """ from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.common.brightbox import BrightboxConnection from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeDriver from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation import base64 API_VERSION = '1.0' def _extract(d, keys): return dict((k, d[k]) for k in keys if k in d and d[k] is not None) class BrightboxNodeDriver(NodeDriver): """ Brightbox node driver """ connectionCls = BrightboxConnection type = Provider.BRIGHTBOX name = 'Brightbox' website = 'http://www.brightbox.co.uk/' NODE_STATE_MAP = {'creating': NodeState.PENDING, 'active': NodeState.RUNNING, 'inactive': NodeState.UNKNOWN, 'deleting': NodeState.UNKNOWN, 'deleted': NodeState.TERMINATED, 'failed': NodeState.UNKNOWN, 'unavailable': NodeState.UNKNOWN} def __init__(self, key, secret=None, secure=True, host=None, port=None, api_version=API_VERSION, **kwargs): super(BrightboxNodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, api_version=api_version, **kwargs) def _to_node(self, data): extra_data = _extract(data, ['fqdn', 'user_data', 'status', 'interfaces', 'snapshots', 'server_groups', 'hostname', 'started_at', 'created_at', 'deleted_at']) 
extra_data['zone'] = self._to_location(data['zone']) ipv6_addresses = [interface['ipv6_address'] for interface in data['interfaces'] if 'ipv6_address' in interface] private_ips = [interface['ipv4_address'] for interface in data['interfaces'] if 'ipv4_address' in interface] public_ips = [cloud_ip['public_ip'] for cloud_ip in data['cloud_ips']] public_ips += ipv6_addresses return Node( id=data['id'], name=data['name'], state=self.NODE_STATE_MAP[data['status']], private_ips=private_ips, public_ips=public_ips, driver=self.connection.driver, size=self._to_size(data['server_type']), image=self._to_image(data['image']), extra=extra_data ) def _to_image(self, data): extra_data = _extract(data, ['arch', 'compatibility_mode', 'created_at', 'description', 'disk_size', 'min_ram', 'official', 'owner', 'public', 'source', 'source_type', 'status', 'username', 'virtual_size', 'licence_name']) if data.get('ancestor', None): extra_data['ancestor'] = self._to_image(data['ancestor']) return NodeImage( id=data['id'], name=data['name'], driver=self, extra=extra_data ) def _to_size(self, data): return NodeSize( id=data['id'], name=data['name'], ram=data['ram'], disk=data['disk_size'], bandwidth=0, price=0, driver=self ) def _to_location(self, data): if data: return NodeLocation( id=data['id'], name=data['handle'], country='GB', driver=self ) else: return None def _post(self, path, data={}): headers = {'Content-Type': 'application/json'} return self.connection.request(path, data=data, headers=headers, method='POST') def _put(self, path, data={}): headers = {'Content-Type': 'application/json'} return self.connection.request(path, data=data, headers=headers, method='PUT') def create_node(self, **kwargs): """Create a new Brightbox node Reference: https://api.gb1.brightbox.com/1.0/#server_create_server @inherits: :class:`NodeDriver.create_node` :keyword ex_userdata: User data :type ex_userdata: ``str`` :keyword ex_servergroup: Name or list of server group ids to add server to :type 
ex_servergroup: ``str`` or ``list`` of ``str`` """ data = { 'name': kwargs['name'], 'server_type': kwargs['size'].id, 'image': kwargs['image'].id, } if 'ex_userdata' in kwargs: data['user_data'] = base64.b64encode(b(kwargs['ex_userdata'])) \ .decode('ascii') if 'location' in kwargs: data['zone'] = kwargs['location'].id if 'ex_servergroup' in kwargs: if not isinstance(kwargs['ex_servergroup'], list): kwargs['ex_servergroup'] = [kwargs['ex_servergroup']] data['server_groups'] = kwargs['ex_servergroup'] data = self._post('/%s/servers' % self.api_version, data).object return self._to_node(data) def destroy_node(self, node): response = self.connection.request( '/%s/servers/%s' % (self.api_version, node.id), method='DELETE') return response.status == httplib.ACCEPTED def list_nodes(self): data = self.connection.request('/%s/servers' % self.api_version).object return list(map(self._to_node, data)) def list_images(self, location=None): data = self.connection.request('/%s/images' % self.api_version).object return list(map(self._to_image, data)) def list_sizes(self): data = self.connection.request('/%s/server_types' % self.api_version) \ .object return list(map(self._to_size, data)) def list_locations(self): data = self.connection.request('/%s/zones' % self.api_version).object return list(map(self._to_location, data)) def ex_list_cloud_ips(self): """ List Cloud IPs @note: This is an API extension for use on Brightbox :rtype: ``list`` of ``dict`` """ return self.connection.request('/%s/cloud_ips' % self.api_version) \ .object def ex_create_cloud_ip(self, reverse_dns=None): """ Requests a new cloud IP address for the account @note: This is an API extension for use on Brightbox :param reverse_dns: Reverse DNS hostname :type reverse_dns: ``str`` :rtype: ``dict`` """ params = {} if reverse_dns: params['reverse_dns'] = reverse_dns return self._post('/%s/cloud_ips' % self.api_version, params).object def ex_update_cloud_ip(self, cloud_ip_id, reverse_dns): """ Update some details of 
the cloud IP address @note: This is an API extension for use on Brightbox :param cloud_ip_id: The id of the cloud ip. :type cloud_ip_id: ``str`` :param reverse_dns: Reverse DNS hostname :type reverse_dns: ``str`` :rtype: ``dict`` """ response = self._put('/%s/cloud_ips/%s' % (self.api_version, cloud_ip_id), {'reverse_dns': reverse_dns}) return response.status == httplib.OK def ex_map_cloud_ip(self, cloud_ip_id, interface_id): """ Maps (or points) a cloud IP address at a server's interface or a load balancer to allow them to respond to public requests @note: This is an API extension for use on Brightbox :param cloud_ip_id: The id of the cloud ip. :type cloud_ip_id: ``str`` :param interface_id: The Interface ID or LoadBalancer ID to which this Cloud IP should be mapped to :type interface_id: ``str`` :return: True if the mapping was successful. :rtype: ``bool`` """ response = self._post('/%s/cloud_ips/%s/map' % (self.api_version, cloud_ip_id), {'destination': interface_id}) return response.status == httplib.ACCEPTED def ex_unmap_cloud_ip(self, cloud_ip_id): """ Unmaps a cloud IP address from its current destination making it available to remap. This remains in the account's pool of addresses @note: This is an API extension for use on Brightbox :param cloud_ip_id: The id of the cloud ip. :type cloud_ip_id: ``str`` :return: True if the unmap was successful. :rtype: ``bool`` """ response = self._post('/%s/cloud_ips/%s/unmap' % (self.api_version, cloud_ip_id)) return response.status == httplib.ACCEPTED def ex_destroy_cloud_ip(self, cloud_ip_id): """ Release the cloud IP address from the account's ownership @note: This is an API extension for use on Brightbox :param cloud_ip_id: The id of the cloud ip. :type cloud_ip_id: ``str`` :return: True if the unmap was successful. 
:rtype: ``bool`` """ response = self.connection.request( '/%s/cloud_ips/%s' % (self.api_version, cloud_ip_id), method='DELETE') return response.status == httplib.OK apache-libcloud-2.2.1/libcloud/compute/drivers/azure_arm.py0000664000175000017500000021670113160302324023705 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Driver for Microsoft Azure Resource Manager (ARM) Virtual Machines provider. 
http://azure.microsoft.com/en-us/services/virtual-machines/ """ import base64 import binascii import os import time from libcloud.common.azure_arm import AzureResourceManagementConnection from libcloud.compute.providers import Provider from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey from libcloud.compute.types import (NodeState, StorageVolumeState, VolumeSnapshotState) from libcloud.common.types import LibcloudError from libcloud.storage.types import ObjectDoesNotExistError from libcloud.common.exceptions import BaseHTTPError from libcloud.storage.drivers.azure_blobs import AzureBlobsStorageDriver from libcloud.utils.py3 import basestring from libcloud.utils import iso8601 RESOURCE_API_VERSION = '2016-04-30-preview' class AzureImage(NodeImage): """Represents a Marketplace node image that an Azure VM can boot from.""" def __init__(self, version, sku, offer, publisher, location, driver): self.publisher = publisher self.offer = offer self.sku = sku self.version = version self.location = location urn = "%s:%s:%s:%s" % (self.publisher, self.offer, self.sku, self.version) name = "%s %s %s %s" % (self.publisher, self.offer, self.sku, self.version) super(AzureImage, self).__init__(urn, name, driver) def __repr__(self): return (('') % (self.id, self.name, self.location)) class AzureVhdImage(NodeImage): """Represents a VHD node image that an Azure VM can boot from.""" def __init__(self, storage_account, blob_container, name, driver): urn = "https://%s.blob%s/%s/%s" % (storage_account, driver.connection.storage_suffix, blob_container, name) super(AzureVhdImage, self).__init__(urn, name, driver) def __repr__(self): return (('') % (self.id, self.name)) class AzureResourceGroup(object): """Represent an Azure resource group.""" def __init__(self, id, name, location, extra): self.id = id self.name = name 
self.location = location self.extra = extra def __repr__(self): return (('') % (self.id, self.name, self.location)) class AzureNetworkSecurityGroup(object): """Represent an Azure network security group.""" def __init__(self, id, name, location, extra): self.id = id self.name = name self.location = location self.extra = extra def __repr__(self): return ( ('') % (self.id, self.name, self.location)) class AzureNetwork(object): """Represent an Azure virtual network.""" def __init__(self, id, name, location, extra): self.id = id self.name = name self.location = location self.extra = extra def __repr__(self): return (('') % (self.id, self.name, self.location)) class AzureSubnet(object): """Represents a subnet of an Azure virtual network.""" def __init__(self, id, name, extra): self.id = id self.name = name self.extra = extra def __repr__(self): return (('') % (self.id, self.name)) class AzureNic(object): """Represents an Azure virtual network interface controller (NIC).""" def __init__(self, id, name, location, extra): self.id = id self.name = name self.location = location self.extra = extra def __repr__(self): return (('') % (self.id, self.name)) class AzureIPAddress(object): """Represents an Azure public IP address resource.""" def __init__(self, id, name, extra): self.id = id self.name = name self.extra = extra def __repr__(self): return (('') % (self.id, self.name)) class AzureNodeDriver(NodeDriver): """Compute node driver for Azure Resource Manager.""" connectionCls = AzureResourceManagementConnection name = 'Azure Virtual machines' website = 'http://azure.microsoft.com/en-us/services/virtual-machines/' type = Provider.AZURE_ARM features = {'create_node': ['ssh_key', 'password']} # The API doesn't provide state or country information, so fill it in. 
# Information from https://azure.microsoft.com/en-us/regions/ _location_to_country = { "centralus": "Iowa, USA", "eastus": "Virginia, USA", "eastus2": "Virginia, USA", "usgoviowa": "Iowa, USA", "usgovvirginia": "Virginia, USA", "northcentralus": "Illinois, USA", "southcentralus": "Texas, USA", "westus": "California, USA", "northeurope": "Ireland", "westeurope": "Netherlands", "eastasia": "Hong Kong", "southeastasia": "Singapore", "japaneast": "Tokyo, Japan", "japanwest": "Osaka, Japan", "brazilsouth": "Sao Paulo State, Brazil", "australiaeast": "New South Wales, Australia", "australiasoutheast": "Victoria, Australia" } SNAPSHOT_STATE_MAP = { 'creating': VolumeSnapshotState.CREATING, 'updating': VolumeSnapshotState.UPDATING, 'succeeded': VolumeSnapshotState.AVAILABLE, 'failed': VolumeSnapshotState.ERROR } def __init__(self, tenant_id, subscription_id, key, secret, secure=True, host=None, port=None, api_version=None, region=None, **kwargs): self.tenant_id = tenant_id self.subscription_id = subscription_id self.cloud_environment = kwargs.get("cloud_environment") super(AzureNodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, api_version=api_version, region=region, **kwargs) if self.region is not None: loc_id = self.region.lower().replace(" ", "") country = self._location_to_country.get(loc_id) self.default_location = NodeLocation(loc_id, self.region, country, self) else: self.default_location = None def list_locations(self): """ List data centers available with the current subscription. :return: list of node location objects :rtype: ``list`` of :class:`.NodeLocation` """ action = "/subscriptions/%s/providers/Microsoft.Compute" % ( self.subscription_id) r = self.connection.request(action, params={"api-version": "2015-01-01"}) for rt in r.object["resourceTypes"]: if rt["resourceType"] == "virtualMachines": return [self._to_location(l) for l in rt["locations"]] return [] def list_sizes(self, location=None): """ List available VM sizes. 
:param location: The location at which to list sizes (if None, use default location specified as 'region' in __init__) :type location: :class:`.NodeLocation` :return: list of node size objects :rtype: ``list`` of :class:`.NodeSize` """ if location is None: if self.default_location: location = self.default_location else: raise ValueError("location is required.") action = \ "/subscriptions/%s/providers/Microsoft" \ ".Compute/locations/%s/vmSizes" \ % (self.subscription_id, location.id) r = self.connection.request(action, params={"api-version": "2015-06-15"}) return [self._to_node_size(d) for d in r.object["value"]] def list_images(self, location=None, ex_publisher=None, ex_offer=None, ex_sku=None, ex_version=None): """ List available VM images to boot from. :param location: The location at which to list images (if None, use default location specified as 'region' in __init__) :type location: :class:`.NodeLocation` :param ex_publisher: Filter by publisher, or None to list all publishers. :type ex_publisher: ``str`` :param ex_offer: Filter by offer, or None to list all offers. :type ex_offer: ``str`` :param ex_sku: Filter by sku, or None to list all skus. :type ex_sku: ``str`` :param ex_version: Filter by version, or None to list all versions. :type ex_version: ``str`` :return: list of node image objects. 
:rtype: ``list`` of :class:`.AzureImage` """ images = [] if location is None: locations = [self.default_location] else: locations = [location] for loc in locations: if not ex_publisher: publishers = self.ex_list_publishers(loc) else: publishers = [( "/subscriptions/%s/providers/Microsoft" ".Compute/locations/%s/publishers/%s" % (self.subscription_id, loc.id, ex_publisher), ex_publisher)] for pub in publishers: if not ex_offer: offers = self.ex_list_offers(pub[0]) else: offers = [("%s/artifacttypes/vmimage/offers/%s" % ( pub[0], ex_offer), ex_offer)] for off in offers: if not ex_sku: skus = self.ex_list_skus(off[0]) else: skus = [("%s/skus/%s" % (off[0], ex_sku), ex_sku)] for sku in skus: if not ex_version: versions = self.ex_list_image_versions(sku[0]) else: versions = [("%s/versions/%s" % ( sku[0], ex_version), ex_version)] for v in versions: images.append(AzureImage(v[1], sku[1], off[1], pub[1], loc.id, self.connection.driver)) return images def get_image(self, image_id, location=None): """Returns a single node image from a provider. :param image_id: Either an image urn in the form `Publisher:Offer:Sku:Version` or a Azure blob store URI in the form `http://storageaccount.blob.core.windows.net/container/image.vhd` pointing to a VHD file. :type image_id: ``str`` :param location: The location at which to search for the image (if None, use default location specified as 'region' in __init__) :type location: :class:`.NodeLocation` :rtype :class:`.AzureImage`: or :class:`.AzureVhdImage`: :return: AzureImage or AzureVhdImage instance on success. """ if image_id.startswith("http"): (storageAccount, blobContainer, blob) = _split_blob_uri(image_id) return AzureVhdImage(storageAccount, blobContainer, blob, self) else: (ex_publisher, ex_offer, ex_sku, ex_version) = image_id.split(":") i = self.list_images(location, ex_publisher, ex_offer, ex_sku, ex_version) return i[0] if i else None def list_nodes(self, ex_resource_group=None, ex_fetch_nic=True): """ List all nodes. 
:param ex_resource_group: List nodes in a specific resource group. :type ex_urn: ``str`` :param ex_fetch_nic: Fetch NIC resources in order to get IP address information for nodes (requires extra API calls). :type ex_urn: ``bool`` :return: list of node objects :rtype: ``list`` of :class:`.Node` """ if ex_resource_group: action = "/subscriptions/%s/resourceGroups/%s/" \ "providers/Microsoft.Compute/virtualMachines" \ % (self.subscription_id, ex_resource_group) else: action = "/subscriptions/%s/providers/Microsoft.Compute/" \ "virtualMachines" \ % (self.subscription_id) r = self.connection.request(action, params={"api-version": "2015-06-15"}) return [self._to_node(n, fetch_nic=ex_fetch_nic) for n in r.object["value"]] def create_node(self, name, size, image, auth, ex_resource_group, ex_storage_account, ex_blob_container="vhds", location=None, ex_user_name="azureuser", ex_network=None, ex_subnet=None, ex_nic=None, ex_tags={}, ex_customdata="", ex_use_managed_disks=False, ex_storage_account_type="Standard_LRS"): """Create a new node instance. This instance will be started automatically. This driver supports the ``ssh_key`` feature flag for ``created_node`` so you can upload a public key into the new instance:: >>> from libcloud.compute.drivers.azure_arm import AzureNodeDriver >>> driver = AzureNodeDriver(...) >>> auth = NodeAuthSSHKey('pubkey data here') >>> node = driver.create_node("test_node", auth=auth) This driver also supports the ``password`` feature flag for ``create_node`` so you can set a password:: >>> driver = AzureNodeDriver(...) >>> auth = NodeAuthPassword('mysecretpassword') >>> node = driver.create_node("test_node", auth=auth, ...) If you don't provide the ``auth`` argument libcloud will assign a password: >>> driver = AzureNodeDriver(...) >>> node = driver.create_node("test_node", ...) 
>>> password = node.extra["properties"] \ ["osProfile"]["adminPassword"] :param name: String with a name for this new node (required) :type name: ``str`` :param size: The size of resources allocated to this node. (required) :type size: :class:`.NodeSize` :param image: OS Image to boot on node. (required) :type image: :class:`.AzureImage` :param location: Which data center to create a node in. (if None, use default location specified as 'region' in __init__) :type location: :class:`.NodeLocation` :param auth: Initial authentication information for the node (optional) :type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword` :param ex_resource_group: The resource group in which to create the node :type ex_resource_group: ``str`` :param ex_storage_account: The storage account id in which to store the node's disk image. Note: when booting from a user image (AzureVhdImage) the source image and the node image must use the same storage account. :type ex_storage_account: ``str`` :param ex_blob_container: The name of the blob container on the storage account in which to store the node's disk image (optional, default "vhds") :type ex_blob_container: ``str`` :param ex_user_name: User name for the initial admin user (optional, default "azureuser") :type ex_user_name: ``str`` :param ex_network: The virtual network the node will be attached to. Must provide either `ex_network` (to create a default NIC for the node on the given network) or `ex_nic` (to supply the NIC explicitly). :type ex_network: ``str`` :param ex_subnet: If ex_network is provided, the subnet of the virtual network the node will be attached to. Optional, default is the "default" subnet. :type ex_subnet: ``str`` :param ex_nic: A virtual NIC to attach to this Node, from `ex_create_network_interface` or `ex_get_nic`. Must provide either `ex_nic` (to supply the NIC explicitly) or ex_network (to create a default NIC for the node on the given network). 
:type ex_nic: :class:`AzureNic` :param ex_tags: Optional tags to associate with this node. :type ex_tags: ``dict`` :param ex_customdata: Custom data that will be placed in the file /var/lib/waagent/CustomData https://azure.microsoft.com/en-us/documentation/ \ articles/virtual-machines-how-to-inject-custom-data/ :type ex_customdata: ``str`` :param ex_use_managed_disks: Enable this feature to have Azure automatically manage the availability of disks to provide data redundancy and fault tolerance, without creating and managing storage accounts on your own. Managed disks may not be available in all regions (default False). :type ex_use_managed_disks: ``bool`` :param ex_storage_account_type: The Storage Account type, ``Standard_LRS``(HDD disks) or ``Premium_LRS``(SSD disks). :type ex_storage_account_type: str :return: The newly created node. :rtype: :class:`.Node` """ if location is None: location = self.default_location if ex_nic is None: if ex_network is None: raise ValueError("Must provide either ex_network or ex_nic") if ex_subnet is None: ex_subnet = "default" subnet_id = "/subscriptions/%s/resourceGroups/%s/providers" \ "/Microsoft.Network/virtualnetworks/%s/subnets/%s" % \ (self.subscription_id, ex_resource_group, ex_network, ex_subnet) subnet = AzureSubnet(subnet_id, ex_subnet, {}) ex_nic = self.ex_create_network_interface(name + "-nic", subnet, ex_resource_group, location) auth = self._get_and_check_auth(auth) target = "/subscriptions/%s/resourceGroups/%s/providers" \ "/Microsoft.Compute/virtualMachines/%s" % \ (self.subscription_id, ex_resource_group, name) if isinstance(image, AzureVhdImage): instance_vhd = self._get_instance_vhd( name=name, ex_resource_group=ex_resource_group, ex_storage_account=ex_storage_account, ex_blob_container=ex_blob_container) storage_profile = { "osDisk": { "name": name, "osType": "linux", "caching": "ReadWrite", "createOption": "FromImage", "image": { "uri": image.id }, "vhd": { "uri": instance_vhd, } } } if ex_use_managed_disks: 
raise LibcloudError( "Creating managed OS disk from %s image " "type is not supported." % type(image)) elif isinstance(image, AzureImage): storage_profile = { "imageReference": { "publisher": image.publisher, "offer": image.offer, "sku": image.sku, "version": image.version }, "osDisk": { "name": name, "osType": "linux", "caching": "ReadWrite", "createOption": "FromImage" } } if ex_use_managed_disks: storage_profile["osDisk"]["managedDisk"] = { "storageAccountType": ex_storage_account_type } else: instance_vhd = self._get_instance_vhd( name=name, ex_resource_group=ex_resource_group, ex_storage_account=ex_storage_account, ex_blob_container=ex_blob_container) storage_profile["osDisk"]["vhd"] = { "uri": instance_vhd } else: raise LibcloudError( "Unknown image type %s, expected one of AzureImage, " "AzureVhdImage." % type(image)) data = { "id": target, "name": name, "type": "Microsoft.Compute/virtualMachines", "location": location.id, "tags": ex_tags, "properties": { "hardwareProfile": { "vmSize": size.id }, "storageProfile": storage_profile, "osProfile": { "computerName": name }, "networkProfile": { "networkInterfaces": [ { "id": ex_nic.id } ] } } } if ex_customdata: data["properties"]["osProfile"]["customData"] = \ base64.b64encode(ex_customdata) data["properties"]["osProfile"]["adminUsername"] = ex_user_name if isinstance(auth, NodeAuthSSHKey): data["properties"]["osProfile"]["adminPassword"] = \ binascii.hexlify(os.urandom(20)) data["properties"]["osProfile"]["linuxConfiguration"] = { "disablePasswordAuthentication": "true", "ssh": { "publicKeys": [ { "path": '/home/%s/.ssh/authorized_keys' % ( ex_user_name), "keyData": auth.pubkey } ] } } elif isinstance(auth, NodeAuthPassword): data["properties"]["osProfile"]["linuxConfiguration"] = { "disablePasswordAuthentication": "false" } data["properties"]["osProfile"]["adminPassword"] = auth.password else: raise ValueError( "Must provide NodeAuthSSHKey or NodeAuthPassword in auth") r = self.connection.request( target, 
params={"api-version": RESOURCE_API_VERSION}, data=data, method="PUT") node = self._to_node(r.object) node.size = size node.image = image return node def reboot_node(self, node): """ Reboot a node. :param node: The node to be rebooted :type node: :class:`.Node` :return: True if the reboot was successful, otherwise False :rtype: ``bool`` """ target = "%s/restart" % node.id try: self.connection.request( target, params={"api-version": RESOURCE_API_VERSION}, method='POST') return True except BaseHTTPError as h: if h.code == 202: return True else: return False def destroy_node(self, node, ex_destroy_nic=True, ex_destroy_vhd=True): """ Destroy a node. :param node: The node to be destroyed :type node: :class:`.Node` :param ex_destroy_nic: Destroy the NICs associated with this node (default True). :type node: ``bool`` :param ex_destroy_vhd: Destroy the OS disk blob associated with this node (default True). :type node: ``bool`` :return: True if the destroy was successful, False otherwise. :rtype: ``bool`` """ # This returns a 202 (Accepted) which means that the delete happens # asynchronously. try: self.connection.request(node.id, params={"api-version": "2015-06-15"}, method='DELETE') except BaseHTTPError as h: if h.code == 202: pass else: return False # Need to poll until the node actually goes away. while True: try: time.sleep(10) self.connection.request( node.id, params={"api-version": RESOURCE_API_VERSION}) except BaseHTTPError as h: if h.code == 404: break else: return False # Optionally clean up the network # interfaces that were attached to this node. 
interfaces = \ node.extra["properties"]["networkProfile"]["networkInterfaces"] if ex_destroy_nic: for nic in interfaces: while True: try: self.connection.request( nic["id"], params={"api-version": RESOURCE_API_VERSION}, method='DELETE') break except BaseHTTPError as h: if h.code == 202: break inuse = h.message.startswith("[NicInUse]") if h.code == 400 and inuse: time.sleep(10) else: return False # Optionally clean up OS disk VHD. vhd = node.extra["properties"]["storageProfile"]["osDisk"].get("vhd") if ex_destroy_vhd and vhd is not None: while True: try: resourceGroup = node.id.split("/")[4] self._ex_delete_old_vhd( resourceGroup, vhd["uri"]) break except LibcloudError as e: if "LeaseIdMissing" in str(e): # Unfortunately lease errors # (which occur if the vhd blob # hasn't yet been released by the VM being destroyed) # get raised as plain # LibcloudError. Wait a bit and try again. time.sleep(10) else: raise return True def create_volume(self, size, name, location=None, snapshot=None, ex_resource_group=None, ex_account_type=None, ex_tags=None): """ Create a new managed volume. :param size: Size of volume in gigabytes. :type size: ``int`` :param name: Name of the volume to be created. :type name: ``str`` :param location: Which data center to create a volume in. (required) :type location: :class:`NodeLocation` :param snapshot: Snapshot from which to create the new volume. :type snapshot: :class:`VolumeSnapshot` :param ex_resource_group: The name of resource group in which to create the volume. (required) :type ex_resource_group: ``str`` :param ex_account_type: The Storage Account type, ``Standard_LRS``(HDD disks) or ``Premium_LRS``(SSD disks). :type ex_account_type: ``str`` :param ex_tags: Optional tags to associate with this resource. :type ex_tags: ``dict`` :return: The newly created volume. 
:rtype: :class:`StorageVolume` """ if location is None: raise ValueError("Must provide `location` value.") if ex_resource_group is None: raise ValueError("Must provide `ex_resource_group` value.") action = ( u'/subscriptions/{subscription_id}/resourceGroups/{resource_group}' u'/providers/Microsoft.Compute/disks/{volume_name}' ).format( subscription_id=self.subscription_id, resource_group=ex_resource_group, volume_name=name, ) tags = ex_tags if ex_tags is not None else {} creation_data = { 'createOption': 'Empty' } if snapshot is None else { 'createOption': 'Copy', 'sourceUri': snapshot.id } data = { 'location': location.id, 'tags': tags, 'properties': { 'creationData': creation_data, 'diskSizeGB': size } } if ex_account_type is not None: data['properties']['accountType'] = ex_account_type response = self.connection.request( action, method='PUT', params={ 'api-version': RESOURCE_API_VERSION, }, data=data ) return self._to_volume( response.object, name=name, ex_resource_group=ex_resource_group ) def list_volumes(self, ex_resource_group=None): """ Lists all the disks under a resource group or subscription. :param ex_resource_group: The identifier of your subscription where the managed disks are located. :type ex_resource_group: ``str`` :rtype: list of :class:`StorageVolume` """ if ex_resource_group: action = u'/subscriptions/{subscription_id}/resourceGroups' \ u'/{resource_group}/providers/Microsoft.Compute/disks' else: action = u'/subscriptions/{subscription_id}' \ u'/providers/Microsoft.Compute/disks' action = action.format( subscription_id=self.subscription_id, resource_group=ex_resource_group ) response = self.connection.request( action, method='GET', params={ 'api-version': RESOURCE_API_VERSION } ) return [self._to_volume(volume) for volume in response.object['value']] def attach_volume(self, node, volume, ex_lun=None, ex_vhd_uri=None, ex_vhd_create=False, **ex_kwargs): """ Attach a volume to node. :param node: A node to attach volume. 
:type node: :class:`Node` :param volume: A volume to attach. :type volume: :class:`StorageVolume` :param ex_lun: Specifies the logical unit number (LUN) location for the data drive in the virtual machine. Each data disk must have a unique LUN. :type ex_lun: ``int`` :param ex_vhd_uri: Attach old-style unmanaged disk from VHD blob. (optional) :type ex_vhd_uri: ``str`` :param ex_vhd_create: Create a new VHD blob for unmanaged disk. (optional) :type ex_vhd_create: ``bool`` :rtype: ``bool`` """ action = node.extra['id'] location = node.extra['location'] disks = node.extra['properties']['storageProfile']['dataDisks'] if ex_lun is None: # find the smallest unused logical unit number used_luns = [disk['lun'] for disk in disks] free_luns = [lun for lun in range(0, 63) if lun not in used_luns] if len(free_luns) > 0: ex_lun = free_luns[0] else: raise LibcloudError("No LUN available to attach new disk.") if ex_vhd_uri is not None: new_disk = { 'name': volume.name, 'diskSizeGB': volume.size, 'lun': ex_lun, 'createOption': 'empty' if ex_vhd_create else 'attach', 'vhd': {'uri': ex_vhd_uri}, } else: # attach existing managed disk new_disk = { 'lun': ex_lun, 'createOption': 'attach', 'managedDisk': {'id': volume.id}} disks.append(new_disk) self.connection.request( action, method='PUT', params={ 'api-version': RESOURCE_API_VERSION }, data={ 'properties': { 'storageProfile': { 'dataDisks': disks } }, 'location': location }) return True def detach_volume(self, volume, ex_node=None): """ Detach a managed volume from a node. 
""" if ex_node is None: raise ValueError("Must provide `ex_node` value.") action = ex_node.extra['id'] location = ex_node.extra['location'] disks = ex_node.extra['properties']['storageProfile']['dataDisks'] # remove volume from `properties.storageProfile.dataDisks` disks[:] = [ disk for disk in disks if disk.get('name') != volume.name and disk.get('managedDisk', {}).get('id') != volume.id ] self.connection.request( action, method='PUT', params={ 'api-version': RESOURCE_API_VERSION }, data={ 'properties': { 'storageProfile': { 'dataDisks': disks } }, 'location': location } ) return True def destroy_volume(self, volume): """ Delete a volume. """ self.ex_delete_resource(volume) return True def create_volume_snapshot(self, volume, name=None, location=None, ex_resource_group=None, ex_tags=None): """ Create snapshot from volume. :param volume: Instance of ``StorageVolume``. :type volume: :class`StorageVolume` :param name: Name of snapshot. (required) :type name: ``str`` :param location: Which data center to create a volume in. (required) :type location: :class:`NodeLocation` :param ex_resource_group: The name of resource group in which to create the snapshot. (required) :type ex_resource_group: ``str`` :param ex_tags: Optional tags to associate with this resource. 
:type ex_tags: ``dict`` :rtype: :class:`VolumeSnapshot` """ if name is None: raise ValueError("Must provide `name` value") if location is None: raise ValueError("Must provide `location` value") if ex_resource_group is None: raise ValueError("Must provide `ex_resource_group` value") snapshot_id = ( u'/subscriptions/{subscription_id}' u'/resourceGroups/{resource_group}' u'/providers/Microsoft.Compute' u'/snapshots/{snapshot_name}' ).format( subscription_id=self.subscription_id, resource_group=ex_resource_group, snapshot_name=name, ) tags = ex_tags if ex_tags is not None else {} data = { 'location': location.id, 'tags': tags, 'properties': { 'creationData': { 'createOption': 'Copy', 'sourceUri': volume.id }, } } response = self.connection.request( snapshot_id, method='PUT', data=data, params={ 'api-version': RESOURCE_API_VERSION }, ) return self._to_snapshot( response.object, name=name, ex_resource_group=ex_resource_group ) def list_volume_snapshots(self, volume): return [snapshot for snapshot in self.list_snapshots() if snapshot.extra['source_id'] == volume.id] def list_snapshots(self, ex_resource_group=None): """ Lists all the snapshots under a resource group or subscription. :param ex_resource_group: The identifier of your subscription where the managed snapshots are located (optional). :type ex_resource_group: ``str`` :rtype: list of :class:`VolumeSnapshot` """ if ex_resource_group: action = u'/subscriptions/{subscription_id}/resourceGroups' \ u'/{resource_group}/providers/Microsoft.Compute/snapshots' else: action = u'/subscriptions/{subscription_id}' \ u'/providers/Microsoft.Compute/snapshots' action = action.format( subscription_id=self.subscription_id, resource_group=ex_resource_group ) response = self.connection.request( action, method='GET', params={ 'api-version': RESOURCE_API_VERSION } ) return [self._to_snapshot(snap) for snap in response.object['value']] def destroy_volume_snapshot(self, snapshot): """ Delete a snapshot. 
""" self.ex_delete_resource(snapshot) return True def _to_volume(self, volume_obj, name=None, ex_resource_group=None): """ Parse the JSON element and return a StorageVolume object. :param volume_obj: A volume object from an azure response. :type volume_obj: ``dict`` :param name: An optional name for the volume. :type name: ``str`` :param ex_resource_group: An optional resource group for the volume. :type ex_resource_group: ``str`` :rtype: :class:`StorageVolume` """ volume_id = volume_obj.get('id') volume_name = volume_obj.get('name') extra = dict(volume_obj) properties = extra['properties'] size = properties.get('diskSizeGB') if size is not None: size = int(size) provisioning_state = properties.get('provisioningState', '').lower() disk_state = properties.get('diskState', '').lower() if provisioning_state == 'creating': state = StorageVolumeState.CREATING elif provisioning_state == 'updating': state = StorageVolumeState.UPDATING elif provisioning_state == 'succeeded': if disk_state in ('attached', 'reserved', 'activesas'): state = StorageVolumeState.INUSE elif disk_state == 'unattached': state = StorageVolumeState.AVAILABLE else: state = StorageVolumeState.UNKNOWN else: state = StorageVolumeState.UNKNOWN if volume_id is None \ and ex_resource_group is not None \ and name is not None: volume_id = ( u'/subscriptions/{subscription_id}' u'/resourceGroups/{resource_group}' u'/providers/Microsoft.Compute/disks/{volume_name}' ).format( subscription_id=self.subscription_id, resource_group=ex_resource_group, volume_name=name ) if volume_name is None and \ name is not None: volume_name = name return StorageVolume( id=volume_id, name=volume_name, size=size, driver=self, state=state, extra=extra ) def _to_snapshot(self, snapshot_obj, name=None, ex_resource_group=None): """ Parse the JSON element and return a VolumeSnapshot object. :param snapshot_obj: A snapshot object from an azure response. :type snapshot_obj: ``dict`` :param name: An optional name for the volume. 
:type name: ``str`` :param ex_resource_group: An optional resource group for the volume. :type ex_resource_group: ``str`` :rtype: :class:`VolumeSnapshot` """ snapshot_id = snapshot_obj.get('id') name = snapshot_obj.get('name', name) properties = snapshot_obj['properties'] size = properties.get('diskSizeGB') if size is not None: size = int(size) extra = dict(snapshot_obj) extra['source_id'] = properties['creationData']['sourceUri'] if '/providers/Microsoft.Compute/disks/' in extra['source_id']: extra['volume_id'] = extra['source_id'] state = self.SNAPSHOT_STATE_MAP.get( properties.get('provisioningState', '').lower(), VolumeSnapshotState.UNKNOWN ) try: created_at = iso8601.parse_date(properties.get('timeCreated')) except (TypeError, ValueError, iso8601.ParseError): created_at = None if snapshot_id is None \ and ex_resource_group is not None \ and name is not None: snapshot_id = ( u'/subscriptions/{subscription_id}' u'/resourceGroups/{resource_group}' u'/providers/Microsoft.Compute/snapshots/{snapshot_name}' ).format( subscription_id=self.subscription_id, resource_group=ex_resource_group, snapshot_name=name ) return VolumeSnapshot( snapshot_id, name=name, size=size, driver=self, state=state, extra=extra, created=created_at ) def ex_delete_resource(self, resource): """ Delete a resource. """ if not isinstance(resource, basestring): resource = resource.id self.connection.request( resource, method='DELETE', params={ 'api-version': RESOURCE_API_VERSION }, ) def ex_get_ratecard(self, offer_durable_id, currency='USD', locale='en-US', region='US'): """ Get rate card :param offer_durable_id: ID of the offer applicable for this user account. (e.g. 
"0026P") See http://azure.microsoft.com/en-us/support/legal/offer-details/ :type offer_durable_id: str :param currency: Desired currency for the response (default: "USD") :type currency: ``str`` :param locale: Locale (default: "en-US") :type locale: ``str`` :param region: Region (two-letter code) (default: "US") :type regions: ``str`` :return: A dictionary of rates whose ID's correspond to nothing at all :rtype: ``dict`` """ action = "/subscriptions/%s/providers/Microsoft.Commerce/" \ "RateCard" % (self.subscription_id,) params = {"api-version": "2016-08-31-preview", "$filter": "OfferDurableId eq 'MS-AZR-%s' and " "Currency eq '%s' and " "Locale eq '%s' and " "RegionInfo eq '%s'" % (offer_durable_id, currency, locale, region)} r = self.connection.request(action, params=params) return r.object def ex_list_publishers(self, location=None): """ List node image publishers. :param location: The location at which to list publishers (if None, use default location specified as 'region' in __init__) :type location: :class:`.NodeLocation` :return: A list of tuples in the form ("publisher id", "publisher name") :rtype: ``list`` """ if location is None: if self.default_location: location = self.default_location else: raise ValueError("location is required.") action = "/subscriptions/%s/providers/Microsoft.Compute/" \ "locations/%s/publishers" \ % (self.subscription_id, location.id) r = self.connection.request(action, params={"api-version": "2015-06-15"}) return [(p["id"], p["name"]) for p in r.object] def ex_list_offers(self, publisher): """ List node image offers from a publisher. 
:param publisher: The complete resource path to a publisher (as returned by `ex_list_publishers`) :type publisher: ``str`` :return: A list of tuples in the form ("offer id", "offer name") :rtype: ``list`` """ action = "%s/artifacttypes/vmimage/offers" % (publisher) r = self.connection.request(action, params={"api-version": "2015-06-15"}) return [(p["id"], p["name"]) for p in r.object] def ex_list_skus(self, offer): """ List node image skus in an offer. :param offer: The complete resource path to an offer (as returned by `ex_list_offers`) :type publisher: ``str`` :return: A list of tuples in the form ("sku id", "sku name") :rtype: ``list`` """ action = "%s/skus" % (offer) r = self.connection.request(action, params={"api-version": "2015-06-15"}) return [(sku["id"], sku["name"]) for sku in r.object] def ex_list_image_versions(self, sku): """ List node image versions in a sku. :param sku: The complete resource path to a sku (as returned by `ex_list_skus`) :type publisher: ``str`` :return: A list of tuples in the form ("version id", "version name") :rtype: ``list`` """ action = "%s/versions" % (sku) r = self.connection.request(action, params={"api-version": "2015-06-15"}) return [(img["id"], img["name"]) for img in r.object] def ex_list_resource_groups(self): """ List resource groups. :return: A list of resource groups. :rtype: ``list`` of :class:`.AzureResourceGroup` """ action = "/subscriptions/%s/resourceGroups/" % (self.subscription_id) r = self.connection.request(action, params={"api-version": "2016-09-01"}) return [AzureResourceGroup(grp["id"], grp["name"], grp["location"], grp["properties"]) for grp in r.object["value"]] def ex_list_network_security_groups(self, resource_group): """ List network security groups. :param resource_group: List security groups in a specific resource group. :type resource_group: ``str`` :return: A list of network security groups. 
:rtype: ``list`` of :class:`.AzureNetworkSecurityGroup` """ action = "/subscriptions/%s/resourceGroups/%s/providers/" \ "Microsoft.Network/networkSecurityGroups" \ % (self.subscription_id, resource_group) r = self.connection.request(action, params={"api-version": "2015-06-15"}) return [AzureNetworkSecurityGroup(net["id"], net["name"], net["location"], net["properties"]) for net in r.object["value"]] def ex_create_network_security_group(self, name, resource_group, location=None): """ Update tags on any resource supporting tags. :param name: Name of the network security group to create :type name: ``str`` :param resource_group: The resource group to create the network security group in :type resource_group: ``str`` :param location: The location at which to create the network security group (if None, use default location specified as 'region' in __init__) :type location: :class:`.NodeLocation` """ if location is None and self.default_location: location = self.default_location else: raise ValueError("location is required.") target = "/subscriptions/%s/resourceGroups/%s/" \ "providers/Microsoft.Network/networkSecurityGroups/%s" \ % (self.subscription_id, resource_group, name) data = { "location": location.id, } self.connection.request(target, params={"api-version": "2016-09-01"}, data=data, method='PUT') def ex_delete_network_security_group(self, name, resource_group, location=None): """ Update tags on any resource supporting tags. 
:param name: Name of the network security group to delete :type name: ``str`` :param resource_group: The resource group to create the network security group in :type resource_group: ``str`` :param location: The location at which to create the network security group (if None, use default location specified as 'region' in __init__) :type location: :class:`.NodeLocation` """ if location is None and self.default_location: location = self.default_location else: raise ValueError("location is required.") target = "/subscriptions/%s/resourceGroups/%s/" \ "providers/Microsoft.Network/networkSecurityGroups/%s" \ % (self.subscription_id, resource_group, name) data = { "location": location.id, } self.connection.request(target, params={"api-version": "2016-09-01"}, data=data, method='DELETE') def ex_list_networks(self): """ List virtual networks. :return: A list of virtual networks. :rtype: ``list`` of :class:`.AzureNetwork` """ action = "/subscriptions/%s/providers/" \ "Microsoft.Network/virtualnetworks" \ % (self.subscription_id) r = self.connection.request(action, params={"api-version": "2015-06-15"}) return [AzureNetwork(net["id"], net["name"], net["location"], net["properties"]) for net in r.object["value"]] def ex_list_subnets(self, network): """ List subnets of a virtual network. :param network: The virtual network containing the subnets. :type network: :class:`.AzureNetwork` :return: A list of subnets. :rtype: ``list`` of :class:`.AzureSubnet` """ action = "%s/subnets" % (network.id) r = self.connection.request(action, params={"api-version": "2015-06-15"}) return [AzureSubnet(net["id"], net["name"], net["properties"]) for net in r.object["value"]] def ex_list_nics(self, resource_group=None): """ List available virtual network interface controllers in a resource group :param resource_group: List NICS in a specific resource group containing the NICs(optional). :type resource_group: ``str`` :return: A list of NICs. 
:rtype: ``list`` of :class:`.AzureNic` """ if resource_group is None: action = "/subscriptions/%s/providers/Microsoft.Network" \ "/networkInterfaces" % self.subscription_id else: action = "/subscriptions/%s/resourceGroups/%s/providers" \ "/Microsoft.Network/networkInterfaces" % \ (self.subscription_id, resource_group) r = self.connection.request( action, params={"api-version": RESOURCE_API_VERSION}) return [self._to_nic(net) for net in r.object["value"]] def ex_get_nic(self, id): """ Fetch information about a NIC. :param id: The complete resource path to the NIC resource. :type id: ``str`` :return: The NIC object :rtype: :class:`.AzureNic` """ r = self.connection.request(id, params={"api-version": "2015-06-15"}) return self._to_nic(r.object) def ex_get_node(self, id): """ Fetch information about a node. :param id: The complete resource path to the node resource. :type id: ``str`` :return: The Node object :rtype: :class:`.Node` """ r = self.connection.request( id, params={"api-version": RESOURCE_API_VERSION}) return self._to_node(r.object) def ex_get_volume(self, id): """ Fetch information about a volume. :param id: The complete resource path to the volume resource. :type id: ``str`` :return: The StorageVolume object :rtype: :class:`.StorageVolume` """ r = self.connection.request( id, params={"api-version": RESOURCE_API_VERSION}) return self._to_volume(r.object) def ex_get_snapshot(self, id): """ Fetch information about a snapshot. :param id: The complete resource path to the snapshot resource. :type id: ``str`` :return: The VolumeSnapshot object :rtype: :class:`.VolumeSnapshot` """ r = self.connection.request( id, params={"api-version": RESOURCE_API_VERSION}) return self._to_snapshot(r.object) def ex_get_public_ip(self, id): """ Fetch information about a public IP resource. :param id: The complete resource path to the public IP resource. 
:type id: ``str` :return: The public ip object :rtype: :class:`.AzureIPAddress` """ r = self.connection.request(id, params={"api-version": "2015-06-15"}) return self._to_ip_address(r.object) def ex_list_public_ips(self, resource_group): """ List public IP resources. :param resource_group: List public IPs in a specific resource group. :type resource_group: ``str`` :return: List of public ip objects :rtype: ``list`` of :class:`.AzureIPAddress` """ action = "/subscriptions/%s/resourceGroups/%s/" \ "providers/Microsoft.Network/publicIPAddresses" \ % (self.subscription_id, resource_group) r = self.connection.request(action, params={"api-version": "2015-06-15"}) return [self._to_ip_address(net) for net in r.object["value"]] def ex_create_public_ip(self, name, resource_group, location=None, public_ip_allocation_method=None): """ Create a public IP resources. :param name: Name of the public IP resource :type name: ``str`` :param resource_group: The resource group to create the public IP :type resource_group: ``str`` :param location: The location at which to create the public IP (if None, use default location specified as 'region' in __init__) :type location: :class:`.NodeLocation` :param public_ip_allocation_method: Call ex_create_public_ip with public_ip_allocation_method="Static" to create a static public IP address :type public_ip_allocation_method: ``str`` :return: The newly created public ip object :rtype: :class:`.AzureIPAddress` """ if location is None and self.default_location: location = self.default_location else: raise ValueError("location is required.") target = "/subscriptions/%s/resourceGroups/%s/" \ "providers/Microsoft.Network/publicIPAddresses/%s" \ % (self.subscription_id, resource_group, name) data = { "location": location.id, "tags": {}, "properties": { "publicIPAllocationMethod": "Dynamic" } } if public_ip_allocation_method == "Static": data['properties']['publicIPAllocationMethod'] = "Static" r = self.connection.request(target, params={"api-version": 
"2015-06-15"}, data=data, method='PUT') return self._to_ip_address(r.object) def ex_create_network_interface(self, name, subnet, resource_group, location=None, public_ip=None): """ Create a virtual network interface (NIC). :param name: Name of the NIC resource :type name: ``str`` :param subnet: The subnet to attach the NIC :type subnet: :class:`.AzureSubnet` :param resource_group: The resource group to create the NIC :type resource_group: ``str`` :param location: The location at which to create the NIC (if None, use default location specified as 'region' in __init__) :type location: :class:`.NodeLocation` :param public_ip: Associate a public IP resource with this NIC (optional). :type public_ip: :class:`.AzureIPAddress` :return: The newly created NIC :rtype: :class:`.AzureNic` """ if location is None: if self.default_location: location = self.default_location else: raise ValueError("location is required.") target = "/subscriptions/%s/resourceGroups/%s/providers" \ "/Microsoft.Network/networkInterfaces/%s" \ % (self.subscription_id, resource_group, name) data = { "location": location.id, "tags": {}, "properties": { "ipConfigurations": [{ "name": "myip1", "properties": { "subnet": { "id": subnet.id }, "privateIPAllocationMethod": "Dynamic" } }] } } if public_ip: ip_config = data["properties"]["ipConfigurations"][0] ip_config["properties"]["publicIPAddress"] = { "id": public_ip.id } r = self.connection.request(target, params={"api-version": "2015-06-15"}, data=data, method='PUT') return AzureNic(r.object["id"], r.object["name"], r.object["location"], r.object["properties"]) def ex_create_tags(self, resource, tags, replace=False): """ Update tags on any resource supporting tags. :param resource: The resource to update. :type resource: ``str`` or Azure object with an ``id`` attribute. :param tags: The tags to set. :type tags: ``dict`` :param replace: If true, replace all tags with the new tags. If false (default) add or update tags. 
:type replace: ``bool`` """ if not isinstance(resource, basestring): resource = resource.id r = self.connection.request( resource, params={"api-version": RESOURCE_API_VERSION}) if replace: r.object["tags"] = tags else: r.object["tags"].update(tags) self.connection.request( resource, data={"tags": r.object["tags"]}, params={"api-version": RESOURCE_API_VERSION}, method="PATCH") def ex_start_node(self, node): """ Start a stopped node. :param node: The node to be started :type node: :class:`.Node` """ target = "%s/start" % node.id r = self.connection.request(target, params={"api-version": "2015-06-15"}, method='POST') return r.object def ex_stop_node(self, node, deallocate=True): """ Stop a running node. :param node: The node to be stopped :type node: :class:`.Node` :param deallocate: If True (default) stop and then deallocate the node (release the hardware allocated to run the node). If False, stop the node but maintain the hardware allocation. If the node is not deallocated, the subscription will continue to be billed as if it were running. :type deallocate: ``bool`` """ if deallocate: target = "%s/deallocate" % node.id else: target = "%s/stop" % node.id r = self.connection.request(target, params={"api-version": "2015-06-15"}, method='POST') return r.object def ex_get_storage_account_keys(self, resource_group, storage_account): """ Get account keys required to access to a storage account (using AzureBlobsStorageDriver). 
:param resource_group: The resource group containing the storage account :type resource_group: ``str`` :param storage_account: Storage account to access :type storage_account: ``str`` :return: The account keys, in the form `{"key1": "XXX", "key2": "YYY"}` :rtype: ``.dict`` """ action = "/subscriptions/%s/resourceGroups/%s/" \ "providers/Microsoft.Storage/storageAccounts/%s/listKeys" \ % (self.subscription_id, resource_group, storage_account) r = self.connection.request(action, params={ "api-version": "2015-05-01-preview"}, method="POST") return r.object def ex_run_command(self, node, command, filerefs=[], timestamp=0, storage_account_name=None, storage_account_key=None, location=None): """ Run a command on the node as root. Does not require ssh to log in, uses Windows Azure Agent (waagent) running on the node. :param node: The node on which to run the command. :type node: :class:``.Node`` :param command: The actual command to run. Note this is parsed into separate arguments according to shell quoting rules but is executed directly as a subprocess, not a shell command. :type command: ``str`` :param filerefs: Optional files to fetch by URI from Azure blob store (must provide storage_account_name and storage_account_key), or regular HTTP. :type command: ``list`` of ``str`` :param location: The location of the virtual machine (if None, use default location specified as 'region' in __init__) :type location: :class:`.NodeLocation` :param storage_account_name: The storage account from which to fetch files in `filerefs` :type storage_account_name: ``str`` :param storage_account_key: The storage key to authorize to the blob store. 
:type storage_account_key: ``str`` :type: ``list`` of :class:`.NodeLocation` """ if location is None: if self.default_location: location = self.default_location else: raise ValueError("location is required.") name = "init" target = node.id + "/extensions/" + name data = { "location": location.id, "name": name, "properties": { "publisher": "Microsoft.OSTCExtensions", "type": "CustomScriptForLinux", "typeHandlerVersion": "1.3", "settings": { "fileUris": filerefs, "commandToExecute": command, "timestamp": timestamp } } } if storage_account_name and storage_account_key: data["properties"]["protectedSettings"] = { "storageAccountName": storage_account_name, "storageAccountKey": storage_account_key} r = self.connection.request(target, params={"api-version": "2015-06-15"}, data=data, method='PUT') return r.object def _ex_delete_old_vhd(self, resource_group, uri): try: (storageAccount, blobContainer, blob) = _split_blob_uri(uri) keys = self.ex_get_storage_account_keys(resource_group, storageAccount) blobdriver = AzureBlobsStorageDriver( storageAccount, keys["key1"], host="%s.blob%s" % (storageAccount, self.connection.storage_suffix)) blobdriver.delete_object(blobdriver.get_object(blobContainer, blob)) return True except ObjectDoesNotExistError: return True def _ex_connection_class_kwargs(self): kwargs = super(AzureNodeDriver, self)._ex_connection_class_kwargs() kwargs['tenant_id'] = self.tenant_id kwargs['subscription_id'] = self.subscription_id kwargs["cloud_environment"] = self.cloud_environment return kwargs def _to_node(self, data, fetch_nic=True): private_ips = [] public_ips = [] nics = data["properties"]["networkProfile"]["networkInterfaces"] if fetch_nic: for nic in nics: try: n = self.ex_get_nic(nic["id"]) priv = n.extra["ipConfigurations"][0]["properties"] \ .get("privateIPAddress") if priv: private_ips.append(priv) pub = n.extra["ipConfigurations"][0]["properties"].get( "publicIPAddress") if pub: pub_addr = self.ex_get_public_ip(pub["id"]) addr = 
pub_addr.extra.get("ipAddress") if addr: public_ips.append(addr) except BaseHTTPError: pass state = NodeState.UNKNOWN try: action = "%s/InstanceView" % (data["id"]) r = self.connection.request(action, params={"api-version": "2015-06-15"}) for status in r.object["statuses"]: if status["code"] == "ProvisioningState/creating": state = NodeState.PENDING break elif status["code"] == "ProvisioningState/deleting": state = NodeState.TERMINATED break elif status["code"].startswith("ProvisioningState/failed"): state = NodeState.ERROR break elif status["code"] == "ProvisioningState/updating": state = NodeState.UPDATING break elif status["code"] == "ProvisioningState/succeeded": pass if status["code"] == "PowerState/deallocated": state = NodeState.STOPPED break elif status["code"] == "PowerState/deallocating": state = NodeState.PENDING break elif status["code"] == "PowerState/running": state = NodeState.RUNNING except BaseHTTPError: pass node = Node(data["id"], data["name"], state, public_ips, private_ips, driver=self.connection.driver, extra=data) return node def _to_node_size(self, data): return NodeSize(id=data["name"], name=data["name"], ram=data["memoryInMB"], # convert to disk from MB to GB disk=data["resourceDiskSizeInMB"] / 1024, bandwidth=0, price=0, driver=self.connection.driver, extra={"numberOfCores": data["numberOfCores"], "osDiskSizeInMB": data["osDiskSizeInMB"], "maxDataDiskCount": data["maxDataDiskCount"]}) def _to_nic(self, data): return AzureNic(data["id"], data["name"], data["location"], data["properties"]) def _to_ip_address(self, data): return AzureIPAddress(data["id"], data["name"], data["properties"]) def _to_location(self, loc): # XXX for some reason the API returns location names like # "East US" instead of "eastus" which is what is actually needed # for other API calls, so do a name->id fixup. 
loc_id = loc.lower().replace(" ", "") return NodeLocation(loc_id, loc, self._location_to_country.get(loc_id), self.connection.driver) def _get_instance_vhd(self, name, ex_resource_group, ex_storage_account, ex_blob_container="vhds"): n = 0 while True: try: instance_vhd = "https://%s.blob%s" \ "/%s/%s-os_%i.vhd" \ % (ex_storage_account, self.connection.storage_suffix, ex_blob_container, name, n) self._ex_delete_old_vhd(ex_resource_group, instance_vhd) return instance_vhd except LibcloudError: n += 1 def _split_blob_uri(uri): uri = uri.split('/') storage_account = uri[2].split('.')[0] blob_container = uri[3] blob_name = '/'.join(uri[4:]) return storage_account, blob_container, blob_name apache-libcloud-2.2.1/libcloud/compute/drivers/gridspot.py0000664000175000017500000001004712701023453023551 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
class GridspotAPIException(Exception):
    """
    Raised when the Gridspot API reports an exception in its response
    body (the 'exception_name' field).
    """

    def __str__(self):
        # args[0] is the exception_name string from the API response.
        return self.args[0]

    def __repr__(self):
        # The previous implementation returned '"" % (self.args[0])',
        # i.e. always the empty string; restore a meaningful repr.
        return "<GridspotAPIException '%s'>" % (self.args[0])
""" type = Provider.GRIDSPOT name = 'Gridspot' website = 'http://www.gridspot.com/' connectionCls = GridspotConnection NODE_STATE_MAP = { 'Running': NodeState.RUNNING, 'Starting': NodeState.PENDING } def list_nodes(self): data = self.connection.request( '/compute_api/v1/list_instances').object return [self._to_node(n) for n in data['instances']] def destroy_node(self, node): data = {'instance_id': node.id} self.connection.request('/compute_api/v1/stop_instance', data).object return True def _get_node_state(self, state): result = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN) return result def _add_int_param(self, params, data, field): if data[field]: try: params[field] = int(data[field]) except: pass def _to_node(self, data): port = None ip = None state = self._get_node_state(data['current_state']) if data['vm_ssh_wan_ip_endpoint'] != 'null': parts = data['vm_ssh_wan_ip_endpoint'].split(':') ip = parts[0] port = int(parts[1]) extra_params = { 'winning_bid_id': data['winning_bid_id'], 'port': port } # Spec is vague and doesn't indicate if these will always be present self._add_int_param(extra_params, data, 'vm_num_logical_cores') self._add_int_param(extra_params, data, 'vm_num_physical_cores') self._add_int_param(extra_params, data, 'vm_ram') self._add_int_param(extra_params, data, 'start_state_time') self._add_int_param(extra_params, data, 'ended_state_time') self._add_int_param(extra_params, data, 'running_state_time') return Node( id=data['instance_id'], name=data['instance_id'], state=state, public_ips=[ip], private_ips=[], driver=self.connection.driver, extra=extra_params) apache-libcloud-2.2.1/libcloud/compute/drivers/gce.py0000664000175000017500000130527413153541406022473 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Module for Google Compute Engine Driver. """ from __future__ import with_statement import datetime import time import sys from libcloud.common.base import LazyObject from libcloud.common.google import GoogleOAuth2Credential from libcloud.common.google import GoogleResponse from libcloud.common.google import GoogleBaseConnection from libcloud.common.google import GoogleBaseError from libcloud.common.google import ResourceNotFoundError from libcloud.common.google import ResourceExistsError from libcloud.common.types import ProviderError from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation from libcloud.compute.base import NodeSize, StorageVolume, VolumeSnapshot from libcloud.compute.base import UuidMixin from libcloud.compute.providers import Provider from libcloud.compute.types import NodeState from libcloud.utils.iso8601 import parse_date API_VERSION = 'v1' DEFAULT_TASK_COMPLETION_TIMEOUT = 180 def timestamp_to_datetime(timestamp): """ Return a datetime object that corresponds to the time in an RFC3339 timestamp. 
def timestamp_to_datetime(timestamp):
    """
    Return a UTC datetime object that corresponds to the time in an RFC3339
    timestamp.

    :param  timestamp: RFC3339 timestamp string,
                       e.g. '2013-06-26T10:05:19.340-07:00'
    :type   timestamp: ``str``

    :return: Datetime object corresponding to timestamp, normalized to UTC
    :rtype: :class:`datetime.datetime`
    """
    # We remove timezone offset and microseconds (Python 2.5 strptime
    # doesn't support %f)
    ts = datetime.datetime.strptime(timestamp[:-10], '%Y-%m-%dT%H:%M:%S')
    # Apply the offset sign to BOTH hours and minutes, then subtract the
    # offset to get UTC. (The previous code applied the sign only to the
    # minutes and always *added* the hours, which mis-converted every
    # positive "+HH:MM" offset.)
    sign = -1 if timestamp[-6] == '-' else 1
    tz_hours = sign * int(timestamp[-5:-3])
    tz_mins = sign * int(timestamp[-2:])
    tz_delta = datetime.timedelta(hours=tz_hours, minutes=tz_mins)
    return ts - tz_delta
        @inherits: :class:`GoogleBaseConnection.pre_connect_hook`
        """
        params, headers = super(GCEConnection, self).pre_connect_hook(params,
                                                                      headers)
        if self.gce_params:
            params.update(self.gce_params)
        return params, headers

    def request(self, *args, **kwargs):
        """
        Perform request then do GCE-specific processing of URL params.

        @inherits: :class:`GoogleBaseConnection.request`
        """
        response = super(GCEConnection, self).request(*args, **kwargs)

        # If gce_params has been set, then update the pageToken with the
        # nextPageToken so it can be used in the next request.
        if self.gce_params:
            if 'nextPageToken' in response.object:
                self.gce_params['pageToken'] = response.object['nextPageToken']
            elif 'pageToken' in self.gce_params:
                # Last page reached: drop the stale token so callers can
                # detect the end of the listing via 'pageToken' in params.
                del self.gce_params['pageToken']
            # gce_params applies to a single request only.
            self.gce_params = None

        return response

    def request_aggregated_items(self, api_name):
        """
        Perform request(s) to obtain all results from 'api_name'.

        This method will make requests to the aggregated 'api_name' until
        all results are received.  It will then, through a helper function,
        combine all results and return a single 'items' dictionary.

        :param  api_name: Name of API to call. Consult API docs
                          for valid names.
        :type   api_name: ``str``

        :return:  dict in the format of the API response.
                  format: { 'items': {'key': {api_name: []}} }
                  ex: { 'items': {'zones/us-central1-a': {disks: []}} }
        :rtype: ``dict``
        """
        request_path = "/aggregated/%s" % api_name
        api_responses = []

        params = {'maxResults': 500}
        more_results = True
        while more_results:
            # gce_params is consumed (and re-populated with the next
            # pageToken, if any) by request() above.
            self.gce_params = params
            response = self.request(request_path, method='GET').object
            if 'items' in response:
                api_responses.append(response)
            more_results = 'pageToken' in params
        return self._merge_response_items(api_name, api_responses)

    def _merge_response_items(self, list_name, response_list):
        """
        Take a list of API responses ("item"-portion only) and combine them.

        Helper function to combine multiple aggregated responses into a single
        dictionary that resembles an API response.

        Note: keys that don't have a 'list_name' key (including warnings) are
        omitted.

        :param  list_name: Name of list in dict.  Practically, this is the
                           name of the API called (e.g. 'disks').
        :type   list_name: ``str``

        :param  response_list: list of API responses (e.g. resp['items']).
                               Each entry in the list is the result of a
                               single API call.  Expected format is:
                               [ { items: { key1: { api_name:[]},
                                            key2: { api_name:[]} }}, ... ]
        :type   response_list: ``dict``

        :return: dict in the format of:
                 { items: {key: {api_name:[]}, key2: {api_name:[]}} }
                 ex: { items: {
                          'us-east1-a': {'disks': []},
                          'us-east1-b': {'disks': []}
                      }}
        :rtype: ``dict``
        """
        merged_items = {}
        for resp in response_list:
            if 'items' in resp:
                # example k would be a zone or region name
                # example v would be { "disks" : [], "otherkey" : "..." }
                for k, v in resp['items'].items():
                    if list_name in v:
                        merged_items.setdefault(k, {}).setdefault(
                            list_name, [])
                        # Combine the list with the existing list.
                        merged_items[k][list_name] += v[list_name]
        return {'items': merged_items}


class GCEList(object):
    """
    An Iterator that wraps list functions to provide additional features.

    GCE enforces a limit on the number of objects returned by a list
    operation, so users with more than 500 objects of a particular type will
    need to use filter(), page() or both.

    >>> l = GCEList(driver, driver.ex_list_urlmaps)
    >>> for sublist in l.filter('name eq ...-map').page(1):
    ...     sublist

    One can create a GCEList manually, but it's slightly easier to use the
    ex_list() method of :class:`GCENodeDriver`.
    """

    def __init__(self, driver, list_fn, **kwargs):
        """
        :param  driver: An initialized :class:``GCENodeDriver``
        :type   driver: :class:``GCENodeDriver``

        :param  list_fn: A bound list method from :class:`GCENodeDriver`.
:type list_fn: ``instancemethod`` """ self.driver = driver self.list_fn = list_fn self.kwargs = kwargs self.params = {} def __iter__(self): list_fn = self.list_fn more_results = True while more_results: self.driver.connection.gce_params = self.params yield list_fn(**self.kwargs) more_results = 'pageToken' in self.params def __repr__(self): return '' % (self.list_fn.__name__, repr(self.params)) def filter(self, expression): """ Filter results of a list operation. GCE supports server-side filtering of resources returned by a list operation. Syntax of the filter expression is fully described in the GCE API reference doc, but in brief it is:: FIELD_NAME COMPARISON_STRING LITERAL_STRING where FIELD_NAME is the resource's property name, COMPARISON_STRING is 'eq' or 'ne', and LITERAL_STRING is a regular expression in RE2 syntax. >>> for sublist in l.filter('name eq ...-map'): ... sublist ... [, \ ] API reference: https://cloud.google.com/compute/docs/reference/latest/ RE2 syntax: https://github.com/google/re2/blob/master/doc/syntax.txt :param expression: Filter expression described above. :type expression: ``str`` :return: This :class:`GCEList` instance :rtype: :class:`GCEList` """ self.params['filter'] = expression return self def page(self, max_results=500): """ Limit the number of results by each iteration. This implements the paging functionality of the GCE list methods and returns this GCEList instance so that results can be chained: >>> for sublist in GCEList(driver, driver.ex_list_urlmaps).page(2): ... sublist ... [, \ ] [] :keyword max_results: Maximum number of results to return per iteration. Defaults to the GCE default of 500. 
:type max_results: ``int`` :return: This :class:`GCEList` instance :rtype: :class:`GCEList` """ self.params['maxResults'] = max_results return self class GCELicense(UuidMixin, LazyObject): """A GCE License used to track software usage in GCE nodes.""" def __init__(self, name, project, driver): UuidMixin.__init__(self) self.id = name self.name = name self.project = project self.driver = driver self.charges_use_fee = None # init in _request self.extra = None # init in _request self._request() def _request(self): # TODO(crunkleton@google.com): create new connection? or make # connection thread-safe? Saving, modifying, and restoring # driver.connection.request_path is really hacky and thread-unsafe. saved_request_path = self.driver.connection.request_path try: new_request_path = saved_request_path.replace(self.driver.project, self.project) self.driver.connection.request_path = new_request_path request = '/global/licenses/%s' % self.name response = self.driver.connection.request(request, method='GET').object except: raise finally: # Restore the connection request_path self.driver.connection.request_path = saved_request_path self.extra = { 'selfLink': response.get('selfLink'), 'kind': response.get('kind') } self.charges_use_fee = response['chargesUseFee'] def destroy(self): raise ProviderError("Can not destroy a License resource.") def __repr__(self): return '' % ( self.id, self.name, self.charges_use_fee) class GCEDiskType(UuidMixin): """A GCE DiskType resource.""" def __init__(self, id, name, zone, driver, extra=None): self.id = str(id) self.name = name self.zone = zone self.driver = driver self.extra = extra UuidMixin.__init__(self) def destroy(self): raise ProviderError("Can not destroy a DiskType resource.") def __repr__(self): return '' % ( self.id, self.name, self.zone) class GCEAddress(UuidMixin): """A GCE Static address.""" def __init__(self, id, name, address, region, driver, extra=None): self.id = str(id) self.name = name self.address = address self.region = 
region self.driver = driver self.extra = extra UuidMixin.__init__(self) def destroy(self): """ Destroy this address. :return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_address(address=self) def __repr__(self): return '' % ( self.id, self.name, self.address, (hasattr(self.region, "name") and self.region.name or self.region)) class GCEBackend(UuidMixin): """A GCE Backend. Only used for creating Backend Services.""" def __init__(self, instance_group, balancing_mode='UTILIZATION', max_utilization=None, max_rate=None, max_rate_per_instance=None, capacity_scaler=1, description=None): if isinstance(instance_group, GCEInstanceGroup): self.instance_group = instance_group elif isinstance(instance_group, GCEInstanceGroupManager): self.instance_group = instance_group.instance_group else: raise ValueError('instance_group must be of type GCEInstanceGroup' 'or of type GCEInstanceGroupManager') self.instance_group = instance_group self.balancing_mode = balancing_mode self.max_utilization = max_utilization self.max_rate = max_rate self.max_rate_per_instance = max_rate_per_instance self.capacity_scaler = capacity_scaler # 'id' and 'name' aren't actually used or provided by the GCE API. # We create them for convenience. self.id = self._gen_id() self.name = self.id self.description = description or self.name UuidMixin.__init__(self) def _gen_id(self): """ Use the Instance Group information to fill in name and id fields. :return: id in the format of: ZONE/instanceGroups/INSTANCEGROUPNAME Ex: us-east1-c/instanceGroups/my-instance-group :rtype: ``str`` """ zone_name = self.instance_group.zone.name return "%s/instanceGroups/%s" % (zone_name, self.instance_group.name) def to_backend_dict(self): """ Returns dict formatted for inclusion in Backend Service Request. :return: dict formatted as a list entry for Backend Service 'backend'. 
:rtype: ``dict`` """ d = {} d['group'] = self.instance_group.extra['selfLink'] if self.balancing_mode: d['balancingMode'] = self.balancing_mode if self.max_utilization: d['maxUtilization'] = self.max_utilization if self.max_rate: d['maxRate'] = self.max_rate if self.max_rate_per_instance: d['maxRatePerInstance'] = self.max_rate_per_instance if self.capacity_scaler: d['capacityScaler'] = self.capacity_scaler return d def __repr__(self): return '' % ( self.id, self.balancing_mode) class GCEBackendService(UuidMixin): """A GCE Backend Service.""" def __init__(self, id, name, backends, healthchecks, port, port_name, protocol, timeout, driver, extra=None): self.id = str(id) self.name = name self.backends = backends or [] self.healthchecks = healthchecks or [] self.port = port self.port_name = port_name self.protocol = protocol self.timeout = timeout self.driver = driver self.extra = extra or {} UuidMixin.__init__(self) def __repr__(self): return '' % (self.id, self.name) def destroy(self): """ Destroy this Backend Service. 
:return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_backendservice(backendservice=self) class GCEFailedDisk(object): """Dummy Node object for disks that are not created.""" def __init__(self, name, error, code): self.name = name self.error = error self.code = code def __repr__(self): return '' % (self.name, self.code) class GCEFailedNode(object): """Dummy Node object for nodes that are not created.""" def __init__(self, name, error, code): self.name = name self.error = error self.code = code def __repr__(self): return '' % (self.name, self.code) class GCEHealthCheck(UuidMixin): """A GCE Http Health Check class.""" def __init__(self, id, name, path, port, interval, timeout, unhealthy_threshold, healthy_threshold, driver, extra=None): self.id = str(id) self.name = name self.path = path self.port = port self.interval = interval self.timeout = timeout self.unhealthy_threshold = unhealthy_threshold self.healthy_threshold = healthy_threshold self.driver = driver self.extra = extra or {} UuidMixin.__init__(self) def destroy(self): """ Destroy this Health Check. :return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_healthcheck(healthcheck=self) def update(self): """ Commit updated healthcheck values. :return: Updated Healthcheck object :rtype: :class:`GCEHealthcheck` """ return self.driver.ex_update_healthcheck(healthcheck=self) def __repr__(self): return '' % ( self.id, self.name, self.path, self.port) class GCEFirewall(UuidMixin): """A GCE Firewall rule class.""" def __init__(self, id, name, allowed, network, source_ranges, source_tags, target_tags, driver, extra=None): self.id = str(id) self.name = name self.network = network self.allowed = allowed self.source_ranges = source_ranges self.source_tags = source_tags self.target_tags = target_tags self.driver = driver self.extra = extra UuidMixin.__init__(self) def destroy(self): """ Destroy this firewall. 
:return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_firewall(firewall=self) def update(self): """ Commit updated firewall values. :return: Updated Firewall object :rtype: :class:`GCEFirewall` """ return self.driver.ex_update_firewall(firewall=self) def __repr__(self): return '' % ( self.id, self.name, self.network.name) class GCEForwardingRule(UuidMixin): def __init__(self, id, name, region, address, protocol, targetpool, driver, extra=None): self.id = str(id) self.name = name self.region = region self.address = address self.protocol = protocol # TODO: 'targetpool' should more correctly be 'target' since a # forwarding rule's target can be something besides a targetpool self.targetpool = targetpool self.driver = driver self.extra = extra UuidMixin.__init__(self) def destroy(self): """ Destroy this Forwarding Rule :return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_forwarding_rule(forwarding_rule=self) def __repr__(self): return '' % ( self.id, self.name, self.address) class GCENodeImage(NodeImage): """A GCE Node Image class.""" def __init__(self, id, name, driver, extra=None): super(GCENodeImage, self).__init__(id, name, driver, extra=extra) def delete(self): """ Delete this image :return: True if successful :rtype: ``bool`` """ return self.driver.ex_delete_image(image=self) def deprecate(self, replacement, state, deprecated=None, obsolete=None, deleted=None): """ Deprecate this image :param replacement: Image to use as a replacement :type replacement: ``str`` or :class: `GCENodeImage` :param state: Deprecation state of this image. Possible values include \'ACTIVE\', \'DELETED\', \'DEPRECATED\' or \'OBSOLETE\'. 
:type state: ``str`` :param deprecated: RFC3339 timestamp to mark DEPRECATED :type deprecated: ``str`` or ``None`` :param obsolete: RFC3339 timestamp to mark OBSOLETE :type obsolete: ``str`` or ``None`` :param deleted: RFC3339 timestamp to mark DELETED :type deleted: ``str`` or ``None`` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_deprecate_image(self, replacement, state, deprecated, obsolete, deleted) class GCESslCertificate(UuidMixin): """ GCESslCertificate represents the SslCertificate resource. """ def __init__(self, id, name, certificate, driver, extra, private_key=None, description=None): """ :param name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :type name: ``str`` :param certificate: A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert. :type certificate: ``str`` :param private_key: A write-only private key in PEM format. Only insert RPCs will include this field. :type private_key: ``str`` :keyword description: An optional description of this resource. Provide this property when you create the resource. :type description: ``str`` :keyword driver: An initialized :class: `GCENodeDriver` :type driver: :class:`:class: `GCENodeDriver`` :keyword extra: A dictionary of extra information. 
:type extra: ``:class: ``dict```` """ self.name = name self.certificate = certificate self.private_key = private_key self.description = description self.driver = driver self.extra = extra UuidMixin.__init__(self) def __repr__(self): return '' % (self.name) def destroy(self): """ Destroy this SslCertificate. :return: Return True if successful. :rtype: ``bool`` """ return self.driver.ex_destroy_sslcertificate(sslcertificate=self) class GCESubnetwork(UuidMixin): """A GCE Subnetwork object class.""" def __init__(self, id, name, cidr, network, region, driver, extra=None): self.id = str(id) self.name = name self.cidr = cidr self.network = network self.region = region self.driver = driver self.extra = extra UuidMixin.__init__(self) def destroy(self): """ Destroy this subnetwork :return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_subnetwork(self) def __repr__(self): return '' % (self.id, self.name, self.region.name, self.network.name, self.cidr) class GCENetwork(UuidMixin): """A GCE Network object class.""" def __init__(self, id, name, cidr, driver, extra=None): self.id = str(id) self.name = name self.cidr = cidr self.driver = driver self.extra = extra self.mode = 'legacy' self.subnetworks = [] if 'mode' in extra and extra['mode'] != 'legacy': self.mode = extra['mode'] self.subnetworks = extra['subnetworks'] UuidMixin.__init__(self) def destroy(self): """ Destroy this network :return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_network(network=self) def __repr__(self): return '' % ( self.id, self.name, self.cidr, self.mode) class GCERoute(UuidMixin): """A GCE Route object class.""" def __init__(self, id, name, dest_range, priority, network="default", tags=None, driver=None, extra=None): self.id = str(id) self.name = name self.dest_range = dest_range self.priority = priority self.network = network self.tags = tags self.driver = driver self.extra = extra UuidMixin.__init__(self) def destroy(self): """ Destroy this route 
:return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_route(route=self) def __repr__(self): return '' % ( self.id, self.name, self.dest_range, hasattr(self.network, 'name') and self.network.name or self.network) class GCENodeSize(NodeSize): """A GCE Node Size (MachineType) class.""" def __init__(self, id, name, ram, disk, bandwidth, price, driver, extra=None): self.extra = extra super(GCENodeSize, self).__init__(id, name, ram, disk, bandwidth, price, driver, extra=extra) class GCEProject(UuidMixin): """GCE Project information.""" def __init__(self, id, name, metadata, quotas, driver, extra=None): self.id = str(id) self.name = name self.metadata = metadata self.quotas = quotas self.driver = driver self.extra = extra UuidMixin.__init__(self) def set_common_instance_metadata(self, metadata=None, force=False): """ Set common instance metadata for the project. Common uses are for setting 'sshKeys', or setting a project-wide 'startup-script' for all nodes (instances). Passing in ``None`` for the 'metadata' parameter will clear out all common instance metadata *except* for 'sshKeys'. If you also want to update 'sshKeys', set the 'force' parameter to ``True``. :param metadata: Dictionary of metadata. Can be either a standard python dictionary, or the format expected by GCE (e.g. {'items': [{'key': k1, 'value': v1}, ...}] :type metadata: ``dict`` or ``None`` :param force: Force update of 'sshKeys'. If force is ``False`` (the default), existing sshKeys will be retained. Setting force to ``True`` will either replace sshKeys if a new a new value is supplied, or deleted if no new value is supplied. :type force: ``bool`` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_set_common_instance_metadata(self, metadata) def set_usage_export_bucket(self, bucket, prefix=None): """ Used to retain Compute Engine resource usage, storing the CSV data in a Google Cloud Storage bucket. See the `docs `_ for more information. 
Please ensure you have followed the necessary setup steps prior to enabling this feature (e.g. bucket exists, ACLs are in place, etc.) :param bucket: Name of the Google Cloud Storage bucket. Specify the name in either 'gs://' or the full URL 'https://storage.googleapis.com/'. :type bucket: ``str`` :param prefix: Optional prefix string for all reports. :type prefix: ``str`` or ``None`` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_set_usage_export_bucket(self, bucket, prefix) def __repr__(self): return '' % (self.id, self.name) class GCERegion(UuidMixin): def __init__(self, id, name, status, zones, quotas, deprecated, driver, extra=None): self.id = str(id) self.name = name self.status = status self.zones = zones self.quotas = quotas self.deprecated = deprecated self.driver = driver self.extra = extra UuidMixin.__init__(self) def __repr__(self): return '' % ( self.id, self.name, self.status) class GCESnapshot(VolumeSnapshot): def __init__(self, id, name, size, status, driver, extra=None, created=None): self.status = status super(GCESnapshot, self).__init__(id, driver, size, extra, created, name=name) class GCETargetHttpProxy(UuidMixin): def __init__(self, id, name, urlmap, driver, extra=None): self.id = str(id) self.name = name self.urlmap = urlmap self.driver = driver self.extra = extra or {} UuidMixin.__init__(self) def __repr__(self): return '' % (self.id, self.name) def destroy(self): """ Destroy this Target HTTP Proxy. :return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_targethttpproxy(targethttpproxy=self) class GCETargetHttpsProxy(UuidMixin): """ GCETargetHttpsProxy represents the TargetHttpsProxy resource. """ def __init__(self, id, name, description=None, sslcertificates=None, urlmap=None, driver=None, extra=None): """ :param name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :type name: ``str`` :param description: An optional description of this resource. Provide this property when you create the resource. :type description: ``str`` :param sslcertificates: URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, exactly one SSL certificate must be specified. :type sslcertificates: ``list`` of :class:`GCESslcertificates` :param urlmap: A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. For example, the following are all valid URLs for specifying a URL map: - ht tps://www.googleapis.compute/v1/projects/project/gl obal/urlMaps/url-map - projects/project/global/urlMaps/url-map - global/urlMaps/url-map :type urlmap: :class:`GCEUrlMap` :keyword driver: An initialized :class: `GCENodeDriver` :type driver: :class:`:class: `GCENodeDriver`` :keyword extra: A dictionary of extra information. :type extra: ``:class: ``dict```` """ self.name = name self.description = description self.sslcertificates = sslcertificates self.urlmap = urlmap self.driver = driver self.extra = extra UuidMixin.__init__(self) def __repr__(self): return '' % (self.name) def set_sslcertificates(self, sslcertificates): """ Set the SSL Certificates for this TargetHTTPSProxy :param sslcertificates: SSL Certificates to set. :type sslcertificates: ``list`` of :class:`GCESslCertificate` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_targethttpsproxy_set_sslcertificates( targethttpsproxy=self, sslcertificates=sslcertificates) def set_urlmap(self, urlmap): """ Changes the URL map for TargetHttpsProxy. 
Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param targethttpsproxy: Name of the TargetHttpsProxy resource whose URL map is to be set. :type targethttpsproxy: ``str`` :param urlmap: UrlMap to set. :type urlmap: :class:`GCEUrlMap` :return: True :rtype: ``bool`` """ return self.driver.ex_targethttpsproxy_set_urlmap( targethttpsproxy=self, urlmap=urlmap) def destroy(self): """ Destroy this TargetHttpsProxy. :return: Return True if successful. :rtype: ``bool`` """ return self.driver.ex_destroy_targethttpsproxy(targethttpsproxy=self) class GCETargetInstance(UuidMixin): def __init__(self, id, name, zone, node, driver, extra=None): self.id = str(id) self.name = name self.zone = zone self.node = node self.driver = driver self.extra = extra UuidMixin.__init__(self) def destroy(self): """ Destroy this Target Instance :return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_targetinstance(targetinstance=self) def __repr__(self): return '' % ( self.id, self.name, self.zone.name, (hasattr(self.node, 'name') and self.node.name or self.node)) class GCEAutoscaler(UuidMixin): """Represents a autoscaling policy object used to scale Instance Groups.""" def __init__(self, id, name, zone, target, policy, driver, extra=None): self.id = str(id) self.name = name self.zone = zone self.target = target self.policy = policy self.driver = driver self.extra = extra UuidMixin.__init__(self) def destroy(self): """ Destroy this Autoscaler. 
:return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_autoscaler(autoscaler=self) def __repr__(self): return '' % ( self.id, self.name, self.zone.name, self.target.name) class GCEInstanceTemplate(UuidMixin): """Represents a machine configuration used in creating Instance Groups.""" def __init__(self, id, name, driver, extra=None): self.id = str(id) self.name = name self.driver = driver self.extra = extra UuidMixin.__init__(self) def __repr__(self): return '' % ( self.id, self.name, self.extra['properties'].get('machineType', 'UNKNOWN')) def destroy(self): """ Destroy this InstanceTemplate. :return: Return True if successful. :rtype: ``bool`` """ return self.driver.ex_destroy_instancetemplate(instancetemplate=self) class GCEInstanceGroup(UuidMixin): """ GCEInstanceGroup represents the InstanceGroup resource. """ def __init__(self, id, name, zone, driver, extra=None, network=None, subnetwork=None, named_ports=None): """ :param name: Required. The name of the instance group. The name must be 1-63 characters long, and comply with RFC1035. :type name: ``str`` :param zone: The URL of the zone where the instance group is located. :type zone: :class:`GCEZone` :param network: The URL of the network to which all instances in the instance group belong. :type network: :class:`GCENetwork` :param subnetwork: The URL of the subnetwork to which all instances in the instance group belong. :type subnetwork: :class:`GCESubnetwork` :param named_ports: Assigns a name to a port number. For example: {name: "http", port: 80} This allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. For example: [{name: "http", port: 80},{name: "http", port: 8080}] Named ports apply to all instances in this instance group. 
:type named_ports: ``""`` """ self.name = name self.zone = zone self.network = network self.subnetwork = subnetwork self.named_ports = named_ports self.driver = driver self.extra = extra UuidMixin.__init__(self) def __repr__(self): return '' % (self.name, self.zone.name) def destroy(self): """ Destroy this InstanceGroup. :return: Return True if successful. :rtype: ``bool`` """ return self.driver.ex_destroy_instancegroup(instancegroup=self) def add_instances(self, node_list): """ Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param instancegroup: The Instance Group where you are adding instances. :type instancegroup: :class:``GCEInstanceGroup`` :param node_list: List of nodes to add. :type node_list: ``list`` of :class:`Node` or ``list`` of :class:`GCENode` :return: Return True if successful. :rtype: ``bool`` """ return self.driver.ex_instancegroup_add_instances(instancegroup=self, node_list=node_list) def list_instances(self): """ Lists the instances in the specified instance group. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute * https://www.googleapis.com/auth/compute.readonly :return: List of :class:`GCENode` objects. :rtype: ``list`` of :class:`GCENode` objects. """ return self.driver.ex_instancegroup_list_instances(instancegroup=self) def remove_instances(self, node_list): """ Removes one or more instances from the specified instance group, but does not delete those instances. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param instancegroup: The Instance Group where you are removng instances. 
:type instancegroup: :class:``GCEInstanceGroup`` :param node_list: List of nodes to add. :type node_list: ``list`` of :class:`Node` or ``list`` of :class:`GCENode` :return: Return True if successful. :rtype: ``bool`` """ return self.driver.ex_instancegroup_remove_instances( instancegroup=self, node_list=node_list) def set_named_ports(self, named_ports): """ Sets the named ports for the specified instance group. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param named_ports: Assigns a name to a port number. For example: {name: "http", port: 80} This allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. For example: [{name: "http", port: 80},{name: "http", port: 8080}] Named ports apply to all instances in this instance group. :type named_ports: ``list`` of {'name': ``str``, 'port`: ``int``} :return: Return True if successful. :rtype: ``bool`` """ return self.driver.ex_instancegroup_set_named_ports( instancegroup=self, named_ports=named_ports) class GCEInstanceGroupManager(UuidMixin): """ GCE Instance Groups Manager class. Handles 'managed' Instance Groups. For more information on Instance Groups, see: https://cloud.google.com/compute/docs/instance-groups """ def __init__(self, id, name, zone, size, template, instance_group, driver, extra=None): """ :param id: Internal identifier of Instance Group. Display only. :type id: ``str`` :param name: The name of this Instance Group. :type name: ``str`` :param zone: Zone in witch the Instance Group belongs :type zone: :class: ``GCEZone`` :param size: Number of instances in this Instance Group. 
:type size: ``int`` :param template: An initialized :class:``GCEInstanceTemplate`` :type driver: :class:``GCEInstanceTemplate`` :param instance_group: An initialized :class:``GCEInstanceGroup`` :type driver: :class:``GCEInstanceGroup`` :param driver: An initialized :class:``GCENodeDriver`` :type driver: :class:``GCENodeDriver`` :param extra: A dictionary of extra information. :type extra: ``dict`` """ self.id = str(id) self.name = name self.zone = zone self.size = size or 0 self.template = template self.instance_group = instance_group self.driver = driver self.extra = extra UuidMixin.__init__(self) def destroy(self): """ Destroy this Instance Group. Destroys all instances managed by the Instance Group. :return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_instancegroupmanager(manager=self) def list_managed_instances(self): """ Lists all of the instances in this managed instance group. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute * https://www.googleapis.com/auth/compute.readonly :return: ``list`` of ``dict`` containing instance URI and currentAction. See ex_instancegroupmanager_list_managed_instances for more details. :rtype: ``list`` """ return self.driver.ex_instancegroupmanager_list_managed_instances( manager=self) def set_instancetemplate(self, instancetemplate): """ Set the Instance Template for this Instance Group. :param instancetemplate: Instance Template to set. :type instancetemplate: :class:`GCEInstanceTemplate` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_instancegroupmanager_set_instancetemplate( manager=self, instancetemplate=instancetemplate) def recreate_instances(self): """ Recreate instances in a Managed Instance Group. :return: ``list`` of ``dict`` containing instance URI and currentAction. See ex_instancegroupmanager_list_managed_instances for more details. 
:rtype: ``list`` """ return self.driver.ex_instancegroupmanager_recreate_instances( manager=self) def delete_instances(self, node_list): """ Removes one or more instances from the specified instance group, and delete those instances. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param node_list: List of nodes to delete. :type node_list: ``list`` of :class:`Node` or ``list`` of :class:`GCENode` :return: Return True if successful. :rtype: ``bool`` """ return self.driver.ex_instancegroupmanager_delete_instances( manager=self, node_list=node_list) def resize(self, size): """ Set the number of instances for this Instance Group. An increase in num_instances will result in VMs being created. A decrease will result in VMs being destroyed. :param size: Number to instances to resize to. :type size: ``int`` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_instancegroupmanager_resize(manager=self, size=size) def set_named_ports(self, named_ports): """ Sets the named ports for the instance group controlled by this manager. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param named_ports: Assigns a name to a port number. For example: {name: "http", port: 80} This allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. For example: [{name: "http", port: 80},{name: "http", port: 8080}] Named ports apply to all instances in this instance group. :type named_ports: ``list`` of {'name': ``str``, 'port`: ``int``} :return: Return True if successful. 
:rtype: ``bool`` """ return self.driver.ex_instancegroup_set_named_ports( instancegroup=self.instance_group, named_ports=named_ports) def __repr__(self): return '' % ( self.name, self.zone.name, self.size) class GCETargetPool(UuidMixin): def __init__(self, id, name, region, healthchecks, nodes, driver, extra=None): self.id = str(id) self.name = name self.region = region self.healthchecks = healthchecks self.nodes = nodes self.driver = driver self.extra = extra UuidMixin.__init__(self) def add_node(self, node): """ Add a node to this target pool. :param node: Node to add :type node: ``str`` or :class:`Node` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_targetpool_add_node(targetpool=self, node=node) def remove_node(self, node): """ Remove a node from this target pool. :param node: Node to remove :type node: ``str`` or :class:`Node` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_targetpool_remove_node(targetpool=self, node=node) def add_healthcheck(self, healthcheck): """ Add a healthcheck to this target pool. :param healthcheck: Healthcheck to add :type healthcheck: ``str`` or :class:`GCEHealthCheck` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_targetpool_add_healthcheck( targetpool=self, healthcheck=healthcheck) def remove_healthcheck(self, healthcheck): """ Remove a healthcheck from this target pool. :param healthcheck: Healthcheck to remove :type healthcheck: ``str`` or :class:`GCEHealthCheck` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_targetpool_remove_healthcheck( targetpool=self, healthcheck=healthcheck) def set_backup_targetpool(self, backup_targetpool, failover_ratio=0.1): """ Set a backup targetpool. :param backup_targetpool: The existing targetpool to use for failover traffic. 
:type backup_targetpool: :class:`GCETargetPool` :param failover_ratio: The percentage of healthy VMs must fall at or below this value before traffic will be sent to the backup targetpool (default 0.10) :type failover_ratio: ``float`` :return: True if successful :rtype: ``bool`` """ return self.driver.ex_targetpool_set_backup_targetpool( targetpool=self, backup_targetpool=backup_targetpool, failover_ratio=failover_ratio) def get_health(self, node=None): """ Return a hash of target pool instances and their health. :param node: Optional node to specify if only a specific node's health status should be returned :type node: ``str``, ``Node``, or ``None`` :return: List of hashes of nodes and their respective health :rtype: ``list`` of ``dict`` """ return self.driver.ex_targetpool_get_health(targetpool=self, node=node) def destroy(self): """ Destroy this Target Pool :return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_targetpool(targetpool=self) def __repr__(self): return '' % ( self.id, self.name, self.region.name) class GCEUrlMap(UuidMixin): """A GCE URL Map.""" def __init__(self, id, name, default_service, host_rules, path_matchers, tests, driver, extra=None): self.id = str(id) self.name = name self.default_service = default_service self.host_rules = host_rules or [] self.path_matchers = path_matchers or [] self.tests = tests or [] self.driver = driver self.extra = extra or {} UuidMixin.__init__(self) def __repr__(self): return '' % (self.id, self.name) def destroy(self): """ Destroy this URL Map :return: True if successful :rtype: ``bool`` """ return self.driver.ex_destroy_urlmap(urlmap=self) class GCEZone(NodeLocation): """Subclass of NodeLocation to provide additional information.""" def __init__(self, id, name, status, maintenance_windows, deprecated, driver, extra=None): self.status = status self.maintenance_windows = maintenance_windows self.deprecated = deprecated self.extra = extra country = name.split('-')[0] super(GCEZone, 
self).__init__(id=str(id), name=name, country=country, driver=driver) @property def time_until_mw(self): """ Returns the time until the next Maintenance Window as a datetime.timedelta object. """ return self._get_time_until_mw() @property def next_mw_duration(self): """ Returns the duration of the next Maintenance Window as a datetime.timedelta object. """ return self._get_next_mw_duration() def _now(self): """ Returns current UTC time. Can be overridden in unittests. """ return datetime.datetime.utcnow() def _get_next_maint(self): """ Returns the next Maintenance Window. :return: A dictionary containing maintenance window info (or None if no maintenance windows are scheduled) The dictionary contains 4 keys with values of type ``str`` - name: The name of the maintenance window - description: Description of the maintenance window - beginTime: RFC3339 Timestamp - endTime: RFC3339 Timestamp :rtype: ``dict`` or ``None`` """ begin = None next_window = None if not self.maintenance_windows: return None if len(self.maintenance_windows) == 1: return self.maintenance_windows[0] for mw in self.maintenance_windows: begin_next = timestamp_to_datetime(mw['beginTime']) if (not begin) or (begin_next < begin): begin = begin_next next_window = mw return next_window def _get_time_until_mw(self): """ Returns time until next maintenance window. :return: Time until next maintenance window (or None if no maintenance windows are scheduled) :rtype: :class:`datetime.timedelta` or ``None`` """ next_window = self._get_next_maint() if not next_window: return None now = self._now() next_begin = timestamp_to_datetime(next_window['beginTime']) return next_begin - now def _get_next_mw_duration(self): """ Returns the duration of the next maintenance window. 
:return: Duration of next maintenance window (or None if no maintenance windows are scheduled) :rtype: :class:`datetime.timedelta` or ``None`` """ next_window = self._get_next_maint() if not next_window: return None next_begin = timestamp_to_datetime(next_window['beginTime']) next_end = timestamp_to_datetime(next_window['endTime']) return next_end - next_begin def __repr__(self): return '' % (self.id, self.name, self.status) class GCENodeDriver(NodeDriver): """ GCE Node Driver class. This is the primary driver for interacting with Google Compute Engine. It contains all of the standard libcloud methods, plus additional ex_* methods for more features. Note that many methods allow either objects or strings (or lists of objects/strings). In most cases, passing strings instead of objects will result in additional GCE API calls. """ connectionCls = GCEConnection api_name = 'google' name = "Google Compute Engine" type = Provider.GCE website = 'https://cloud.google.com/' # Google Compute Engine node states are mapped to Libcloud node states # per the following dict. GCE does not have an actual 'stopped' state # but instead uses a 'terminated' state to indicate the node exists # but is not running. In order to better match libcloud, GCE maps this # 'terminated' state to 'STOPPED'. # Also, when a node is deleted from GCE, it no longer exists and instead # will result in a ResourceNotFound error versus returning a placeholder # node in a 'terminated' state. 
    # For more details, please see GCE's docs,
    # https://cloud.google.com/compute/docs/instances#checkmachinestatus
    NODE_STATE_MAP = {
        "PROVISIONING": NodeState.PENDING,
        "STAGING": NodeState.PENDING,
        "RUNNING": NodeState.RUNNING,
        "STOPPING": NodeState.PENDING,
        "TERMINATED": NodeState.STOPPED,
        "UNKNOWN": NodeState.UNKNOWN
    }

    AUTH_URL = "https://www.googleapis.com/auth/"
    SA_SCOPES_MAP = {
        # list derived from 'gcloud compute instances create --help'
        "bigquery": "bigquery",
        "cloud-platform": "cloud-platform",
        "compute-ro": "compute.readonly",
        "compute-rw": "compute",
        "datastore": "datastore",
        "logging-write": "logging.write",
        "monitoring": "monitoring",
        "monitoring-write": "monitoring.write",
        "service-control": "servicecontrol",
        "service-management": "service.management",
        "sql": "sqlservice",
        "sql-admin": "sqlservice.admin",
        "storage-full": "devstorage.full_control",
        "storage-ro": "devstorage.read_only",
        "storage-rw": "devstorage.read_write",
        "taskqueue": "taskqueue",
        "useraccounts-ro": "cloud.useraccounts.readonly",
        "useraccounts-rw": "cloud.useraccounts",
        "userinfo-email": "userinfo.email"
    }

    # Maps vendor image projects to the name prefixes of the images they own.
    IMAGE_PROJECTS = {
        "centos-cloud": ["centos"],
        "coreos-cloud": ["coreos"],
        "debian-cloud": ["debian", "backports"],
        "gce-nvme": ["nvme-backports"],
        "google-containers": ["container-vm"],
        "opensuse-cloud": ["opensuse"],
        "rhel-cloud": ["rhel"],
        "suse-cloud": ["sles", "suse"],
        "ubuntu-os-cloud": ["ubuntu"],
        "windows-cloud": ["windows"],
    }

    BACKEND_SERVICE_PROTOCOLS = ['HTTP', 'HTTPS', 'HTTP2', 'TCP', 'SSL']

    def __init__(self, user_id, key=None, datacenter=None, project=None,
                 auth_type=None, scopes=None, credential_file=None, **kwargs):
        """
        :param  user_id: The email address (for service accounts) or Client
                         ID (for installed apps) to be used for
                         authentication.
        :type   user_id: ``str``

        :param  key: The RSA Key (for service accounts) or file path
                     containing key or Client Secret (for installed apps) to
                     be used for authentication.
        :type   key: ``str``

        :keyword  datacenter: The name of the datacenter (zone) used for
                              operations.
        :type     datacenter: ``str``

        :keyword  project: Your GCE project name. (required)
        :type     project: ``str``

        :keyword  auth_type: Accepted values are "SA" or "IA" or "GCE"
                             ("Service Account" or "Installed Application" or
                             "GCE" if libcloud is being used on a GCE instance
                             with service account enabled).
                             If not supplied, auth_type will be guessed based
                             on value of user_id or if the code is being
                             executed in a GCE instance.
        :type     auth_type: ``str``

        :keyword  scopes: List of authorization URLs. Default is empty and
                          grants read/write to Compute, Storage, DNS.
        :type     scopes: ``list``

        :keyword  credential_file: Path to file for caching authentication
                                   information used by GCEConnection.
        :type     credential_file: ``str``
        """
        if not project:
            raise ValueError('Project name must be specified using '
                             '"project" keyword.')

        self.auth_type = auth_type
        self.project = project
        self.scopes = scopes
        # Per-project cache file keeps credentials for different projects
        # from clobbering each other.
        self.credential_file = credential_file or \
            GoogleOAuth2Credential.default_credential_file + '.' + \
            self.project

        super(GCENodeDriver, self).__init__(user_id, key, **kwargs)

        # Cache Zone and Region information to reduce API calls and
        # increase speed
        self.base_path = '/compute/%s/projects/%s' % (API_VERSION,
                                                      self.project)
        self.zone_list = self.ex_list_zones()
        self.zone_dict = {}
        for zone in self.zone_list:
            self.zone_dict[zone.name] = zone
        if datacenter:
            self.zone = self.ex_get_zone(datacenter)
        else:
            self.zone = None

        self.region_list = self.ex_list_regions()
        self.region_dict = {}
        for region in self.region_list:
            self.region_dict[region.name] = region

        if self.zone:
            self.region = self._get_region_from_zone(self.zone)
        else:
            self.region = None

        # Volume details are looked up in this name-zone dict.
        # It is populated if the volume name is not found or the dict is
        # empty.
        self._ex_volume_dict = {}

    def ex_add_access_config(self, node, name, nic, nat_ip=None,
                             config_type=None):
        """
        Add a network interface access configuration to a node.

        :keyword  node: The existing target Node (instance) that will receive
                        the new access config.
        :type     node: ``Node``

        :keyword  name: Name of the new access config.
        :type     name: ``str``

        :keyword  nic: Name of the network interface the access config is
                       attached to.
        :type     nic: ``str``

        :keyword  nat_ip: The external existing static IP Address to use for
                          the access config. If not provided, an ephemeral
                          IP address will be allocated.
        :type     nat_ip: ``str`` or ``None``

        :keyword  config_type: The type of access config to create. Currently
                               the only supported type is 'ONE_TO_ONE_NAT'.
        :type     config_type: ``str`` or ``None``

        :return: True if successful
        :rtype: ``bool``
        """
        if not isinstance(node, Node):
            raise ValueError("Must specify a valid libcloud node object.")
        node_name = node.name
        zone_name = node.extra['zone'].name

        config = {'name': name}
        if config_type is None:
            config_type = 'ONE_TO_ONE_NAT'
        config['type'] = config_type

        if nat_ip is not None:
            config['natIP'] = nat_ip
        params = {'networkInterface': nic}
        request = '/zones/%s/instances/%s/addAccessConfig' % (zone_name,
                                                              node_name)
        self.connection.async_request(request, method='POST', data=config,
                                      params=params)
        return True

    def ex_delete_access_config(self, node, name, nic):
        """
        Delete a network interface access configuration from a node.

        :keyword  node: The existing target Node (instance) for the request.
        :type     node: ``Node``

        :keyword  name: Name of the access config.
        :type     name: ``str``

        :keyword  nic: Name of the network interface.
        :type     nic: ``str``

        :return: True if successful
        :rtype: ``bool``
        """
        if not isinstance(node, Node):
            raise ValueError("Must specify a valid libcloud node object.")
        node_name = node.name
        zone_name = node.extra['zone'].name

        params = {'accessConfig': name, 'networkInterface': nic}
        request = '/zones/%s/instances/%s/deleteAccessConfig' % (zone_name,
                                                                 node_name)
        self.connection.async_request(request, method='POST', params=params)
        return True

    def ex_set_node_metadata(self, node, metadata):
        """
        Set metadata for the specified node.

        :keyword  node: The existing target Node (instance) for the request.
        :type     node: ``Node``

        :keyword  metadata: Set (or clear with None) metadata for this
                            particular node.
        :type     metadata: ``dict`` or ``None``

        :return: True if successful
        :rtype: ``bool``
        """
        if not isinstance(node, Node):
            raise ValueError("Must specify a valid libcloud node object.")
        node_name = node.name
        zone_name = node.extra['zone'].name
        # The current metadata fingerprint must be sent along with the
        # update; 'absent' is used when no fingerprint is available.
        if 'metadata' in node.extra and \
                'fingerprint' in node.extra['metadata']:
            current_fp = node.extra['metadata']['fingerprint']
        else:
            current_fp = 'absent'
        body = self._format_metadata(current_fp, metadata)
        request = '/zones/%s/instances/%s/setMetadata' % (zone_name,
                                                          node_name)
        self.connection.async_request(request, method='POST', data=body)
        return True

    def ex_get_serial_output(self, node):
        """
        Fetch the console/serial port output from the node.

        :keyword  node: The existing target Node (instance) for the request.
        :type     node: ``Node``

        :return: A string containing serial port output of the node.
        :rtype: ``str``
        """
        if not isinstance(node, Node):
            raise ValueError("Must specify a valid libcloud node object.")
        node_name = node.name
        zone_name = node.extra['zone'].name
        request = '/zones/%s/instances/%s/serialPort' % (zone_name,
                                                         node_name)
        response = self.connection.request(request, method='GET').object
        return response['contents']

    def ex_list(self, list_fn, **kwargs):
        """
        Wrap a list method in a :class:`GCEList` iterator.

        >>> for sublist in driver.ex_list(driver.ex_list_urlmaps).page(1):
        ...   sublist
        ...
        [<GCEUrlMap id="..." name="cli-map">]
        [<GCEUrlMap id="..." name="lc-map">]
        [<GCEUrlMap id="..." name="web-map">]

        :param  list_fn: A bound list method from :class:`GCENodeDriver`.
        :type   list_fn: ``instancemethod``

        :return: An iterator that returns sublists from list_fn.
        :rtype: :class:`GCEList`
        """
        return GCEList(driver=self, list_fn=list_fn, **kwargs)

    def ex_list_disktypes(self, zone=None):
        """
        Return a list of DiskTypes for a zone or all.

        :keyword  zone: The zone to return DiskTypes from. For example:
                        'us-central1-a'.  If None, will return DiskTypes
                        from self.zone.  If 'all', will return all DiskTypes.
        :type     zone: ``str`` or ``None``

        :return: A list of static DiskType objects.
        :rtype: ``list`` of :class:`GCEDiskType`
        """
        list_disktypes = []
        zone = self._set_zone(zone)
        if zone is None:
            request = '/aggregated/diskTypes'
        else:
            request = '/zones/%s/diskTypes' % (zone.name)
        response = self.connection.request(request, method='GET').object

        if 'items' in response:
            # The aggregated result returns dictionaries for each region
            if zone is None:
                for v in response['items'].values():
                    zone_disktypes = [self._to_disktype(a)
                                      for a in v.get('diskTypes', [])]
                    list_disktypes.extend(zone_disktypes)
            else:
                list_disktypes = [self._to_disktype(a)
                                  for a in response['items']]
        return list_disktypes

    def ex_set_usage_export_bucket(self, bucket, prefix=None):
        """
        Used to retain Compute Engine resource usage, storing the CSV data in
        a Google Cloud Storage bucket. See the
        `docs <https://cloud.google.com/compute/docs/usage-export>`_ for more
        information. Please ensure you have followed the necessary setup
        steps prior to enabling this feature (e.g. bucket exists, ACLs are in
        place, etc.)

        :param  bucket: Name of the Google Cloud Storage bucket. Specify the
                        name in either 'gs://<bucket_name>' or the full URL
                        'https://storage.googleapis.com/<bucket_name>'.
        :type   bucket: ``str``

        :param  prefix: Optional prefix string for all reports.
        :type   prefix: ``str`` or ``None``

        :return: True if successful
        :rtype: ``bool``
        """
        # NOTE(review): the docstring mentions storage.googleapis.com but the
        # check below accepts 'https://www.googleapis.com/' — confirm which
        # URL prefix the API actually expects.
        if bucket.startswith('https://www.googleapis.com/') or \
                bucket.startswith('gs://'):
            data = {'bucketName': bucket}
        else:
            raise ValueError("Invalid bucket name: %s" % bucket)
        if prefix:
            data['reportNamePrefix'] = prefix

        request = '/setUsageExportBucket'
        self.connection.async_request(request, method='POST', data=data)
        return True

    def ex_set_common_instance_metadata(self, metadata=None, force=False):
        """
        Set common instance metadata for the project. Common uses are for
        setting 'sshKeys', or setting a project-wide 'startup-script' for
        all nodes (instances).  Passing in ``None`` for the 'metadata'
        parameter will clear out all common instance metadata *except*
        for 'sshKeys'. If you also want to update 'sshKeys', set the
        'force' parameter to ``True``.

        :param  metadata: Dictionary of metadata. Can be either a standard
                          python dictionary, or the format expected by
                          GCE (e.g. {'items': [{'key': k1, 'value': v1},
                          ...}]
        :type   metadata: ``dict`` or ``None``

        :param  force: Force update of 'sshKeys'. If force is ``False`` (the
                       default), existing sshKeys will be retained. Setting
                       force to ``True`` will either replace sshKeys if a new
                       value is supplied, or deleted if no new value is
                       supplied.
:type force: ``bool`` :return: True if successful :rtype: ``bool`` """ if metadata: metadata = self._format_metadata('na', metadata) request = '/setCommonInstanceMetadata' project = self.ex_get_project() current_metadata = project.extra['commonInstanceMetadata'] fingerprint = current_metadata['fingerprint'] md_items = [] if 'items' in current_metadata: md_items = current_metadata['items'] # grab copy of current 'sshKeys' in case we want to retain them current_keys = "" for md in md_items: if md['key'] == 'sshKeys': current_keys = md['value'] new_md = self._set_project_metadata(metadata, force, current_keys) md = {'fingerprint': fingerprint, 'items': new_md} self.connection.async_request(request, method='POST', data=md) return True def ex_list_addresses(self, region=None): """ Return a list of static addresses for a region, 'global', or all. :keyword region: The region to return addresses from. For example: 'us-central1'. If None, will return addresses from region of self.zone. If 'all', will return all addresses. If 'global', it will return addresses in the global namespace. :type region: ``str`` or ``None`` :return: A list of static address objects. :rtype: ``list`` of :class:`GCEAddress` """ list_addresses = [] if region != 'global': region = self._set_region(region) if region is None: request = '/aggregated/addresses' elif region == 'global': request = '/global/addresses' else: request = '/regions/%s/addresses' % (region.name) response = self.connection.request(request, method='GET').object if 'items' in response: # The aggregated result returns dictionaries for each region if region is None: for v in response['items'].values(): region_addresses = [self._to_address(a) for a in v.get('addresses', [])] list_addresses.extend(region_addresses) else: list_addresses = [self._to_address(a) for a in response['items']] return list_addresses def ex_list_backendservices(self): """ Return a list of backend services. :return: A list of backend service objects. 
:rtype: ``list`` of :class:`GCEBackendService` """ list_backendservices = [] response = self.connection.request('/global/backendServices', method='GET').object list_backendservices = [self._to_backendservice(d) for d in response.get('items', [])] return list_backendservices def ex_list_healthchecks(self): """ Return the list of health checks. :return: A list of health check objects. :rtype: ``list`` of :class:`GCEHealthCheck` """ list_healthchecks = [] request = '/global/httpHealthChecks' response = self.connection.request(request, method='GET').object list_healthchecks = [self._to_healthcheck(h) for h in response.get('items', [])] return list_healthchecks def ex_list_firewalls(self): """ Return the list of firewalls. :return: A list of firewall objects. :rtype: ``list`` of :class:`GCEFirewall` """ list_firewalls = [] request = '/global/firewalls' response = self.connection.request(request, method='GET').object list_firewalls = [self._to_firewall(f) for f in response.get('items', [])] return list_firewalls def ex_list_forwarding_rules(self, region=None, global_rules=False): """ Return the list of forwarding rules for a region or all. :keyword region: The region to return forwarding rules from. For example: 'us-central1'. If None, will return forwarding rules from the region of self.region (which is based on self.zone). If 'all', will return forwarding rules for all regions, which does not include the global forwarding rules. :type region: ``str`` or :class:`GCERegion` or ``None`` :keyword global_rules: List global forwarding rules instead of per-region rules. Setting True will cause 'region' parameter to be ignored. :type global_rules: ``bool`` :return: A list of forwarding rule objects. 
:rtype: ``list`` of :class:`GCEForwardingRule` """ list_forwarding_rules = [] if global_rules: region = None request = '/global/forwardingRules' else: region = self._set_region(region) if region is None: request = '/aggregated/forwardingRules' else: request = '/regions/%s/forwardingRules' % (region.name) response = self.connection.request(request, method='GET').object if 'items' in response: # The aggregated result returns dictionaries for each region if not global_rules and region is None: for v in response['items'].values(): region_forwarding_rules = [ self._to_forwarding_rule(f) for f in v.get('forwardingRules', []) ] list_forwarding_rules.extend(region_forwarding_rules) else: list_forwarding_rules = [self._to_forwarding_rule(f) for f in response['items']] return list_forwarding_rules def list_images(self, ex_project=None, ex_include_deprecated=False): """ Return a list of image objects. If no project is specified, a list of all non-deprecated global and vendor images images is returned. By default, only non-deprecated images are returned. :keyword ex_project: Optional alternate project name. :type ex_project: ``str``, ``list`` of ``str``, or ``None`` :keyword ex_include_deprecated: If True, even DEPRECATED images will be returned. :type ex_include_deprecated: ``bool`` :return: List of GCENodeImage objects :rtype: ``list`` of :class:`GCENodeImage` """ dep = ex_include_deprecated if ex_project is not None: return self.ex_list_project_images(ex_project=ex_project, ex_include_deprecated=dep) image_list = self.ex_list_project_images(ex_project=None, ex_include_deprecated=dep) for img_proj in list(self.IMAGE_PROJECTS.keys()): try: image_list.extend( self.ex_list_project_images(ex_project=img_proj, ex_include_deprecated=dep)) except: # do not break if an OS type is invalid pass return image_list def ex_list_project_images(self, ex_project=None, ex_include_deprecated=False): """ Return a list of image objects for a project. 
If no project is specified, only a list of 'global' images is returned. :keyword ex_project: Optional alternate project name. :type ex_project: ``str``, ``list`` of ``str``, or ``None`` :keyword ex_include_deprecated: If True, even DEPRECATED images will be returned. :type ex_include_deprecated: ``bool`` :return: List of GCENodeImage objects :rtype: ``list`` of :class:`GCENodeImage` """ list_images = [] request = '/global/images' if ex_project is None: response = self.connection.request(request, method='GET').object for img in response.get('items', []): if 'deprecated' not in img: list_images.append(self._to_node_image(img)) else: if ex_include_deprecated: list_images.append(self._to_node_image(img)) else: list_images = [] # Save the connection request_path save_request_path = self.connection.request_path if isinstance(ex_project, str): ex_project = [ex_project] for proj in ex_project: # Override the connection request path new_request_path = save_request_path.replace(self.project, proj) self.connection.request_path = new_request_path try: response = self.connection.request(request, method='GET').object except: raise finally: # Restore the connection request_path self.connection.request_path = save_request_path for img in response.get('items', []): if 'deprecated' not in img: list_images.append(self._to_node_image(img)) else: if ex_include_deprecated: list_images.append(self._to_node_image(img)) return list_images def list_locations(self): """ Return a list of locations (zones). The :class:`ex_list_zones` method returns more comprehensive results, but this is here for compatibility. :return: List of NodeLocation objects :rtype: ``list`` of :class:`NodeLocation` """ list_locations = [] request = '/zones' response = self.connection.request(request, method='GET').object list_locations = [self._to_node_location(l) for l in response['items']] return list_locations def ex_list_routes(self): """ Return the list of routes. :return: A list of route objects. 
        :rtype: ``list`` of :class:`GCERoute`
        """
        list_routes = []
        request = '/global/routes'
        response = self.connection.request(request, method='GET').object
        list_routes = [self._to_route(n)
                       for n in response.get('items', [])]
        return list_routes

    def ex_list_sslcertificates(self):
        """
        Retrieves the list of SslCertificate resources available to the
        specified project.

        Scopes needed - one of the following:

        * https://www.googleapis.com/auth/cloud-platform
        * https://www.googleapis.com/auth/compute
        * https://www.googleapis.com/auth/compute.readonly

        :return: A list of SSLCertificate objects.
        :rtype: ``list`` of :class:`GCESslCertificate`
        """
        list_data = []
        request = '/global/sslCertificates'
        response = self.connection.request(request, method='GET').object
        list_data = [self._to_sslcertificate(a)
                     for a in response.get('items', [])]
        return list_data

    def ex_list_subnetworks(self, region=None):
        """
        Return the list of subnetworks.

        :keyword  region: Region for the subnetwork. Specify 'all' to return
                          the aggregated list of subnetworks.
        :type     region: ``str`` or :class:`GCERegion`

        :return: A list of subnetwork objects.
        :rtype: ``list`` of :class:`GCESubnetwork`
        """
        region = self._set_region(region)
        if region is None:
            request = '/aggregated/subnetworks'
        else:
            request = '/regions/%s/subnetworks' % (region.name)

        list_subnetworks = []
        response = self.connection.request(request, method='GET').object
        if 'items' in response:
            # The aggregated result keys subnetworks by region; a subnetwork
            # deleted mid-listing raises ResourceNotFoundError and is
            # skipped.
            if region is None:
                for v in response['items'].values():
                    for i in v.get('subnetworks', []):
                        try:
                            list_subnetworks.append(self._to_subnetwork(i))
                        except ResourceNotFoundError:
                            pass
            else:
                for i in response['items']:
                    try:
                        list_subnetworks.append(self._to_subnetwork(i))
                    except ResourceNotFoundError:
                        pass
        return list_subnetworks

    def ex_list_networks(self):
        """
        Return the list of networks.

        :return: A list of network objects.
        :rtype: ``list`` of :class:`GCENetwork`
        """
        list_networks = []
        request = '/global/networks'
        response = self.connection.request(request, method='GET').object
        list_networks = [self._to_network(n)
                         for n in response.get('items', [])]
        return list_networks

    def list_nodes(self, ex_zone=None, ex_use_disk_cache=True):
        """
        Return a list of nodes in the current zone or all zones.

        :keyword  ex_zone:  Optional zone name or 'all'
        :type     ex_zone:  ``str`` or :class:`GCEZone` or
                            :class:`NodeLocation` or ``None``

        :keyword  ex_use_disk_cache:  Disk information for each node will
                                      retrieved from a dictionary rather
                                      than making a distinct API call for it.
        :type     ex_use_disk_cache:  ``bool``

        :return:  List of Node objects
        :rtype:   ``list`` of :class:`Node`
        """
        list_nodes = []
        zone = self._set_zone(ex_zone)
        if zone is None:
            request = '/aggregated/instances'
        else:
            request = '/zones/%s/instances' % (zone.name)

        response = self.connection.request(request, method='GET').object

        if 'items' in response:
            # The aggregated response returns a dict for each zone
            if zone is None:
                # Create volume cache now for fast lookups of disk info.
                self._ex_populate_volume_dict()
                for v in response['items'].values():
                    for i in v.get('instances', []):
                        try:
                            list_nodes.append(
                                self._to_node(
                                    i, use_disk_cache=ex_use_disk_cache))
                        # If a GCE node has been deleted between
                        #   - it was listed by
                        #     `request('.../instances', 'GET')`
                        #   - it is converted by `self._to_node(i)`
                        # `_to_node()` will raise a ResourceNotFoundError.
                        #
                        # Just ignore that node and return the list of the
                        # other nodes.
                        except ResourceNotFoundError:
                            pass
            else:
                for i in response['items']:
                    try:
                        list_nodes.append(
                            self._to_node(i,
                                          use_disk_cache=ex_use_disk_cache))
                    # If a GCE node has been deleted between
                    #   - it was listed by `request('.../instances', 'GET')`
                    #   - it is converted by `self._to_node(i)`
                    # `_to_node()` will raise a ResourceNotFoundError.
                    #
                    # Just ignore that node and return the list of the
                    # other nodes.
                    except ResourceNotFoundError:
                        pass
        # Clear the volume cache as lookups are complete.
        self._ex_volume_dict = {}
        return list_nodes

    def ex_list_regions(self):
        """
        Return the list of regions.

        :return: A list of region objects.
        :rtype: ``list`` of :class:`GCERegion`
        """
        list_regions = []
        request = '/regions'
        response = self.connection.request(request, method='GET').object
        list_regions = [self._to_region(r) for r in response['items']]
        return list_regions

    def list_sizes(self, location=None):
        """
        Return a list of sizes (machineTypes) in a zone.

        :keyword  location: Location or Zone for sizes
        :type     location: ``str`` or :class:`GCEZone` or
                            :class:`NodeLocation` or ``None``

        :return:  List of GCENodeSize objects
        :rtype:   ``list`` of :class:`GCENodeSize`
        """
        list_sizes = []
        zone = self._set_zone(location)
        if zone is None:
            request = '/aggregated/machineTypes'
        else:
            request = '/zones/%s/machineTypes' % (zone.name)

        response = self.connection.request(request, method='GET').object

        if 'items' in response:
            # The aggregated response returns a dict for each zone
            if zone is None:
                for v in response['items'].values():
                    zone_sizes = [self._to_node_size(s)
                                  for s in v.get('machineTypes', [])]
                    list_sizes.extend(zone_sizes)
            else:
                list_sizes = [self._to_node_size(s)
                              for s in response['items']]
        return list_sizes

    def ex_list_snapshots(self):
        """
        Return the list of disk snapshots in the project.

        :return:  A list of snapshot objects
        :rtype:   ``list`` of :class:`GCESnapshot`
        """
        list_snapshots = []
        request = '/global/snapshots'
        response = self.connection.request(request, method='GET').object
        list_snapshots = [self._to_snapshot(s)
                          for s in response.get('items', [])]
        return list_snapshots

    def ex_list_targethttpproxies(self):
        """
        Return the list of target HTTP proxies.
        :return:  A list of target http proxy objects
        :rtype:   ``list`` of :class:`GCETargetHttpProxy`
        """
        request = '/global/targetHttpProxies'
        response = self.connection.request(request, method='GET').object
        return [self._to_targethttpproxy(u)
                for u in response.get('items', [])]

    def ex_list_targethttpsproxies(self):
        """
        Return the list of target HTTPs proxies.

        :return:  A list of target https proxy objects
        :rtype:   ``list`` of :class:`GCETargetHttpsProxy`
        """
        request = '/global/targetHttpsProxies'
        response = self.connection.request(request, method='GET').object
        return [self._to_targethttpsproxy(x)
                for x in response.get('items', [])]

    def ex_list_targetinstances(self, zone=None):
        """
        Return the list of target instances.

        :keyword  zone: Optional zone name or 'all'; if omitted, the
                        driver's default zone resolution applies.
        :type     zone: ``str`` or :class:`GCEZone` or ``None``

        :return:  A list of target instance objects
        :rtype:   ``list`` of :class:`GCETargetInstance`
        """
        list_targetinstances = []
        # zone of None means 'all zones' -> use the aggregated endpoint.
        zone = self._set_zone(zone)
        if zone is None:
            request = '/aggregated/targetInstances'
        else:
            request = '/zones/%s/targetInstances' % (zone.name)
        response = self.connection.request(request, method='GET').object
        if 'items' in response:
            # The aggregated result returns dictionaries for each region
            if zone is None:
                for v in response['items'].values():
                    zone_targetinstances = [
                        self._to_targetinstance(t)
                        for t in v.get('targetInstances', [])
                    ]
                    list_targetinstances.extend(zone_targetinstances)
            else:
                list_targetinstances = [self._to_targetinstance(t)
                                        for t in response['items']]
        return list_targetinstances

    def ex_list_targetpools(self, region=None):
        """
        Return the list of target pools.

        :keyword  region: Optional region name or 'all'; if omitted, the
                          driver's default region resolution applies.
        :type     region: ``str`` or :class:`GCERegion` or ``None``

        :return:  A list of target pool objects
        :rtype:   ``list`` of :class:`GCETargetPool`
        """
        list_targetpools = []
        # region of None means 'all regions' -> use the aggregated endpoint.
        region = self._set_region(region)
        if region is None:
            request = '/aggregated/targetPools'
        else:
            request = '/regions/%s/targetPools' % (region.name)
        response = self.connection.request(request, method='GET').object
        if 'items' in response:
            # The aggregated result returns dictionaries for each region
            if region is None:
                for v in response['items'].values():
                    region_targetpools = [self._to_targetpool(t)
                                          for t in v.get('targetPools', [])]
                    list_targetpools.extend(region_targetpools)
            else:
                list_targetpools = [self._to_targetpool(t)
                                    for t in response['items']]
        return list_targetpools

    def ex_list_urlmaps(self):
        """
        Return the list of URL Maps in the project.

        :return:  A list of url map objects
        :rtype:   ``list`` of :class:`GCEUrlMap`
        """
        request = '/global/urlMaps'
        response = self.connection.request(request, method='GET').object
        return [self._to_urlmap(u) for u in response.get('items', [])]

    def ex_list_instancegroups(self, zone):
        """
        Retrieves the list of instance groups that are located in the
        specified project and zone.

        Scopes needed - one of the following:

        * https://www.googleapis.com/auth/cloud-platform
        * https://www.googleapis.com/auth/compute
        * https://www.googleapis.com/auth/compute.readonly

        :param  zone: The name of the zone where the instance group is
                      located.
        :type   zone: ``str``

        :return: A list of instance group mgr objects.
        :rtype: ``list`` of :class:`GCEInstanceGroup`
        """
        list_data = []
        # zone of None means 'all zones' -> use the aggregated endpoint.
        zone = self._set_zone(zone)
        if zone is None:
            request = '/aggregated/instanceGroups'
        else:
            request = '/zones/%s/instanceGroups' % (zone.name)
        response = self.connection.request(request, method='GET').object
        if 'items' in response:
            # The aggregated result returns dictionaries for each region
            if zone is None:
                for v in response['items'].values():
                    zone_data = [self._to_instancegroup(a)
                                 for a in v.get('instanceGroups', [])]
                    list_data.extend(zone_data)
            else:
                list_data = [self._to_instancegroup(a)
                             for a in response['items']]
        return list_data

    def ex_list_instancegroupmanagers(self, zone=None):
        """
        Return a list of Instance Group Managers.

        :keyword  zone: The zone to return InstanceGroupManagers from.
                        For example: 'us-central1-a'.  If None, will return
                        InstanceGroupManagers from self.zone.  If 'all', will
                        return all InstanceGroupManagers.
        :type     zone: ``str`` or ``None``

        :return:  A list of instance group mgr objects.
        :rtype:   ``list`` of :class:`GCEInstanceGroupManagers`
        """
        list_managers = []
        zone = self._set_zone(zone)
        if zone is None:
            request = '/aggregated/instanceGroupManagers'
        else:
            request = '/zones/%s/instanceGroupManagers' % (zone.name)
        response = self.connection.request(request, method='GET').object
        if 'items' in response:
            # The aggregated result returns dictionaries for each region
            if zone is None:
                for v in response['items'].values():
                    zone_managers = [
                        self._to_instancegroupmanager(a)
                        for a in v.get('instanceGroupManagers', [])
                    ]
                    list_managers.extend(zone_managers)
            else:
                list_managers = [self._to_instancegroupmanager(a)
                                 for a in response['items']]
        return list_managers

    def ex_list_instancetemplates(self):
        """
        Return the list of Instance Templates.

        :return:  A list of Instance Template Objects
        :rtype:   ``list`` of :class:`GCEInstanceTemplate`
        """
        request = '/global/instanceTemplates'
        response = self.connection.request(request, method='GET').object
        return [self._to_instancetemplate(u)
                for u in response.get('items', [])]

    def ex_list_autoscalers(self, zone=None):
        """
        Return the list of AutoScalers.

        :keyword  zone: The zone to return Autoscalers from.
                        For example: 'us-central1-a'.  If None, will return
                        Autoscalers from self.zone.  If 'all', will
                        return all Autoscalers.
        :type     zone: ``str`` or ``None``

        :return:  A list of AutoScaler Objects
        :rtype:   ``list`` of :class:`GCEAutoScaler`
        """
        list_autoscalers = []
        zone = self._set_zone(zone)
        if zone is None:
            request = '/aggregated/autoscalers'
        else:
            request = '/zones/%s/autoscalers' % (zone.name)
        response = self.connection.request(request, method='GET').object
        if 'items' in response:
            # The aggregated result returns dictionaries for each zone.
            if zone is None:
                for v in response['items'].values():
                    zone_as = [self._to_autoscaler(a)
                               for a in v.get('autoscalers', [])]
                    list_autoscalers.extend(zone_as)
            else:
                list_autoscalers = [self._to_autoscaler(a)
                                    for a in response['items']]
        return list_autoscalers

    def list_volumes(self, ex_zone=None):
        """
        Return a list of volumes for a zone or all.

        Will return list from provided zone, or from the default zone unless
        given the value of 'all'.

        :keyword  ex_zone: The zone to return volumes from.
        :type     ex_zone: ``str`` or :class:`GCEZone` or
                           :class:`NodeLocation` or ``None``

        :return: A list of volume objects.
:rtype: ``list`` of :class:`StorageVolume` """ list_volumes = [] zone = self._set_zone(ex_zone) if zone is None: request = '/aggregated/disks' else: request = '/zones/%s/disks' % (zone.name) response = self.connection.request(request, method='GET').object if 'items' in response: # The aggregated response returns a dict for each zone if zone is None: for v in response['items'].values(): zone_volumes = [self._to_storage_volume(d) for d in v.get('disks', [])] list_volumes.extend(zone_volumes) else: list_volumes = [self._to_storage_volume(d) for d in response['items']] return list_volumes def ex_list_zones(self): """ Return the list of zones. :return: A list of zone objects. :rtype: ``list`` of :class:`GCEZone` """ list_zones = [] request = '/zones' response = self.connection.request(request, method='GET').object list_zones = [self._to_zone(z) for z in response['items']] return list_zones def ex_create_address(self, name, region=None, address=None, description=None): """ Create a static address in a region, or a global address. :param name: Name of static address :type name: ``str`` :keyword region: Name of region for the address (e.g. 'us-central1') Use 'global' to create a global address. :type region: ``str`` or :class:`GCERegion` :keyword address: Ephemeral IP address to promote to a static one (e.g. 'xxx.xxx.xxx.xxx') :type address: ``str`` or ``None`` :keyword description: Optional descriptive comment. 
:type description: ``str`` or ``None`` :return: Static Address object :rtype: :class:`GCEAddress` """ region = region or self.region if region != 'global' and not hasattr(region, 'name'): region = self.ex_get_region(region) elif region is None: raise ValueError('REGION_NOT_SPECIFIED', 'Region must be provided for an address') address_data = {'name': name} if address: address_data['address'] = address if description: address_data['description'] = description if region == 'global': request = '/global/addresses' else: request = '/regions/%s/addresses' % (region.name) self.connection.async_request(request, method='POST', data=address_data) return self.ex_get_address(name, region=region) def ex_create_autoscaler(self, name, zone, instance_group, policy, description=None): """ Create an Autoscaler for an Instance Group. :param name: The name of the Autoscaler :type name: ``str`` :param zone: The zone to which the Instance Group belongs :type zone: ``str`` or :class:`GCEZone` :param instance_group: An Instance Group Manager object. :type: :class:`GCEInstanceGroupManager` :param policy: A dict containing policy configuration. See the API documentation for Autoscalers for more details. :type: ``dict`` :return: An Autoscaler object. 
:rtype: :class:`GCEAutoscaler` """ zone = zone or self.zone autoscaler_data = {} autoscaler_data = {'name': name} if not hasattr(zone, 'name'): zone = self.ex_get_zone(zone) autoscaler_data['zone'] = zone.extra['selfLink'] # TODO(supertom): we should validate the policy autoscaler_data['autoscalingPolicy'] = policy request = '/zones/%s/autoscalers' % zone.name autoscaler_data['target'] = instance_group.extra['selfLink'] self.connection.async_request(request, method='POST', data=autoscaler_data) return self.ex_get_autoscaler(name, zone) def ex_create_backend(self, instance_group, balancing_mode='UTILIZATION', max_utilization=None, max_rate=None, max_rate_per_instance=None, capacity_scaler=1, description=None): """ Helper Object to create a backend. :param instance_group: The Instance Group for this Backend. :type instance_group: :class: `GCEInstanceGroup` :param balancing_mode: Specifies the balancing mode for this backend. For global HTTP(S) load balancing, the valid values are UTILIZATION (default) and RATE. For global SSL load balancing, the valid values are UTILIZATION (default) and CONNECTION. :type balancing_mode: ``str`` :param max_utilization: Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0]. :type max_utilization: ``float`` :param max_rate: The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set. :type max_rate: ``int`` :param max_rate_per_instance: The max requests per second (RPS) that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set. 
:type max_rate_per_instance: ``float`` :param capacity_scaler: A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE, or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. Valid range is [0.0,1.0]. :type capacity_scaler: ``float`` :param description: An optional description of this resource. Provide this property when you create the resource. :type description: ``str`` :return: A GCEBackend object. :rtype: :class: `GCEBackend` """ return GCEBackend( instance_group=instance_group, balancing_mode=balancing_mode, max_utilization=max_utilization, max_rate=max_rate, max_rate_per_instance=max_rate_per_instance, capacity_scaler=capacity_scaler, description=description) def ex_create_backendservice(self, name, healthchecks, backends=[], protocol=None, description=None, timeout_sec=None, enable_cdn=False, port=None, port_name=None): """ Create a global Backend Service. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :type name: ``str`` :param healthchecks: A list of HTTP Health Checks to use for this service. There must be at least one. :type healthchecks: ``list`` of (``str`` or :class:`GCEHealthCheck`) :keyword backends: The list of backends that serve this BackendService. 
:type backends: ``list`` of :class `GCEBackend` or list of ``dict`` :keyword timeout_sec: How many seconds to wait for the backend before considering it a failed request. Default is 30 seconds. :type timeout_sec: ``integer`` :keyword enable_cdn: If true, enable Cloud CDN for this BackendService. When the load balancing scheme is INTERNAL, this field is not used. :type enable_cdn: ``bool`` :keyword port: Deprecated in favor of port_name. The TCP port to connect on the backend. The default value is 80. This cannot be used for internal load balancing. :type port: ``integer`` :keyword port_name: Name of backend port. The same name should appear in the instance groups referenced by this service. :type port_name: ``str`` :keyword protocol: The protocol this Backend Service uses to communicate with backends. Possible values are HTTP, HTTPS, HTTP2, TCP and SSL. :type protocol: ``str`` :return: A Backend Service object. :rtype: :class:`GCEBackendService` """ backendservice_data = {'name': name, 'healthChecks': [], 'backends': [], 'enableCDN': enable_cdn} for hc in healthchecks: if not hasattr(hc, 'extra'): hc = self.ex_get_healthcheck(name=hc) backendservice_data['healthChecks'].append(hc.extra['selfLink']) for be in backends: if isinstance(be, GCEBackend): backendservice_data['backends'].append(be.to_backend_dict()) else: backendservice_data['backends'].append(be) if port: backendservice_data['port'] = port if port_name: backendservice_data['portName'] = port_name if timeout_sec: backendservice_data['timeoutSec'] = timeout_sec if protocol: if protocol in self.BACKEND_SERVICE_PROTOCOLS: backendservice_data['protocol'] = protocol else: raise ValueError('Protocol must be one of %s' % ','.join(self.BACKEND_SERVICE_PROTOCOLS)) if description: backendservice_data['description'] = description request = '/global/backendServices' self.connection.async_request(request, method='POST', data=backendservice_data) return self.ex_get_backendservice(name) def ex_create_healthcheck(self, 
name, host=None, path=None, port=None, interval=None, timeout=None, unhealthy_threshold=None, healthy_threshold=None, description=None): """ Create an Http Health Check. :param name: Name of health check :type name: ``str`` :keyword host: Hostname of health check request. Defaults to empty and public IP is used instead. :type host: ``str`` :keyword path: The request path for the check. Defaults to /. :type path: ``str`` :keyword port: The TCP port number for the check. Defaults to 80. :type port: ``int`` :keyword interval: How often (in seconds) to check. Defaults to 5. :type interval: ``int`` :keyword timeout: How long to wait before failing. Defaults to 5. :type timeout: ``int`` :keyword unhealthy_threshold: How many failures before marking unhealthy. Defaults to 2. :type unhealthy_threshold: ``int`` :keyword healthy_threshold: How many successes before marking as healthy. Defaults to 2. :type healthy_threshold: ``int`` :keyword description: The description of the check. Defaults to None. :type description: ``str`` or ``None`` :return: Health Check object :rtype: :class:`GCEHealthCheck` """ hc_data = {} hc_data['name'] = name if host: hc_data['host'] = host if description: hc_data['description'] = description # As of right now, the 'default' values aren't getting set when called # through the API, so set them explicitly hc_data['requestPath'] = path or '/' hc_data['port'] = port or 80 hc_data['checkIntervalSec'] = interval or 5 hc_data['timeoutSec'] = timeout or 5 hc_data['unhealthyThreshold'] = unhealthy_threshold or 2 hc_data['healthyThreshold'] = healthy_threshold or 2 request = '/global/httpHealthChecks' self.connection.async_request(request, method='POST', data=hc_data) return self.ex_get_healthcheck(name) def ex_create_firewall(self, name, allowed, network='default', source_ranges=None, source_tags=None, target_tags=None): """ Create a firewall on a network. Firewall rules should be supplied in the "allowed" field. 
        This is a list of dictionaries formatted like so ("ports" is
        optional)::

            [{"IPProtocol": "<protocol string or number>",
              "ports": "<port_numbers or ranges>"}]

        For example, to allow tcp on port 8080 and udp on all ports, 'allowed'
        would be::

            [{"IPProtocol": "tcp",
              "ports": ["8080"]},
             {"IPProtocol": "udp"}]

        See `Firewall Reference <https://developers.google.com/compute/docs/
        reference/latest/firewalls/insert>`_ for more information.

        :param  name: Name of the firewall to be created
        :type   name: ``str``

        :param  allowed: List of dictionaries with rules
        :type   allowed: ``list`` of ``dict``

        :keyword  network: The network that the firewall applies to.
        :type     network: ``str`` or :class:`GCENetwork`

        :keyword  source_ranges: A list of IP ranges in CIDR format that the
                                 firewall should apply to. Defaults to
                                 ['0.0.0.0/0']
        :type     source_ranges: ``list`` of ``str``

        :keyword  source_tags: A list of source instance tags the rules apply
                               to.
        :type     source_tags: ``list`` of ``str``

        :keyword  target_tags: A list of target instance tags the rules apply
                               to.
        :type     target_tags: ``list`` of ``str``

        :return:  Firewall object
        :rtype:   :class:`GCEFirewall`
        """
        firewall_data = {}
        # Accept either a network name or a GCENetwork object.
        if not hasattr(network, 'name'):
            nw = self.ex_get_network(network)
        else:
            nw = network

        firewall_data['name'] = name
        firewall_data['allowed'] = allowed
        firewall_data['network'] = nw.extra['selfLink']
        # If neither source restriction was given, default to "everywhere".
        if source_ranges is None and source_tags is None:
            source_ranges = ['0.0.0.0/0']
        if source_ranges is not None:
            firewall_data['sourceRanges'] = source_ranges
        if source_tags is not None:
            firewall_data['sourceTags'] = source_tags
        if target_tags is not None:
            firewall_data['targetTags'] = target_tags

        request = '/global/firewalls'
        self.connection.async_request(request, method='POST',
                                      data=firewall_data)
        return self.ex_get_firewall(name)

    def ex_create_forwarding_rule(self, name, target=None, region=None,
                                  protocol='tcp', port_range=None,
                                  address=None, description=None,
                                  global_rule=False, targetpool=None,
                                  lb_scheme=None):
        """
        Create a forwarding rule.

        :param  name: Name of forwarding rule to be created
        :type   name: ``str``

        :keyword  target: The target of this forwarding rule.  For global
                          forwarding rules this must be a global
                          TargetHttpProxy. For regional rules this may be
                          either a TargetPool or TargetInstance. If passed
                          a string instead of the object, it will be the name
                          of a TargetHttpProxy for global rules or a
                          TargetPool for regional rules.  A TargetInstance
                          must be passed by object. (required)
        :type     target: ``str`` or :class:`GCETargetHttpProxy` or
                          :class:`GCETargetInstance` or :class:`GCETargetPool`

        :keyword  region: Region to create the forwarding rule in.  Defaults
                          to self.region.  Ignored if global_rule is True.
        :type     region: ``str`` or :class:`GCERegion`

        :keyword  protocol: Should be 'tcp' or 'udp'
        :type     protocol: ``str``

        :keyword  port_range: Single port number or range separated by a dash.
                              Examples: '80', '5000-5999'.  Required for
                              global forwarding rules, optional for regional
                              rules.
        :type     port_range: ``str``

        :keyword  address: Optional static address for forwarding rule. Must
                           be in same region.
        :type     address: ``str`` or :class:`GCEAddress`

        :keyword  description: The description of the forwarding rule.
                               Defaults to None.
        :type     description: ``str`` or ``None``

        :keyword  targetpool: Deprecated parameter for backwards
                              compatibility.  Use target instead.
        :type     targetpool: ``str`` or :class:`GCETargetPool`

        :keyword  lb_scheme: Load balancing scheme, can be 'EXTERNAL' or
                             'INTERNAL'. Defaults to 'EXTERNAL'.
        :type     lb_scheme: ``str`` or ``None``

        :return:  Forwarding Rule object
        :rtype:   :class:`GCEForwardingRule`
        """
        forwarding_rule_data = {'name': name}
        if global_rule:
            # Global rules take a TargetHttpProxy; resolve names to objects.
            if not hasattr(target, 'name'):
                target = self.ex_get_targethttpproxy(target)
        else:
            region = region or self.region
            if not hasattr(region, 'name'):
                region = self.ex_get_region(region)
            forwarding_rule_data['region'] = region.extra['selfLink']

            if not target:
                target = targetpool  # Backwards compatibility
            # A string target is treated as a TargetPool name; a
            # TargetInstance must already be an object at this point.
            if not hasattr(target, 'name'):
                target = self.ex_get_targetpool(target, region)

        forwarding_rule_data['target'] = target.extra['selfLink']
        forwarding_rule_data['IPProtocol'] = protocol.upper()
        if address:
            if not hasattr(address, 'name'):
                address = self.ex_get_address(
                    address, 'global' if global_rule else region)
            forwarding_rule_data['IPAddress'] = address.address
        if port_range:
            forwarding_rule_data['portRange'] = port_range
        if description:
            forwarding_rule_data['description'] = description
        if lb_scheme:
            forwarding_rule_data['loadBalancingScheme'] = lb_scheme

        if global_rule:
            request = '/global/forwardingRules'
        else:
            request = '/regions/%s/forwardingRules' % (region.name)

        self.connection.async_request(request, method='POST',
                                      data=forwarding_rule_data)

        return self.ex_get_forwarding_rule(name, global_rule=global_rule)

    def ex_create_image(self, name, volume, description=None, family=None,
                        guest_os_features=None, use_existing=True,
                        wait_for_completion=True):
        """
        Create an image from the provided volume.

        :param  name: The name of the image to create.
        :type   name: ``str``

        :param  volume: The volume to use to create the image, or the
                        Google Cloud Storage URI
        :type   volume: ``str`` or :class:`StorageVolume`

        :keyword  description: Description of the new Image
        :type     description: ``str``

        :keyword  family: The name of the image family to which this image
                          belongs. If you create resources by specifying an
                          image family instead of a specific image name, the
                          resource uses the latest non-deprecated image that
                          is set with that family name.
        :type     family: ``str``

        :keyword  guest_os_features: Features of the guest operating system,
                                     valid for bootable images only.
        :type     guest_os_features: ``list`` of ``str`` or ``None``

        :keyword  use_existing: If True and an image with the given name
                                already exists, return an object for that
                                image instead of attempting to create
                                a new image.
        :type     use_existing: ``bool``

        :keyword  wait_for_completion: If True, wait until the new image is
                                       created before returning a new
                                       NodeImage.  Otherwise, return a new
                                       NodeImage instance, and let the user
                                       track the creation progress.
        :type     wait_for_completion: ``bool``

        :return:  A GCENodeImage object for the new image
        :rtype:   :class:`GCENodeImage`
        """
        image_data = {}
        image_data['name'] = name
        image_data['description'] = description
        image_data['family'] = family
        if isinstance(volume, StorageVolume):
            image_data['sourceDisk'] = volume.extra['selfLink']
            image_data['zone'] = volume.extra['zone'].name
        elif (isinstance(volume, str) and
              volume.startswith('https://') and
              volume.endswith('tar.gz')):
            # A GCS URL pointing at a tarball of a raw disk.
            image_data['rawDisk'] = {'source': volume, 'containerType': 'TAR'}
        else:
            raise ValueError('Source must be instance of StorageVolume or URI')
        if guest_os_features:
            image_data['guestOsFeatures'] = []
            if isinstance(guest_os_features, str):
                guest_os_features = [guest_os_features]
            for feature in guest_os_features:
                image_data['guestOsFeatures'].append({'type': feature})
        request = '/global/images'

        try:
            if wait_for_completion:
                self.connection.async_request(request, method='POST',
                                              data=image_data)
            else:
                self.connection.request(request, method='POST',
                                        data=image_data)
        except ResourceExistsError:
            # Only swallow "already exists" when the caller opted in.
            e = sys.exc_info()[1]
            if not use_existing:
                raise e

        return self.ex_get_image(name)

    def ex_copy_image(self, name, url, description=None, family=None,
                      guest_os_features=None):
        """
        Copy an image to your image collection.

        :param  name: The name of the image
        :type   name: ``str``

        :param  url: The URL to the image. The URL can start with `gs://`
        :type   url: ``str``

        :param  description: The description of the image
        :type   description: ``str``

        :param  family: The family of the image
        :type   family: ``str``

        :param  guest_os_features: The features of the guest operating
                                   system.
        :type   guest_os_features: ``list`` of ``str`` or ``None``

        :return:  NodeImage object based on provided information or None if
                  an image with that name is not found.
        :rtype:   :class:`NodeImage` or ``None``
        """
        # The URL for an image can start with gs://
        if url.startswith('gs://'):
            url = url.replace('gs://', 'https://storage.googleapis.com/', 1)

        image_data = {
            'name': name,
            'description': description,
            'family': family,
            'sourceType': 'RAW',
            'rawDisk': {
                'source': url,
            },
        }

        if guest_os_features:
            image_data['guestOsFeatures'] = []
            if isinstance(guest_os_features, str):
                guest_os_features = [guest_os_features]
            for feature in guest_os_features:
                image_data['guestOsFeatures'].append({'type': feature})

        request = '/global/images'
        self.connection.async_request(request, method='POST',
                                      data=image_data)
        return self.ex_get_image(name)

    def ex_create_instancegroup(self, name, zone, description=None,
                                network=None, subnetwork=None,
                                named_ports=None):
        """
        Creates an instance group in the specified project using the
        parameters that are included in the request.

        Scopes needed - one of the following:

        * https://www.googleapis.com/auth/cloud-platform
        * https://www.googleapis.com/auth/compute

        :param  name: Required. The name of the instance group. The name
                      must be 1-63 characters long, and comply with RFC1035.
        :type   name: ``str``

        :param  zone: The URL of the zone where the instance group is
                      located.
        :type   zone: :class:`GCEZone`

        :keyword  description: An optional description of this resource.
                               Provide this property when you create the
                               resource.
        :type     description: ``str``

        :keyword  network: The URL of the network to which all instances in
                           the instance group belong.
        :type     network: :class:`GCENetwork`

        :keyword  subnetwork: The URL of the subnetwork to which all
                              instances in the instance group belong.
        :type     subnetwork: :class:`GCESubnetwork`

        :keyword  named_ports: Assigns a name to a port number. For example:
                               {name: "http", port: 80}  This allows the
                               system to reference ports by the assigned
                               name instead of a port number. Named ports
                               can also contain multiple ports. For example:
                               [{name: "http", port: 80},
                                {name: "http", port: 8080}]
                               Named ports apply to all instances in this
                               instance group.
        :type     named_ports: ``list`` of {'name': ``str``, 'port`: ``int``}

        :return:  `GCEInstanceGroup` object.
        :rtype:   :class:`GCEInstanceGroup`
        """
        zone = zone or self.zone
        # Accept either a zone name or a GCEZone object.
        if not hasattr(zone, 'name'):
            zone = self.ex_get_zone(zone)
        request = "/zones/%s/instanceGroups" % (zone.name)
        request_data = {}
        request_data['name'] = name
        request_data['zone'] = zone.extra['selfLink']
        if description:
            request_data['description'] = description
        if network:
            request_data['network'] = network.extra['selfLink']
        if subnetwork:
            request_data['subnetwork'] = subnetwork.extra['selfLink']
        if named_ports:
            request_data['namedPorts'] = named_ports

        self.connection.async_request(request, method='POST',
                                      data=request_data)
        return self.ex_get_instancegroup(name, zone)

    def ex_create_instancegroupmanager(self, name, zone, template, size,
                                       base_instance_name=None,
                                       description=None):
        """
        Create a Managed Instance Group.

        :param  name: Name of the Instance Group.
        :type   name: ``str``

        :param  zone: The zone to which the Instance Group belongs
        :type   zone: ``str`` or :class:`GCEZone` or ``None``

        :param  template: The Instance Template.  Should be an instance
                          of GCEInstanceTemplate or a string.
        :type   template: ``str`` or :class:`GCEInstanceTemplate`

        :param  base_instance_name: The prefix for each instance created.
                                    If None, Instance Group name will be used.
        :type   base_instance_name: ``str``

        :param  description: User-supplied text about the Instance Group.
        :type   description: ``str``

        :return:  An Instance Group Manager object.
        :rtype:   :class:`GCEInstanceGroupManager`
        """
        zone = zone or self.zone
        if not hasattr(zone, 'name'):
            zone = self.ex_get_zone(zone)
        request = '/zones/%s/instanceGroupManagers' % (zone.name)

        manager_data = {}

        # If the user gave us a name, we fetch the GCEInstanceTemplate for it.
        if not hasattr(template, 'name'):
            template = self.ex_get_instancetemplate(template)
        manager_data['instanceTemplate'] = template.extra['selfLink']

        # If base_instance_name is not set, we use name.
        manager_data['baseInstanceName'] = name
        if base_instance_name is not None:
            manager_data['baseInstanceName'] = base_instance_name

        manager_data['name'] = name
        manager_data['targetSize'] = size
        manager_data['description'] = description

        self.connection.async_request(request, method='POST',
                                      data=manager_data)

        return self.ex_get_instancegroupmanager(name, zone)

    def ex_create_route(self, name, dest_range, priority=500,
                        network="default", tags=None, next_hop=None,
                        description=None):
        """
        Create a route.

        :param  name: Name of route to be created
        :type   name: ``str``

        :param  dest_range: Address range of route in CIDR format.
        :type   dest_range: ``str``

        :param  priority: Priority value, lower values take precedence
        :type   priority: ``int``

        :param  network: The network the route belongs to. Can be either the
                         full URL of the network, the name of the network or
                         a libcloud object.
        :type   network: ``str`` or ``GCENetwork``

        :param  tags: List of instance-tags for routing, empty for all nodes
        :type   tags: ``list`` of ``str`` or ``None``

        :param  next_hop: Next traffic hop. Use ``None`` for the default
                          Internet gateway, or specify an instance or IP
                          address.
        :type   next_hop: ``str``, ``Node``, or ``None``

        :param  description: Custom description for the route.
:type description: ``str`` or ``None`` :return: Route object :rtype: :class:`GCERoute` """ route_data = {} route_data['name'] = name route_data['destRange'] = dest_range route_data['priority'] = priority route_data['description'] = description if isinstance(network, str) and network.startswith('https://'): network_uri = network elif isinstance(network, str): network = self.ex_get_network(network) network_uri = network.extra['selfLink'] else: network_uri = network.extra['selfLink'] route_data['network'] = network_uri route_data['tags'] = tags if next_hop is None: url = 'https://www.googleapis.com/compute/%s/projects/%s/%s' % ( API_VERSION, self.project, "global/gateways/default-internet-gateway") route_data['nextHopGateway'] = url elif isinstance(next_hop, str): route_data['nextHopIp'] = next_hop else: route_data['nextHopInstance'] = next_hop.extra['selfLink'] request = '/global/routes' self.connection.async_request(request, method='POST', data=route_data) return self.ex_get_route(name) def ex_create_sslcertificate(self, name, certificate=None, private_key=None, description=None): """ Creates a SslCertificate resource in the specified project using the data included in the request. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :type name: ``str`` :param certificate: A string containing local certificate file in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert. 
:type certificate: ``str`` :param private_key: A string containing a write-only private key in PEM format. Only insert RPCs will include this field. :type private_key: ``str`` :keyword description: An optional description of this resource. Provide this property when you create the resource. :type description: ``str`` :return: `GCESslCertificate` object. :rtype: :class:`GCESslCertificate` """ request = "/global/sslCertificates" % () request_data = {} request_data['name'] = name request_data['certificate'] = certificate request_data['privateKey'] = private_key request_data['description'] = description self.connection.async_request(request, method='POST', data=request_data) return self.ex_get_sslcertificate(name) def ex_create_subnetwork(self, name, cidr=None, network=None, region=None, description=None): """ Create a subnetwork. :param name: Name of subnetwork to be created :type name: ``str`` :param cidr: Address range of network in CIDR format. :type cidr: ``str`` :param network: The network name or object this subnet belongs to. :type network: ``str`` or :class:`GCENetwork` :param region: The region the subnetwork belongs to. :type region: ``str`` or :class:`GCERegion` :param description: Custom description for the network. 
:type description: ``str`` or ``None`` :return: Subnetwork object :rtype: :class:`GCESubnetwork` """ if not cidr: raise ValueError("Must provide an IP network in CIDR notation.") if not network: raise ValueError("Must provide a network for the subnetwork.") else: if isinstance(network, GCENetwork): network_url = network.extra['selfLink'] else: if network.startswith('https://'): network_url = network else: network_obj = self.ex_get_network(network) network_url = network_obj.extra['selfLink'] if not region: raise ValueError("Must provide a region for the subnetwork.") else: if isinstance(region, GCERegion): region_url = region.extra['selfLink'] else: if region.startswith('https://'): region_url = region else: region_obj = self.ex_get_region(region) region_url = region_obj.extra['selfLink'] subnet_data = {} subnet_data['name'] = name subnet_data['description'] = description subnet_data['ipCidrRange'] = cidr subnet_data['network'] = network_url subnet_data['region'] = region_url region_name = region_url.split('/')[-1] request = '/regions/%s/subnetworks' % (region_name) self.connection.async_request(request, method='POST', data=subnet_data) return self.ex_get_subnetwork(name, region_name) def ex_create_network(self, name, cidr, description=None, mode="legacy"): """ Create a network. In November 2015, Google introduced Subnetworks and suggests using networks with 'auto' generated subnetworks. See, the `subnet docs `_ for more details. Note that libcloud follows the usability pattern from the Cloud SDK (e.g. 'gcloud compute' command-line utility) and uses 'mode' to specify 'auto', 'custom', or 'legacy'. :param name: Name of network to be created :type name: ``str`` :param cidr: Address range of network in CIDR format. :type cidr: ``str`` or ``None`` :param description: Custom description for the network. :type description: ``str`` or ``None`` :param mode: Create a 'auto', 'custom', or 'legacy' network. 
:type mode: ``str`` :return: Network object :rtype: :class:`GCENetwork` """ network_data = {} network_data['name'] = name network_data['description'] = description if mode.lower() not in ['auto', 'custom', 'legacy']: raise ValueError("Invalid network mode: '%s'. Must be 'auto', " "'custom', or 'legacy'." % mode) if cidr and mode in ['auto', 'custom']: raise ValueError("Can only specify IPv4Range with 'legacy' mode.") request = '/global/networks' if mode == 'legacy': if not cidr: raise ValueError("Must specify IPv4Range with 'legacy' mode.") network_data['IPv4Range'] = cidr else: network_data['autoCreateSubnetworks'] = (mode.lower() == 'auto') self.connection.async_request(request, method='POST', data=network_data) return self.ex_get_network(name) def create_node( self, name, size, image, location=None, ex_network='default', ex_subnetwork=None, ex_tags=None, ex_metadata=None, ex_boot_disk=None, use_existing_disk=True, external_ip='ephemeral', ex_disk_type='pd-standard', ex_disk_auto_delete=True, ex_service_accounts=None, description=None, ex_can_ip_forward=None, ex_disks_gce_struct=None, ex_nic_gce_struct=None, ex_on_host_maintenance=None, ex_automatic_restart=None, ex_preemptible=None, ex_image_family=None): """ Create a new node and return a node object for the node. :param name: The name of the node to create. :type name: ``str`` :param size: The machine type to use. :type size: ``str`` or :class:`GCENodeSize` :param image: The image to use to create the node (or, if attaching a persistent disk, the image used to create the disk) :type image: ``str`` or :class:`GCENodeImage` or ``None`` :keyword location: The location (zone) to create the node in. :type location: ``str`` or :class:`NodeLocation` or :class:`GCEZone` or ``None`` :keyword ex_network: The network to associate with the node. :type ex_network: ``str`` or :class:`GCENetwork` :keyword ex_subnetwork: The subnetwork to associate with the node. 
        :type     ex_subnetwork: ``str`` or :class:`GCESubnetwork`

        :keyword  ex_tags: A list of tags to associate with the node.
        :type     ex_tags: ``list`` of ``str`` or ``None``

        :keyword  ex_metadata: Metadata dictionary for instance.
        :type     ex_metadata: ``dict`` or ``None``

        :keyword  ex_boot_disk: The boot disk to attach to the instance.
        :type     ex_boot_disk: :class:`StorageVolume` or ``str`` or ``None``

        :keyword  use_existing_disk: If True and if an existing disk with the
                                     same name/location is found, use that
                                     disk instead of creating a new one.
        :type     use_existing_disk: ``bool``

        :keyword  external_ip: The external IP address to use.  If 'ephemeral'
                               (default), a new non-static address will be
                               used.  If 'None', then no external address will
                               be used.  To use an existing static IP address,
                               a GCEAddress object should be passed in.
        :type     external_ip: :class:`GCEAddress` or ``str`` or ``None``

        :keyword  ex_disk_type: Specify a pd-standard (default) disk or pd-ssd
                                for an SSD disk.
        :type     ex_disk_type: ``str`` or :class:`GCEDiskType`

        :keyword  ex_disk_auto_delete: Indicate that the boot disk should be
                                       deleted when the Node is deleted. Set
                                       to True by default.
        :type     ex_disk_auto_delete: ``bool``

        :keyword  ex_service_accounts: Specify a list of serviceAccounts when
                                       creating the instance. The format is a
                                       list of dictionaries containing email
                                       and list of scopes, e.g.
                                       [{'email':'default',
                                       'scopes':['compute', ...]}, ...]
                                       Scopes can either be full URLs or short
                                       names. If not provided, use the
                                       'default' service account email and a
                                       scope of 'devstorage.read_only'. Also
                                       accepts the aliases defined in
                                       'gcloud compute'.
        :type     ex_service_accounts: ``list``

        :keyword  description: The description of the node (instance).
        :type     description: ``str`` or ``None``

        :keyword  ex_can_ip_forward: Set to ``True`` to allow this node to
                                     send/receive non-matching src/dst
                                     packets.
        :type     ex_can_ip_forward: ``bool`` or ``None``

        :keyword  ex_disks_gce_struct: Support for passing in the GCE-specific
                                       formatted disks[] structure. No attempt
                                       is made to ensure proper formatting of
                                       the disks[] structure. Using this
                                       structure obviates the need of using
                                       other disk params like 'ex_boot_disk',
                                       etc. See the GCE docs for specific
                                       details.
        :type     ex_disks_gce_struct: ``list`` or ``None``

        :keyword  ex_nic_gce_struct: Support passing in the GCE-specific
                                     formatted networkInterfaces[] structure.
                                     No attempt is made to ensure proper
                                     formatting of the networkInterfaces[]
                                     data. Using this structure obviates the
                                     need of using 'external_ip' and
                                     'ex_network'.  See the GCE docs for
                                     details.
        :type     ex_nic_gce_struct: ``list`` or ``None``

        :keyword  ex_on_host_maintenance: Defines whether node should be
                                          terminated or migrated when host
                                          machine goes down. Acceptable values
                                          are: 'MIGRATE' or 'TERMINATE' (If
                                          not supplied, value will be reset to
                                          GCE default value for the instance
                                          type.)
        :type     ex_on_host_maintenance: ``str`` or ``None``

        :keyword  ex_automatic_restart: Defines whether the instance should be
                                        automatically restarted when it is
                                        terminated by Compute Engine. (If not
                                        supplied, value will be set to the GCE
                                        default value for the instance type.)
        :type     ex_automatic_restart: ``bool`` or ``None``

        :keyword  ex_preemptible: Defines whether the instance is preemptible.
                                  (If not supplied, the instance will not be
                                  preemptible)
        :type     ex_preemptible: ``bool`` or ``None``

        :keyword  ex_image_family: Determine image from an 'Image Family'
                                   instead of by name. 'image' should be None
                                   to use this keyword.
        :type     ex_image_family: ``str`` or ``None``

        :return:  A Node object for the new node.
        :rtype:   :class:`Node`
        """
        # Validate mutually-exclusive and required root-device arguments
        # before making any API calls.
        if ex_boot_disk and ex_disks_gce_struct:
            raise ValueError("Cannot specify both 'ex_boot_disk' and "
                             "'ex_disks_gce_struct'")
        if image and ex_image_family:
            raise ValueError("Cannot specify both 'image' and "
                             "'ex_image_family'")

        if not (image or ex_image_family or ex_boot_disk or
                ex_disks_gce_struct):
            raise ValueError("Missing root device or image. Must specify an "
                             "'image', 'ex_image_family', existing "
                             "'ex_boot_disk', or use the "
                             "'ex_disks_gce_struct'.")

        # Resolve string/None arguments into their driver objects.  Each
        # 'hasattr(..., "name")' test distinguishes an already-resolved
        # object from a plain string name.
        location = location or self.zone
        if not hasattr(location, 'name'):
            location = self.ex_get_zone(location)
        if not hasattr(size, 'name'):
            size = self.ex_get_size(size, location)
        if not hasattr(ex_network, 'name'):
            ex_network = self.ex_get_network(ex_network)
        if ex_subnetwork and not hasattr(ex_subnetwork, 'name'):
            ex_subnetwork = \
                self.ex_get_subnetwork(ex_subnetwork,
                                       region=self._get_region_from_zone(
                                           location))
        if ex_image_family:
            image = self.ex_get_image_from_family(ex_image_family)
        if image and not hasattr(image, 'name'):
            image = self.ex_get_image(image)
        if not hasattr(ex_disk_type, 'name'):
            ex_disk_type = self.ex_get_disktype(ex_disk_type, zone=location)
        if ex_boot_disk and not hasattr(ex_boot_disk, 'name'):
            ex_boot_disk = self.ex_get_volume(ex_boot_disk, zone=location)

        # Use disks[].initializeParams to auto-create the boot disk
        if not ex_disks_gce_struct and not ex_boot_disk:
            ex_disks_gce_struct = [{
                'autoDelete': ex_disk_auto_delete,
                'boot': True,
                'type': 'PERSISTENT',
                'mode': 'READ_WRITE',
                'deviceName': name,
                'initializeParams': {
                    'diskName': name,
                    'diskType': ex_disk_type.extra['selfLink'],
                    'sourceImage': image.extra['selfLink']
                }
            }]

        request, node_data = self._create_node_req(
            name, size, image, location, ex_network, ex_tags, ex_metadata,
            ex_boot_disk, external_ip, ex_disk_type, ex_disk_auto_delete,
            ex_service_accounts, description, ex_can_ip_forward,
            ex_disks_gce_struct, ex_nic_gce_struct, ex_on_host_maintenance,
            ex_automatic_restart, ex_preemptible, ex_subnetwork)
        self.connection.async_request(request, method='POST', data=node_data)
        return self.ex_get_node(name, location.name)

    def ex_create_instancetemplate(
            self, name, size, source=None, image=None,
            disk_type='pd-standard', disk_auto_delete=True,
            network='default', subnetwork=None, can_ip_forward=None,
            external_ip='ephemeral', service_accounts=None,
            on_host_maintenance=None,
automatic_restart=None, preemptible=None, tags=None, metadata=None, description=None, disks_gce_struct=None, nic_gce_struct=None): """ Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param name: The name of the node to create. :type name: ``str`` :param size: The machine type to use. :type size: ``str`` or :class:`GCENodeSize` :param image: The image to use to create the node (or, if attaching a persistent disk, the image used to create the disk) :type image: ``str`` or :class:`GCENodeImage` or ``None`` :keyword network: The network to associate with the template. :type network: ``str`` or :class:`GCENetwork` :keyword subnetwork: The subnetwork to associate with the node. :type subnetwork: ``str`` or :class:`GCESubnetwork` :keyword tags: A list of tags to associate with the node. :type tags: ``list`` of ``str`` or ``None`` :keyword metadata: Metadata dictionary for instance. :type metadata: ``dict`` or ``None`` :keyword external_ip: The external IP address to use. If 'ephemeral' (default), a new non-static address will be used. If 'None', then no external address will be used. To use an existing static IP address, a GCEAddress object should be passed in. :type external_ip: :class:`GCEAddress` or ``str`` or ``None`` :keyword disk_type: Specify a pd-standard (default) disk or pd-ssd for an SSD disk. :type disk_type: ``str`` or :class:`GCEDiskType` :keyword disk_auto_delete: Indicate that the boot disk should be deleted when the Node is deleted. Set to True by default. :type disk_auto_delete: ``bool`` :keyword service_accounts: Specify a list of serviceAccounts when creating the instance. 
The format is a list of dictionaries containing email and list of scopes, e.g. [{'email':'default', 'scopes':['compute', ...]}, ...] Scopes can either be full URLs or short names. If not provided, use the 'default' service account email and a scope of 'devstorage.read_only'. Also accepts the aliases defined in 'gcloud compute'. :type service_accounts: ``list`` :keyword description: The description of the node (instance). :type description: ``str`` or ``None`` :keyword can_ip_forward: Set to ``True`` to allow this node to send/receive non-matching src/dst packets. :type can_ip_forward: ``bool`` or ``None`` :keyword disks_gce_struct: Support for passing in the GCE-specific formatted disks[] structure. No attempt is made to ensure proper formatting of the disks[] structure. Using this structure obviates the need of using other disk params like 'ex_boot_disk', etc. See the GCE docs for specific details. :type disks_gce_struct: ``list`` or ``None`` :keyword nic_gce_struct: Support passing in the GCE-specific formatted networkInterfaces[] structure. No attempt is made to ensure proper formatting of the networkInterfaces[] data. Using this structure obviates the need of using 'external_ip' and 'ex_network'. See the GCE docs for details. :type nic_gce_struct: ``list`` or ``None`` :keyword on_host_maintenance: Defines whether node should be terminated or migrated when host machine goes down. Acceptable values are: 'MIGRATE' or 'TERMINATE' (If not supplied, value will be reset to GCE default value for the instance type.) :type ex_on_host_maintenance: ``str`` or ``None`` :keyword automatic_restart: Defines whether the instance should be automatically restarted when it is terminated by Compute Engine. (If not supplied, value will be set to the GCE default value for the instance type.) :type automatic_restart: ``bool`` or ``None`` :keyword preemptible: Defines whether the instance is preemptible. 
(If not supplied, the instance will not be preemptible) :type preemptible: ``bool`` or ``None`` :return: An Instance Template object. :rtype: :class:`GCEInstanceTemplate` """ request = "/global/instanceTemplates" properties = self._create_instance_properties( name, node_size=size, source=source, image=image, disk_type=disk_type, disk_auto_delete=True, external_ip=external_ip, network=network, subnetwork=subnetwork, can_ip_forward=can_ip_forward, service_accounts=service_accounts, on_host_maintenance=on_host_maintenance, automatic_restart=automatic_restart, preemptible=preemptible, tags=tags, metadata=metadata, description=description, disks_gce_struct=disks_gce_struct, nic_gce_struct=nic_gce_struct, use_selflinks=False) request_data = {'name': name, 'description': description, 'properties': properties} self.connection.async_request(request, method='POST', data=request_data) return self.ex_get_instancetemplate(name) def _create_instance_properties( self, name, node_size, source=None, image=None, disk_type='pd-standard', disk_auto_delete=True, network='default', subnetwork=None, external_ip='ephemeral', can_ip_forward=None, service_accounts=None, on_host_maintenance=None, automatic_restart=None, preemptible=None, tags=None, metadata=None, description=None, disks_gce_struct=None, nic_gce_struct=None, use_selflinks=True): """ Create the GCE instance properties needed for instance templates. :param node_size: The machine type to use. :type node_size: ``str`` or :class:`GCENodeSize` :keyword source: A source disk to attach to the instance. Cannot specify both 'image' and 'source'. :type source: :class:`StorageVolume` or ``str`` or ``None`` :param image: The image to use to create the node. Cannot specify both 'image' and 'source'. :type image: ``str`` or :class:`GCENodeImage` or ``None`` :keyword disk_type: Specify a pd-standard (default) disk or pd-ssd for an SSD disk. 
        :type     disk_type: ``str`` or :class:`GCEDiskType`

        :keyword  disk_auto_delete: Indicate that the boot disk should be
                                    deleted when the Node is deleted. Set to
                                    True by default.
        :type     disk_auto_delete: ``bool``

        :keyword  network: The network to associate with the node.
        :type     network: ``str`` or :class:`GCENetwork`

        :keyword  subnetwork: The Subnetwork resource for this instance. If
                              the network resource is in legacy mode, do not
                              provide this property. If the network is in
                              auto subnet mode, providing the subnetwork is
                              optional. If the network is in custom subnet
                              mode, then this field should be specified.
        :type     subnetwork: :class: `GCESubnetwork` or None

        :keyword  external_ip: The external IP address to use.  If
                               'ephemeral' (default), a new non-static
                               address will be used.  If 'None', then no
                               external address will be used.  To use an
                               existing static IP address, a GCEAddress
                               object should be passed in.
        :type     external_ip: :class:`GCEAddress` or ``str`` or ``None``

        :keyword  can_ip_forward: Set to ``True`` to allow this node to
                                  send/receive non-matching src/dst packets.
        :type     can_ip_forward: ``bool`` or ``None``

        :keyword  service_accounts: Specify a list of serviceAccounts when
                                    creating the instance. The format is a
                                    list of dictionaries containing email
                                    and list of scopes, e.g.
                                    [{'email':'default',
                                    'scopes':['compute', ...]}, ...]
                                    Scopes can either be full URLs or short
                                    names. If not provided, use the
                                    'default' service account email and a
                                    scope of 'devstorage.read_only'. Also
                                    accepts the aliases defined in
                                    'gcloud compute'.
        :type     service_accounts: ``list``

        :keyword  on_host_maintenance: Defines whether node should be
                                       terminated or migrated when host
                                       machine goes down. Acceptable values
                                       are: 'MIGRATE' or 'TERMINATE' (If not
                                       supplied, value will be reset to GCE
                                       default value for the instance type.)
        :type     on_host_maintenance: ``str`` or ``None``

        :keyword  automatic_restart: Defines whether the instance should be
                                     automatically restarted when it is
                                     terminated by Compute Engine.
                                     (If not supplied, value will be set to
                                     the GCE default value for the instance
                                     type.)
        :type     automatic_restart: ``bool`` or ``None``

        :keyword  preemptible: Defines whether the instance is preemptible.
                               (If not supplied, the instance will not be
                               preemptible)
        :type     preemptible: ``bool`` or ``None``

        :keyword  tags: A list of tags to associate with the node.
        :type     tags: ``list`` of ``str`` or ``None``

        :keyword  metadata: Metadata dictionary for instance.
        :type     metadata: ``dict`` or ``None``

        :keyword  description: The description of the node (instance).
        :type     description: ``str`` or ``None``

        :keyword  disks_gce_struct: Support for passing in the GCE-specific
                                    formatted disks[] structure. No attempt
                                    is made to ensure proper formatting of
                                    the disks[] structure. Using this
                                    structure obviates the need of using
                                    other disk params like 'boot_disk',
                                    etc. See the GCE docs for specific
                                    details.
        :type     disks_gce_struct: ``list`` or ``None``

        :keyword  nic_gce_struct: Support passing in the GCE-specific
                                  formatted networkInterfaces[] structure.
                                  No attempt is made to ensure proper
                                  formatting of the networkInterfaces[]
                                  data. Using this structure obviates the
                                  need of using 'external_ip' and
                                  'network'.  See the GCE docs for details.
        :type     nic_gce_struct: ``list`` or ``None``

        :return:  A dictionary formatted for use with the GCE API.
        :rtype:   ``dict``
        """
        instance_properties = {}

        # build disks
        # Exactly one root-device source must be supplied: 'image',
        # 'source', or a pre-built 'disks_gce_struct'.
        if not image and not source and not disks_gce_struct:
            raise ValueError("Missing root device or image. Must specify an "
                             "'image', source, or use the "
                             "'disks_gce_struct'.")

        if source and disks_gce_struct:
            raise ValueError("Cannot specify both 'source' and "
                             "'disks_gce_struct'. Use one or the other.")

        if disks_gce_struct:
            # Caller supplied the raw GCE disks[] structure; use verbatim.
            instance_properties['disks'] = disks_gce_struct
        else:
            disk_name = None
            device_name = None
            if source:
                disk_name = source.name
                # TODO(supertom): what about device name?
                device_name = source.name
                # An existing source disk wins over 'image'.
                image = None
            instance_properties['disks'] = [self._build_disk_gce_struct(
                device_name, source=source, disk_type=disk_type, image=image,
                disk_name=disk_name, usage_type='PERSISTENT',
                mount_mode='READ_WRITE', auto_delete=disk_auto_delete,
                is_boot=True, use_selflinks=use_selflinks)]

        # build network interfaces
        if nic_gce_struct is not None:
            # A raw networkInterfaces[] struct conflicts with the
            # convenience parameters that would also set interfaces.
            if hasattr(external_ip, 'address'):
                raise ValueError("Cannot specify both a static IP address "
                                 "and 'nic_gce_struct'. Use one or the "
                                 "other.")
            if hasattr(network, 'name'):
                if network.name == 'default':
                    # assume this is just the default value from create_node()
                    # and since the user specified ex_nic_gce_struct, the
                    # struct should take precedence
                    network = None
                else:
                    raise ValueError("Cannot specify both 'network' and "
                                     "'nic_gce_struct'. Use one or the "
                                     "other.")
            instance_properties['networkInterfaces'] = nic_gce_struct
        else:
            # NOTE(review): use_selflinks is hard-coded to True here rather
            # than forwarding this method's use_selflinks argument —
            # confirm this is intentional (networks may always require
            # full URLs).
            instance_properties['networkInterfaces'] = [
                self._build_network_gce_struct(
                    network=network, subnetwork=subnetwork,
                    external_ip=external_ip, use_selflinks=True)
            ]

        # build scheduling
        scheduling = self._build_scheduling_gce_struct(
            on_host_maintenance, automatic_restart, preemptible)
        if scheduling:
            instance_properties['scheduling'] = scheduling

        # build service accounts/scopes
        instance_properties[
            'serviceAccounts'] = self._build_service_accounts_gce_list(
                service_accounts)

        # include general properties
        if description:
            instance_properties['description'] = str(description)
        if tags:
            instance_properties['tags'] = {'items': tags}
        if metadata:
            instance_properties['metadata'] = self._format_metadata(
                fingerprint='na', metadata=metadata)
        if can_ip_forward:
            instance_properties['canIpForward'] = True

        instance_properties['machineType'] = self._get_selflink_or_name(
            obj=node_size, get_selflinks=use_selflinks, objname='size')

        return instance_properties

    def _build_disk_gce_struct(
            self, device_name, source=None, disk_type=None, disk_size=None,
            image=None, disk_name=None, is_boot=True,
mount_mode='READ_WRITE', usage_type='PERSISTENT', auto_delete=True, use_selflinks=True): """ Generates the GCP dict for a disk. :param device_name: Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. Defaults to disk_name. :type device_name: ``str`` :keyword source: The disk to attach to the instance. :type source: ``str`` of selfLink, :class:`StorageVolume` or None :keyword disk_type: Specify a URL or DiskType object. :type disk_type: ``str`` or :class:`GCEDiskType` or ``None`` :keyword image: The image to use to create the disk. :type image: :class:`GCENodeImage` or ``None`` :keyword disk_size: Integer in gigabytes. :type disk_size: ``int`` :param disk_name: Specifies the disk name. If not specified, the default is to use the device_name. :type disk_name: ``str`` :keyword mount_mode: The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. :type mount_mode: ``str`` :keyword usage_type: Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. :type usage_type: ``str`` :keyword auto_delete: Indicate that the boot disk should be deleted when the Node is deleted. Set to True by default. :type auto_delete: ``bool`` :return: Dictionary to be used in disk-portion of instance API call. :rtype: ``dict`` """ # validation if source is None and image is None: raise ValueError( "Either the 'source' or 'image' argument must be specified.") if not isinstance(auto_delete, bool): raise ValueError("auto_delete field is not a bool.") if disk_size is not None and not disk_size.isdigit(): raise ValueError("disk_size must be a digit, '%s' provided." 
% (disk_size)) mount_modes = ['READ_WRITE', 'READ_ONLY'] if mount_mode not in mount_modes: raise ValueError("mount mode must be one of: %s." % (','.join(mount_modes))) usage_types = ['PERSISTENT', 'SCRATCH'] if usage_type not in usage_types: raise ValueError("usage type must be one of: %s." % (','.join(usage_types))) disk = {} if not disk_name: disk_name = device_name if source is not None: disk['source'] = self._get_selflink_or_name( obj=source, get_selflinks=use_selflinks, objname='volume') else: # create new disk # we need the URL of the image, always. image = self._get_selflink_or_name(obj=image, get_selflinks=True, objname='image') disk_type = self._get_selflink_or_name( obj=disk_type, get_selflinks=use_selflinks, objname='disktype') disk['initializeParams'] = { 'diskName': disk_name, 'diskType': disk_type, 'sourceImage': image, } if disk_size is not None: disk['initializeParams']['diskSizeGb'] = disk_size # add in basic attributes disk.update({'boot': is_boot, 'type': usage_type, 'mode': mount_mode, 'deviceName': device_name, 'autoDelete': auto_delete}) return disk def _get_selflink_or_name(self, obj, get_selflinks=True, objname=None): """ Return the selflink or name, given a name or object. Will try to fetch the appropriate object if necessary (assumes we only need one parameter to fetch the object, no introspection is performed). :param obj: object to test. :type obj: ``str`` or ``object`` :param get_selflinks: Inform if we should return selfLinks or just the name. Default is True. 
:param get_selflinks: ``bool`` :param objname: string to use in constructing method call :type objname: ``str`` or None :return: URL from extra['selfLink'] or name :rtype: ``str`` """ if get_selflinks: if not hasattr(obj, 'name'): if objname: getobj = getattr(self, 'ex_get_%s' % (objname)) obj = getobj(obj) else: raise ValueError( "objname must be set if selflinks is True.") return obj.extra['selfLink'] else: if not hasattr(obj, 'name'): return obj else: return obj.name def _build_network_gce_struct(self, network, subnetwork=None, external_ip=None, use_selflinks=True): """ Build network interface dict for use in the GCE API. Note: Must be wrapped in a list before passing to the GCE API. :param network: The network to associate with the node. :type network: :class:`GCENetwork` :keyword subnetwork: The subnetwork to include. :type subnetwork: :class:`GCESubNetwork` :keyword external_ip: The external IP address to use. If 'ephemeral' (default), a new non-static address will be used. If 'None', then no external address will be used. To use an existing static IP address, a GCEAddress object should be passed in. :type external_ip: :class:`GCEAddress` :return: network interface dict :rtype: ``dict`` """ ni = {} ni = {'kind': 'compute#instanceNetworkInterface'} if network is None: network = 'default' ni['network'] = self._get_selflink_or_name( obj=network, get_selflinks=use_selflinks, objname='network') if subnetwork: ni['subnetwork'] = self._get_selflink_or_name( obj=subnetwork, get_selflinks=use_selflinks, objname='subnetwork') if external_ip: access_configs = [{'name': 'External NAT', 'type': 'ONE_TO_ONE_NAT'}] if hasattr(external_ip, 'address'): access_configs[0]['natIP'] = external_ip.address ni['accessConfigs'] = access_configs return ni def _build_service_account_gce_struct( self, service_account, default_email='default', default_scope='devstorage.read_only'): """ Helper to create Service Account dict. 
Use _build_service_accounts_gce_list to create a list ready for the GCE API. :param: service_account: dictionarie containing email and list of scopes, e.g. [{'email':'default', 'scopes':['compute', ...]}, ...] Scopes can either be full URLs or short names. If not provided, use the 'default' service account email and a scope of 'devstorage.read_only'. Also accepts the aliases defined in 'gcloud compute'. :type service_account: ``dict`` or None :return: dict usable in GCE API call. :rtype: ``dict`` """ if not isinstance(service_account, dict): raise ValueError( "service_account not in the correct format," "'%s - %s'" % (str(type(service_account)), str(service_account))) sa = {} if 'email' not in service_account: sa['email'] = default_email if 'scopes' not in service_account: sa['scopes'] = [self.AUTH_URL + default_scope] else: ps = [] for scope in service_account['scopes']: if scope.startswith(self.AUTH_URL): ps.append(scope) elif scope in self.SA_SCOPES_MAP: ps.append(self.AUTH_URL + self.SA_SCOPES_MAP[scope]) else: ps.append(self.AUTH_URL + scope) sa['scopes'] = ps return sa def _build_service_accounts_gce_list(self, service_accounts=None, default_email='default', default_scope='devstorage.read_only'): """ Helper to create service account list for GCE API. :keyword service_accounts: Specify a list of serviceAccounts when creating the instance. The format is a list of dictionaries containing email and list of scopes, e.g. [{'email':'default', 'scopes':['compute', ...]}, ...] Scopes can either be full URLs or short names. If not provided, use the 'default' service account email and a scope of 'devstorage.read_only'. Also accepts the aliases defined in 'gcloud compute'. :type service_accounts: ``list`` of ``dict`` or None :return: list of dictionaries usable in the GCE API. 
:rtype: ``list`` of ``dict`` """ gce_service_accounts = [] if not service_accounts: gce_service_accounts = [{ 'email': default_email, 'scopes': [self.AUTH_URL + default_scope] }] elif not isinstance(service_accounts, list): raise ValueError("service_accounts field is not a list.") else: for sa in service_accounts: gce_service_accounts.append( self._build_service_account_gce_struct(service_account=sa)) return gce_service_accounts def _build_scheduling_gce_struct(self, on_host_maintenance=None, automatic_restart=None, preemptible=None): """ Build the scheduling dict suitable for use with the GCE API. :param on_host_maintenance: Defines whether node should be terminated or migrated when host machine goes down. Acceptable values are: 'MIGRATE' or 'TERMINATE' (If not supplied, value will be reset to GCE default value for the instance type.) :type on_host_maintenance: ``str`` or ``None`` :param automatic_restart: Defines whether the instance should be automatically restarted when it is terminated by Compute Engine. (If not supplied, value will be set to the GCE default value for the instance type.) :type automatic_restart: ``bool`` or ``None`` :param preemptible: Defines whether the instance is preemptible. (If not supplied, the instance will not be preemptible) :type preemptible: ``bool`` or ``None`` :return: A dictionary of scheduling options for the GCE API. 
:rtype: ``dict`` """ scheduling = {} if preemptible is not None: if isinstance(preemptible, bool): scheduling['preemptible'] = preemptible else: raise ValueError("boolean expected for preemptible") if on_host_maintenance is not None: maint_opts = ['MIGRATE', 'TERMINATE'] if isinstance(on_host_maintenance, str) and on_host_maintenance in maint_opts: if preemptible is True and on_host_maintenance == 'MIGRATE': raise ValueError(("host maintenance cannot be 'MIGRATE' " "if instance is preemptible.")) scheduling['onHostMaintenance'] = on_host_maintenance else: raise ValueError("host maintenance must be one of %s" % (','.join(maint_opts))) if automatic_restart is not None: if isinstance(automatic_restart, bool): if automatic_restart is True and preemptible is True: raise ValueError( "instance cannot be restarted if it is preemptible.") scheduling['automaticRestart'] = automatic_restart else: raise ValueError("boolean expected for automatic") return scheduling def ex_create_multiple_nodes( self, base_name, size, image, number, location=None, ex_network='default', ex_subnetwork=None, ex_tags=None, ex_metadata=None, ignore_errors=True, use_existing_disk=True, poll_interval=2, external_ip='ephemeral', ex_disk_type='pd-standard', ex_disk_auto_delete=True, ex_service_accounts=None, timeout=DEFAULT_TASK_COMPLETION_TIMEOUT, description=None, ex_can_ip_forward=None, ex_disks_gce_struct=None, ex_nic_gce_struct=None, ex_on_host_maintenance=None, ex_automatic_restart=None, ex_image_family=None, ex_preemptible=None): """ Create multiple nodes and return a list of Node objects. Nodes will be named with the base name and a number. For example, if the base name is 'libcloud' and you create 3 nodes, they will be named:: libcloud-000 libcloud-001 libcloud-002 :param base_name: The base name of the nodes to create. :type base_name: ``str`` :param size: The machine type to use. :type size: ``str`` or :class:`GCENodeSize` :param image: The image to use to create the nodes. 
:type image: ``str`` or :class:`GCENodeImage` :param number: The number of nodes to create. :type number: ``int`` :keyword location: The location (zone) to create the nodes in. :type location: ``str`` or :class:`NodeLocation` or :class:`GCEZone` or ``None`` :keyword ex_network: The network to associate with the nodes. :type ex_network: ``str`` or :class:`GCENetwork` :keyword ex_tags: A list of tags to associate with the nodes. :type ex_tags: ``list`` of ``str`` or ``None`` :keyword ex_metadata: Metadata dictionary for instances. :type ex_metadata: ``dict`` or ``None`` :keyword ignore_errors: If True, don't raise Exceptions if one or more nodes fails. :type ignore_errors: ``bool`` :keyword use_existing_disk: If True and if an existing disk with the same name/location is found, use that disk instead of creating a new one. :type use_existing_disk: ``bool`` :keyword poll_interval: Number of seconds between status checks. :type poll_interval: ``int`` :keyword external_ip: The external IP address to use. If 'ephemeral' (default), a new non-static address will be used. If 'None', then no external address will be used. (Static addresses are not supported for multiple node creation.) :type external_ip: ``str`` or None :keyword ex_disk_type: Specify a pd-standard (default) disk or pd-ssd for an SSD disk. :type ex_disk_type: ``str`` or :class:`GCEDiskType` :keyword ex_disk_auto_delete: Indicate that the boot disk should be deleted when the Node is deleted. Set to True by default. :type ex_disk_auto_delete: ``bool`` :keyword ex_service_accounts: Specify a list of serviceAccounts when creating the instance. The format is a list of dictionaries containing email and list of scopes, e.g. [{'email':'default', 'scopes':['compute', ...]}, ...] Scopes can either be full URLs or short names. If not provided, use the 'default' service account email and a scope of 'devstorage.read_only'. Also accepts the aliases defined in 'gcloud compute'. 
:type ex_service_accounts: ``list`` :keyword timeout: The number of seconds to wait for all nodes to be created before timing out. :type timeout: ``int`` :keyword description: The description of the node (instance). :type description: ``str`` or ``None`` :keyword ex_can_ip_forward: Set to ``True`` to allow this node to send/receive non-matching src/dst packets. :type ex_can_ip_forward: ``bool`` or ``None`` :keyword ex_preemptible: Defines whether the instance is preemptible. (If not supplied, the instance will not be preemptible) :type ex_preemptible: ``bool`` or ``None`` :keyword ex_disks_gce_struct: Support for passing in the GCE-specific formatted disks[] structure. No attempt is made to ensure proper formatting of the disks[] structure. Using this structure obviates the need of using other disk params like 'ex_boot_disk', etc. See the GCE docs for specific details. :type ex_disks_gce_struct: ``list`` or ``None`` :keyword ex_nic_gce_struct: Support passing in the GCE-specific formatted networkInterfaces[] structure. No attempt is made to ensure proper formatting of the networkInterfaces[] data. Using this structure obviates the need of using 'external_ip' and 'ex_network'. See the GCE docs for details. :type ex_nic_gce_struct: ``list`` or ``None`` :keyword ex_on_host_maintenance: Defines whether node should be terminated or migrated when host machine goes down. Acceptable values are: 'MIGRATE' or 'TERMINATE' (If not supplied, value will be reset to GCE default value for the instance type.) :type ex_on_host_maintenance: ``str`` or ``None`` :keyword ex_automatic_restart: Defines whether the instance should be automatically restarted when it is terminated by Compute Engine. (If not supplied, value will be set to the GCE default value for the instance type.) :type ex_automatic_restart: ``bool`` or ``None`` :keyword ex_image_family: Determine image from an 'Image Family' instead of by name. 'image' should be None to use this keyword. 
:type ex_image_family: ``str`` or ``None`` :return: A list of Node objects for the new nodes. :rtype: ``list`` of :class:`Node` """ if image and ex_disks_gce_struct: raise ValueError("Cannot specify both 'image' and " "'ex_disks_gce_struct'.") if image and ex_image_family: raise ValueError("Cannot specify both 'image' and " "'ex_image_family'") location = location or self.zone if not hasattr(location, 'name'): location = self.ex_get_zone(location) if not hasattr(size, 'name'): size = self.ex_get_size(size, location) if not hasattr(ex_network, 'name'): ex_network = self.ex_get_network(ex_network) if ex_subnetwork and not hasattr(ex_subnetwork, 'name'): ex_subnetwork = \ self.ex_get_subnetwork(ex_subnetwork, region=self._get_region_from_zone( location)) if ex_image_family: image = self.ex_get_image_from_family(ex_image_family) if image and not hasattr(image, 'name'): image = self.ex_get_image(image) if not hasattr(ex_disk_type, 'name'): ex_disk_type = self.ex_get_disktype(ex_disk_type, zone=location) node_attrs = {'size': size, 'image': image, 'location': location, 'network': ex_network, 'subnetwork': ex_subnetwork, 'tags': ex_tags, 'metadata': ex_metadata, 'ignore_errors': ignore_errors, 'use_existing_disk': use_existing_disk, 'external_ip': external_ip, 'ex_disk_type': ex_disk_type, 'ex_disk_auto_delete': ex_disk_auto_delete, 'ex_service_accounts': ex_service_accounts, 'description': description, 'ex_can_ip_forward': ex_can_ip_forward, 'ex_disks_gce_struct': ex_disks_gce_struct, 'ex_nic_gce_struct': ex_nic_gce_struct, 'ex_on_host_maintenance': ex_on_host_maintenance, 'ex_automatic_restart': ex_automatic_restart, 'ex_preemptible': ex_preemptible} # List for holding the status information for disk/node creation. 
status_list = [] for i in range(number): name = '%s-%03d' % (base_name, i) status = {'name': name, 'node_response': None, 'node': None} status_list.append(status) start_time = time.time() complete = False while not complete: if (time.time() - start_time >= timeout): raise Exception("Timeout (%s sec) while waiting for multiple " "instances") complete = True time.sleep(poll_interval) for status in status_list: # Create the node or check status if already in progress. if not status['node']: if not status['node_response']: self._multi_create_node(status, node_attrs) else: self._multi_check_node(status, node_attrs) # If any of the nodes have not been created (or failed) we are # not done yet. if not status['node']: complete = False # Return list of nodes node_list = [] for status in status_list: node_list.append(status['node']) return node_list def ex_create_targethttpproxy(self, name, urlmap): """ Create a target HTTP proxy. :param name: Name of target HTTP proxy :type name: ``str`` :keyword urlmap: URL map defining the mapping from URl to the backendservice. :type healthchecks: ``str`` or :class:`GCEUrlMap` :return: Target Pool object :rtype: :class:`GCETargetPool` """ targetproxy_data = {'name': name} if not hasattr(urlmap, 'name'): urlmap = self.ex_get_urlmap(urlmap) targetproxy_data['urlMap'] = urlmap.extra['selfLink'] request = '/global/targetHttpProxies' self.connection.async_request(request, method='POST', data=targetproxy_data) return self.ex_get_targethttpproxy(name) def ex_create_targethttpsproxy(self, name, urlmap, sslcertificates, description=None): """ Creates a TargetHttpsProxy resource in the specified project using the data included in the request. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :type name: ``str`` :param sslcertificates: URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, exactly one SSL certificate must be specified. :type sslcertificates: ``list`` of :class:`GCESslcertificates` :param urlmap: A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. :type urlmap: :class:`GCEUrlMap` :keyword description: An optional description of this resource. Provide this property when you create the resource. :type description: ``str`` :return: `GCETargetHttpsProxy` object. :rtype: :class:`GCETargetHttpsProxy` """ request = "/global/targetHttpsProxies" % () request_data = {} request_data['name'] = name request_data['description'] = description request_data['sslCertificates'] = [x.extra['selfLink'] for x in sslcertificates] request_data['urlMap'] = urlmap.extra['selfLink'] self.connection.async_request(request, method='POST', data=request_data) return self.ex_get_targethttpsproxy(name) def ex_create_targetinstance(self, name, zone=None, node=None, description=None, nat_policy="NO_NAT"): """ Create a target instance. :param name: Name of target instance :type name: ``str`` :keyword region: Zone to create the target pool in. Defaults to self.zone :type region: ``str`` or :class:`GCEZone` or ``None`` :keyword node: The actual instance to be used as the traffic target. :type node: ``str`` or :class:`Node` :keyword description: A text description for the target instance :type description: ``str`` or ``None`` :keyword nat_policy: The NAT option for how IPs are NAT'd to the node. 
:type nat_policy: ``str`` :return: Target Instance object :rtype: :class:`GCETargetInstance` """ zone = zone or self.zone targetinstance_data = {} targetinstance_data['name'] = name if not hasattr(zone, 'name'): zone = self.ex_get_zone(zone) targetinstance_data['zone'] = zone.extra['selfLink'] if node is not None: if not hasattr(node, 'name'): node = self.ex_get_node(node, zone) targetinstance_data['instance'] = node.extra['selfLink'] targetinstance_data['natPolicy'] = nat_policy if description: targetinstance_data['description'] = description request = '/zones/%s/targetInstances' % (zone.name) self.connection.async_request(request, method='POST', data=targetinstance_data) return self.ex_get_targetinstance(name, zone) def ex_create_targetpool(self, name, region=None, healthchecks=None, nodes=None, session_affinity=None, backup_pool=None, failover_ratio=None): """ Create a target pool. :param name: Name of target pool :type name: ``str`` :keyword region: Region to create the target pool in. Defaults to self.region :type region: ``str`` or :class:`GCERegion` or ``None`` :keyword healthchecks: Optional list of health checks to attach :type healthchecks: ``list`` of ``str`` or :class:`GCEHealthCheck` :keyword nodes: Optional list of nodes to attach to the pool :type nodes: ``list`` of ``str`` or :class:`Node` :keyword session_affinity: Optional algorithm to use for session affinity. :type session_affinity: ``str`` :keyword backup_pool: Optional backup targetpool to take over traffic if the failover_ratio is exceeded. :type backup_pool: ``GCETargetPool`` or ``None`` :keyword failover_ratio: The percentage of healthy VMs must fall at or below this value before traffic will be sent to the backup_pool. 
:type failover_ratio: :class:`GCETargetPool` or ``None`` :return: Target Pool object :rtype: :class:`GCETargetPool` """ targetpool_data = {} region = region or self.region if backup_pool and not failover_ratio: failover_ratio = 0.1 targetpool_data['failoverRatio'] = failover_ratio targetpool_data['backupPool'] = backup_pool.extra['selfLink'] if failover_ratio and not backup_pool: e = "Must supply a backup targetPool when setting failover_ratio" raise ValueError(e) targetpool_data['name'] = name if not hasattr(region, 'name'): region = self.ex_get_region(region) targetpool_data['region'] = region.extra['selfLink'] if healthchecks: if not hasattr(healthchecks[0], 'name'): hc_list = [self.ex_get_healthcheck(h).extra['selfLink'] for h in healthchecks] else: hc_list = [h.extra['selfLink'] for h in healthchecks] targetpool_data['healthChecks'] = hc_list if nodes: if not hasattr(nodes[0], 'name'): node_list = [self.ex_get_node(n, 'all').extra['selfLink'] for n in nodes] else: node_list = [n.extra['selfLink'] for n in nodes] targetpool_data['instances'] = node_list if session_affinity: targetpool_data['sessionAffinity'] = session_affinity request = '/regions/%s/targetPools' % (region.name) self.connection.async_request(request, method='POST', data=targetpool_data) return self.ex_get_targetpool(name, region) def ex_create_urlmap(self, name, default_service): """ Create a URL Map. :param name: Name of the URL Map. :type name: ``str`` :keyword default_service: Default backend service for the map. 
:type default_service: ``str`` or :class:`GCEBackendService` :return: URL Map object :rtype: :class:`GCEUrlMap` """ urlmap_data = {'name': name} # TODO: support hostRules, pathMatchers, tests if not hasattr(default_service, 'name'): default_service = self.ex_get_backendservice(default_service) urlmap_data['defaultService'] = default_service.extra['selfLink'] request = '/global/urlMaps' self.connection.async_request(request, method='POST', data=urlmap_data) return self.ex_get_urlmap(name) def create_volume(self, size, name, location=None, snapshot=None, image=None, use_existing=True, ex_disk_type='pd-standard', ex_image_family=None): """ Create a volume (disk). :param size: Size of volume to create (in GB). Can be None if image or snapshot is supplied. :type size: ``int`` or ``str`` or ``None`` :param name: Name of volume to create :type name: ``str`` :keyword location: Location (zone) to create the volume in :type location: ``str`` or :class:`GCEZone` or :class:`NodeLocation` or ``None`` :keyword snapshot: Snapshot to create image from :type snapshot: :class:`GCESnapshot` or ``str`` or ``None`` :keyword image: Image to create disk from. :type image: :class:`GCENodeImage` or ``str`` or ``None`` :keyword use_existing: If True and a disk with the given name already exists, return an object for that disk instead of attempting to create a new disk. :type use_existing: ``bool`` :keyword ex_disk_type: Specify a pd-standard (default) disk or pd-ssd for an SSD disk. :type ex_disk_type: ``str`` or :class:`GCEDiskType` :keyword ex_image_family: Determine image from an 'Image Family' instead of by name. 'image' should be None to use this keyword. 
        :type     ex_image_family: ``str`` or ``None``

        :return:  Storage Volume object
        :rtype:   :class:`StorageVolume`
        """
        if image and ex_image_family:
            raise ValueError("Cannot specify both 'image' and "
                             "'ex_image_family'")

        if ex_image_family:
            image = self.ex_get_image_from_family(ex_image_family)

        request, volume_data, params = self._create_vol_req(
            size, name, location, snapshot, image, ex_disk_type)
        try:
            self.connection.async_request(request, method='POST',
                                          data=volume_data, params=params)
        except ResourceExistsError:
            # sys.exc_info() used for py2/py3 compatible exception capture.
            e = sys.exc_info()[1]
            if not use_existing:
                raise e
        return self.ex_get_volume(name, location)

    def create_volume_snapshot(self, volume, name):
        """
        Create a snapshot of the provided Volume.

        :param  volume: A StorageVolume object
        :type   volume: :class:`StorageVolume`

        :param  name: Name of the snapshot to create
        :type   name: ``str``

        :return:  A GCE Snapshot object
        :rtype:   :class:`GCESnapshot`
        """
        snapshot_data = {}
        snapshot_data['name'] = name
        request = '/zones/%s/disks/%s/createSnapshot' % (
            volume.extra['zone'].name, volume.name)
        self.connection.async_request(request, method='POST',
                                      data=snapshot_data)
        return self.ex_get_snapshot(name)

    def list_volume_snapshots(self, volume):
        """
        List snapshots created from the provided volume.

        For GCE, snapshots are global, but while the volume they were
        created from still exists, the source disk for the snapshot is
        tracked.

        :param  volume: A StorageVolume object
        :type   volume: :class:`StorageVolume`

        :return:  A list of Snapshot objects
        :rtype:   ``list`` of :class:`GCESnapshot`
        """
        volume_snapshots = []
        volume_link = volume.extra['selfLink']
        all_snapshots = self.ex_list_snapshots()
        # Snapshots are global; filter client-side on the source disk URI.
        for snapshot in all_snapshots:
            if snapshot.extra['sourceDisk'] == volume_link:
                volume_snapshots.append(snapshot)
        return volume_snapshots

    def ex_update_autoscaler(self, autoscaler):
        """
        Update an autoscaler with new values.

        To update, change the attributes of the autoscaler object and pass
        the updated object to the method.

        :param  autoscaler: An Autoscaler object with updated values.
        :type   autoscaler: :class:`GCEAutoscaler`

        :return:  An Autoscaler object representing the new state.
        :rtype:   :class:`GCEAutoscaler`
        """
        request = '/zones/%s/autoscalers' % (autoscaler.zone.name)
        as_data = {}
        as_data['name'] = autoscaler.name
        as_data['autoscalingPolicy'] = autoscaler.policy
        as_data['target'] = autoscaler.target.extra['selfLink']

        self.connection.async_request(request, method='PUT', data=as_data)
        return self.ex_get_autoscaler(autoscaler.name, autoscaler.zone)

    def ex_update_healthcheck(self, healthcheck):
        """
        Update a health check with new values.

        To update, change the attributes of the health check object and pass
        the updated object to the method.

        :param  healthcheck: A healthcheck object with updated values.
        :type   healthcheck: :class:`GCEHealthCheck`

        :return:  An object representing the new state of the health check.
        :rtype:   :class:`GCEHealthCheck`
        """
        hc_data = {}
        hc_data['name'] = healthcheck.name
        hc_data['requestPath'] = healthcheck.path
        hc_data['port'] = healthcheck.port
        hc_data['checkIntervalSec'] = healthcheck.interval
        hc_data['timeoutSec'] = healthcheck.timeout
        hc_data['unhealthyThreshold'] = healthcheck.unhealthy_threshold
        hc_data['healthyThreshold'] = healthcheck.healthy_threshold
        # Optional fields are only sent when present on the object.
        if healthcheck.extra['host']:
            hc_data['host'] = healthcheck.extra['host']
        if healthcheck.extra['description']:
            hc_data['description'] = healthcheck.extra['description']

        request = '/global/httpHealthChecks/%s' % (healthcheck.name)

        self.connection.async_request(request, method='PUT', data=hc_data)
        return self.ex_get_healthcheck(healthcheck.name)

    def ex_update_firewall(self, firewall):
        """
        Update a firewall with new values.

        To update, change the attributes of the firewall object and pass the
        updated object to the method.

        :param  firewall: A firewall object with updated values.
        :type   firewall: :class:`GCEFirewall`

        :return:  An object representing the new state of the firewall.
        :rtype:   :class:`GCEFirewall`
        """
        firewall_data = {}
        firewall_data['name'] = firewall.name
        firewall_data['allowed'] = firewall.allowed
        firewall_data['network'] = firewall.network.extra['selfLink']
        # Optional fields are only sent when present on the object.
        if firewall.source_ranges:
            firewall_data['sourceRanges'] = firewall.source_ranges
        if firewall.source_tags:
            firewall_data['sourceTags'] = firewall.source_tags
        if firewall.target_tags:
            firewall_data['targetTags'] = firewall.target_tags
        if firewall.extra['description']:
            firewall_data['description'] = firewall.extra['description']

        request = '/global/firewalls/%s' % (firewall.name)

        self.connection.async_request(request, method='PUT',
                                      data=firewall_data)
        return self.ex_get_firewall(firewall.name)

    def ex_targethttpsproxy_set_sslcertificates(self, targethttpsproxy,
                                                sslcertificates):
        """
        Replaces SslCertificates for TargetHttpsProxy.

        Scopes needed - one of the following:

        * https://www.googleapis.com/auth/cloud-platform
        * https://www.googleapis.com/auth/compute

        :param  targethttpsproxy: Name of the TargetHttpsProxy resource to
                                  set an SslCertificates resource for.
        :type   targethttpsproxy: ``str``

        :param  sslcertificates: sslcertificates to set.
        :type   sslcertificates: ``list`` of :class:`GCESslCertificates`

        :return:  True
        :rtype: ``bool``
        """
        request = "/targetHttpsProxies/%s/setSslCertificates" % (
            targethttpsproxy.name)
        request_data = {'sslCertificates': [x.extra['selfLink']
                                            for x in sslcertificates]}
        self.connection.async_request(request, method='POST',
                                      data=request_data)

        return True

    def ex_targethttpsproxy_set_urlmap(self, targethttpsproxy, urlmap):
        """
        Changes the URL map for TargetHttpsProxy.

        Scopes needed - one of the following:

        * https://www.googleapis.com/auth/cloud-platform
        * https://www.googleapis.com/auth/compute

        :param  targethttpsproxy: Name of the TargetHttpsProxy resource
                                  whose URL map is to be set.
        :type   targethttpsproxy: ``str``

        :param  urlmap: urlmap to set.
        :type   urlmap: :class:`GCEUrlMap`

        :return:  True
        :rtype: ``bool``
        """
        request = "/targetHttpsProxies/%s/setUrlMap" % (targethttpsproxy.name)
        request_data = {'urlMap': urlmap.extra['selfLink']}
        self.connection.async_request(request, method='POST',
                                      data=request_data)

        return True

    def ex_targetpool_get_health(self, targetpool, node=None):
        """
        Return a hash of target pool instances and their health.

        :param  targetpool: Targetpool containing healthchecked instances.
        :type   targetpool: :class:`GCETargetPool`

        :param  node: Optional node to specify if only a specific node's
                      health status should be returned
        :type   node: ``str``, ``Node``, or ``None``

        :return:  List of hashes of instances and their respective health,
                  e.g. [{'node': ``Node``, 'health': 'UNHEALTHY'}, ...]
        :rtype:   ``list`` of ``dict``
        """
        health = []
        region_name = targetpool.region.name
        request = '/regions/%s/targetPools/%s/getHealth' % (region_name,
                                                            targetpool.name)

        if node is not None:
            # Accept either a Node object or a plain instance name.
            if hasattr(node, 'name'):
                node_name = node.name
            else:
                node_name = node

        nodes = targetpool.nodes
        for node_object in nodes:
            if node:
                # Only query health for the requested node.
                if node_name == node_object.name:
                    body = {'instance': node_object.extra['selfLink']}
                    resp = self.connection.request(request, method='POST',
                                                   data=body).object
                    status = resp['healthStatus'][0]['healthState']
                    health.append({'node': node_object, 'health': status})
            else:
                # No filter: one getHealth request per pool member.
                body = {'instance': node_object.extra['selfLink']}
                resp = self.connection.request(request, method='POST',
                                               data=body).object
                status = resp['healthStatus'][0]['healthState']
                health.append({'node': node_object, 'health': status})
        return health

    def ex_targetpool_set_backup_targetpool(
            self, targetpool, backup_targetpool, failover_ratio=0.1):
        """
        Set a backup targetpool.

        :param  targetpool: The existing primary targetpool
        :type   targetpool: :class:`GCETargetPool`

        :param  backup_targetpool: The existing targetpool to use for
                                   failover traffic.
        :type   backup_targetpool: :class:`GCETargetPool`

        :param  failover_ratio: The percentage of healthy VMs must fall at or
                                below this value before traffic will be sent
                                to the backup targetpool (default 0.10)
        :type   failover_ratio: ``float``

        :return:  True if successful
        :rtype:   ``bool``
        """
        region = targetpool.region.name
        name = targetpool.name
        req_data = {'target': backup_targetpool.extra['selfLink']}
        params = {'failoverRatio': failover_ratio}

        request = '/regions/%s/targetPools/%s/setBackup' % (region, name)
        self.connection.async_request(request, method='POST', data=req_data,
                                      params=params)
        return True

    def ex_targetpool_add_node(self, targetpool, node):
        """
        Add a node to a target pool.

        :param  targetpool: The targetpool to add node to
        :type   targetpool: ``str`` or :class:`GCETargetPool`

        :param  node: The node to add
        :type   node: ``str`` or :class:`Node`

        :return:  True if successful
        :rtype:   ``bool``
        """
        if not hasattr(targetpool, 'name'):
            targetpool = self.ex_get_targetpool(targetpool)
        # Resolve the node argument into a selfLink URI; it may be a Node
        # object, a URI string, or a bare instance name.
        if hasattr(node, 'name'):
            node_uri = node.extra['selfLink']
        else:
            if node.startswith('https://'):
                node_uri = node
            else:
                node = self.ex_get_node(node, 'all')
                node_uri = node.extra['selfLink']

        targetpool_data = {'instances': [{'instance': node_uri}]}

        request = '/regions/%s/targetPools/%s/addInstance' % (
            targetpool.region.name, targetpool.name)
        self.connection.async_request(request, method='POST',
                                      data=targetpool_data)
        # Update the local targetpool object's cached node list, avoiding
        # duplicates whether the cache holds URIs or Node objects.
        if all((node_uri != n) and
               (not hasattr(n, 'extra') or n.extra['selfLink'] != node_uri)
               for n in targetpool.nodes):
            targetpool.nodes.append(node)
        return True

    def ex_targetpool_add_healthcheck(self, targetpool, healthcheck):
        """
        Add a health check to a target pool.

        :param  targetpool: The targetpool to add health check to
        :type   targetpool: ``str`` or :class:`GCETargetPool`

        :param  healthcheck: The healthcheck to add
        :type   healthcheck: ``str`` or :class:`GCEHealthCheck`

        :return:  True if successful
        :rtype:   ``bool``
        """
        if not hasattr(targetpool, 'name'):
            targetpool = self.ex_get_targetpool(targetpool)
        if not hasattr(healthcheck, 'name'):
            healthcheck = self.ex_get_healthcheck(healthcheck)

        targetpool_data = {
            'healthChecks': [{'healthCheck': healthcheck.extra['selfLink']}]
        }

        request = '/regions/%s/targetPools/%s/addHealthCheck' % (
            targetpool.region.name, targetpool.name)
        self.connection.async_request(request, method='POST',
                                      data=targetpool_data)
        # Keep the local targetpool object in sync with the API call.
        targetpool.healthchecks.append(healthcheck)
        return True

    def ex_targetpool_remove_node(self, targetpool, node):
        """
        Remove a node from a target pool.

        :param  targetpool: The targetpool to remove node from
        :type   targetpool: ``str`` or :class:`GCETargetPool`

        :param  node: The node to remove
        :type   node: ``str`` or :class:`Node`

        :return:  True if successful
        :rtype:   ``bool``
        """
        if not hasattr(targetpool, 'name'):
            targetpool = self.ex_get_targetpool(targetpool)
        # Resolve the node argument into a selfLink URI; it may be a Node
        # object, a URI string, or a bare instance name.
        if hasattr(node, 'name'):
            node_uri = node.extra['selfLink']
        else:
            if node.startswith('https://'):
                node_uri = node
            else:
                node = self.ex_get_node(node, 'all')
                node_uri = node.extra['selfLink']

        targetpool_data = {'instances': [{'instance': node_uri}]}

        request = '/regions/%s/targetPools/%s/removeInstance' % (
            targetpool.region.name, targetpool.name)
        self.connection.async_request(request, method='POST',
                                      data=targetpool_data)
        # Remove node object from node list
        index = None
        for i, nd in enumerate(targetpool.nodes):
            if nd == node_uri or (hasattr(nd, 'extra') and
                                  nd.extra['selfLink'] == node_uri):
                index = i
                break
        if index is not None:
            targetpool.nodes.pop(index)
        return True

    def ex_targetpool_remove_healthcheck(self, targetpool, healthcheck):
        """
        Remove a health check from a target pool.
        :rtype: ``bool``
        """
        request = "/zones/%s/instanceGroups/%s/addInstances" % (
            instancegroup.zone.name, instancegroup.name)
        request_data = {'instances': [{'instance': x.extra['selfLink']}
                                      for x in node_list]}
        self.connection.async_request(request, method='POST',
                                      data=request_data)
        return True

    def ex_instancegroup_remove_instances(self, instancegroup, node_list):
        """
        Removes one or more instances from the specified instance group,
        but does not delete those instances.

        Scopes needed - one of the following:

        * https://www.googleapis.com/auth/cloud-platform
        * https://www.googleapis.com/auth/compute

        :param  instancegroup: The Instance Group where the
                               specified instances will be removed.
        :type   instancegroup: :class:``GCEInstanceGroup``

        :param  node_list: List of nodes to add.
        :type   node_list: ``list`` of :class:`Node` or ``list`` of
                           :class:`GCENode`

        :return: True if successful.
        :rtype: ``bool``
        """
        request = "/zones/%s/instanceGroups/%s/removeInstances" % (
            instancegroup.zone.name, instancegroup.name)
        request_data = {'instances': [{'instance': x.extra['selfLink']}
                                      for x in node_list]}
        self.connection.async_request(request, method='POST',
                                      data=request_data)
        return True

    def ex_instancegroup_list_instances(self, instancegroup):
        """
        Lists the instances in the specified instance group.

        Scopes needed - one of the following:

        * https://www.googleapis.com/auth/cloud-platform
        * https://www.googleapis.com/auth/compute
        * https://www.googleapis.com/auth/compute.readonly

        :param  instancegroup: The Instance Group where from which you want
                               to generate a list of included instances.
        :type   instancegroup: :class:`GCEInstanceGroup`

        :return: List of :class:`GCENode` objects.
        :rtype: ``list`` of :class:`GCENode` objects.
        """
        request = "/zones/%s/instanceGroups/%s/listInstances" % (
            instancegroup.zone.name, instancegroup.name)
        # Note: This API requires a 'POST'.
        response = self.connection.request(request, method='POST').object

        list_data = []
        if 'items' in response:
            for v in response['items']:
                # Each item carries only the instance URI; resolve it to a
                # full node object via its name/zone path components.
                instance_info = self._get_components_from_path(v['instance'])
                list_data.append(
                    self.ex_get_node(instance_info['name'], instance_info[
                        'zone']))
        return list_data

    def ex_instancegroup_set_named_ports(self, instancegroup, named_ports=[]):
        """
        Sets the named ports for the specified instance group.

        Scopes needed - one of the following:

        * https://www.googleapis.com/auth/cloud-platform
        * https://www.googleapis.com/auth/compute

        :param  instancegroup: The Instance Group where where the named ports
                               are updated.
        :type   instancegroup: :class:`GCEInstanceGroup`

        :param  named_ports: Assigns a name to a port number. For example:
                             {name: "http", port: 80}  This allows the system
                             to reference ports by the assigned name instead
                             of a port number. Named ports can also contain
                             multiple ports. For example: [{name: "http",
                             port: 80},{name: "http", port: 8080}]  Named
                             ports apply to all instances in this instance
                             group.
        :type   named_ports: ``list`` of {'name': ``str``, 'port`: ``int``}

        :return: Return True if successful.
        :rtype: ``bool``
        """
        # NOTE(review): mutable default argument; safe today because the
        # list is never mutated, but prefer named_ports=None in new code.
        if not isinstance(named_ports, list):
            raise ValueError("'named_ports' must be a list of name/port"
                             " dictionaries.")

        request = "/zones/%s/instanceGroups/%s/setNamedPorts" % (
            instancegroup.zone.name, instancegroup.name)
        # The current fingerprint must be echoed back for optimistic
        # concurrency control on the named-ports update.
        request_data = {'namedPorts': named_ports,
                        'fingerprint': instancegroup.extra['fingerprint']}
        self.connection.async_request(request, method='POST',
                                      data=request_data)
        return True

    def ex_destroy_instancegroup(self, instancegroup):
        """
        Deletes the specified instance group. The instances in the group
        are not deleted. Note that instance group must not belong to a
        backend service. Read  Deleting an instance group for more
        information.

        Scopes needed - one of the following:

        * https://www.googleapis.com/auth/cloud-platform
        * https://www.googleapis.com/auth/compute

        :param  instancegroup: The name of the instance group to delete.
        :type   instancegroup: :class:`GCEInstanceGroup`

        :return: Return True if successful.
        :rtype: ``bool``
        """
        request = "/zones/%s/instanceGroups/%s" % (instancegroup.zone.name,
                                                   instancegroup.name)
        request_data = {}
        self.connection.async_request(request, method='DELETE',
                                      data=request_data)

        return True

    def ex_instancegroupmanager_list_managed_instances(self, manager):
        """
        Lists all of the instances in the Managed Instance Group.

        Each instance in the list has a currentAction, which indicates
        the action that the managed instance group is performing on the
        instance. For example, if the group is still creating an instance,
        the currentAction is 'CREATING'.  Note that 'instanceStatus' might not
        be available, for example, if currentAction is 'CREATING' or
        'RECREATING'. If a previous action failed, the list displays the
        errors for that failed action.

        Scopes needed - one of the following:

        * https://www.googleapis.com/auth/cloud-platform
        * https://www.googleapis.com/auth/compute
        * https://www.googleapis.com/auth/compute.readonly

        'currentAction' values are one of:
            'ABANDONING', 'CREATING', 'DELETING', 'NONE',
            'RECREATING', 'REFRESHING', 'RESTARTING'

        :param  manager: Instance Group Manager to operate on.
        :type   manager: :class:`GCEInstanceGroupManager`

        :return: ``list`` of ``dict`` containing 'name', 'zone',
                 'lastAttempt', 'currentAction', 'instance' and
                 'instanceStatus'.
        :rtype: ``list``
        """
        request = "/zones/%s/instanceGroupManagers/%s/listManagedInstances" % (
            manager.zone.name, manager.name)
        # Note: This API requires a 'POST'.
response = self.connection.request(request, method='POST').object instance_data = [] if 'managedInstances' in response: for i in response['managedInstances']: i['name'] = self._get_components_from_path(i['instance'])[ 'name'] i['zone'] = manager.zone.name instance_data.append(i) return instance_data def ex_instancegroupmanager_set_instancetemplate(self, manager, instancetemplate): """ Set the Instance Template for this Instance Group. Existing VMs are not recreated by setting a new InstanceTemplate. :param manager: Instance Group Manager to operate on. :type manager: :class:`GCEInstanceGroupManager` :param instancetemplate: Instance Template to set. :type instancetemplate: :class:`GCEInstanceTemplate` :return: True if successful :rtype: ``bool`` """ req_data = {'instanceTemplate': instancetemplate.extra['selfLink']} request = '/zones/%s/instanceGroupManagers/' \ '%s/setInstanceTemplate' % (manager.zone.name, manager.name) self.connection.async_request(request, method='POST', data=req_data) return True def ex_instancegroupmanager_recreate_instances(self, manager, instances=None): """ Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method or querying the managed instance group directly. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param manager: Required. The name of the managed instance group. The name must be 1-63 characters long, and comply with RFC1035. :type manager: ``str`` or :class: `GCEInstanceGroupManager` :keyword instances: list of Node objects to be recreated. 
def ex_instancegroupmanager_delete_instances(self, manager, node_list):
    """
    Remove the given instances from the managed instance group and
    destroy them.

    Scopes needed - one of the following:
    * https://www.googleapis.com/auth/cloud-platform
    * https://www.googleapis.com/auth/compute

    :param  manager: The managed instance group to operate on.
    :type   manager: :class:`GCEInstanceGroupManager`

    :param  node_list: Nodes to delete.
    :type   node_list: ``list`` of :class:`Node`

    :return: True if successful
    :rtype: ``bool``
    """
    req = "/zones/%s/instanceGroupManagers/%s/deleteInstances" % (
        manager.zone.name, manager.name)
    body = {'instances': [n.extra['selfLink'] for n in node_list]}
    self.connection.request(req, method='POST', data=body).object
    return True


def ex_instancegroupmanager_resize(self, manager, size):
    """
    Resize the Managed Instance Group to the given number of instances.

    :param  manager: Instance Group Manager to operate on.
    :type   manager: :class:`GCEInstanceGroupManager`

    :param  size: New size of Managed Instance Group.
    :type   size: ``int``

    :return: True if successful
    :rtype: ``bool``
    """
    req = '/zones/%s/instanceGroupManagers/%s/resize' % (
        manager.zone.name, manager.name)
    self.connection.async_request(req, method='POST',
                                  params={'size': size})
    return True


def reboot_node(self, node):
    """
    Reboot (reset) a node.

    :param  node: Node to be rebooted
    :type   node: :class:`Node`

    :return: True if successful, False if not
    :rtype: ``bool``
    """
    req = '/zones/%s/instances/%s/reset' % (node.extra['zone'].name,
                                            node.name)
    self.connection.async_request(req, method='POST', data='ignored')
    return True


def ex_set_node_tags(self, node, tags):
    """
    Set the tags on a Node instance.  The passed-in node object is
    updated in place with the new tags and fingerprint.

    :param  node: Node object
    :type   node: :class:`Node`

    :param  tags: List of tags to apply to the object
    :type   tags: ``list`` of ``str``

    :return: True if successful
    :rtype: ``bool``
    """
    req = '/zones/%s/instances/%s/setTags' % (node.extra['zone'].name,
                                              node.name)
    body = {'items': tags,
            'fingerprint': node.extra['tags_fingerprint']}
    self.connection.async_request(req, method='POST', data=body)

    # Refresh the node's cached tag data from the API.
    refreshed = self.ex_get_node(node.name, node.extra['zone'])
    node.extra['tags'] = refreshed.extra['tags']
    node.extra['tags_fingerprint'] = refreshed.extra['tags_fingerprint']
    return True
def ex_set_node_scheduling(self, node, on_host_maintenance=None,
                           automatic_restart=None):
    """
    Set the maintenance behavior for the node.

    See `Scheduling`_ documentation for more info.

    :param  node: Node object (or node name; looked up across zones)
    :type   node: :class:`Node`

    :keyword  on_host_maintenance: 'MIGRATE' or 'TERMINATE' — whether the
                                   node is migrated or terminated when the
                                   host machine goes down.  If omitted,
                                   GCE uses its default for the type.
    :type     on_host_maintenance: ``str``

    :keyword  automatic_restart: Whether the instance is automatically
                                 restarted after Compute Engine terminates
                                 it.  If omitted, GCE uses its default.
    :type     automatic_restart: ``bool``

    :return: True if successful.
    :rtype: ``bool``
    """
    if not hasattr(node, 'name'):
        node = self.ex_get_node(node, 'all')
    if on_host_maintenance is not None:
        on_host_maintenance = on_host_maintenance.upper()
        ohm_values = ['MIGRATE', 'TERMINATE']
        if on_host_maintenance not in ohm_values:
            raise ValueError('on_host_maintenance must be one of %s' %
                             ','.join(ohm_values))

    req = '/zones/%s/instances/%s/setScheduling' % (
        node.extra['zone'].name, node.name)
    scheduling_data = {}
    if on_host_maintenance is not None:
        scheduling_data['onHostMaintenance'] = on_host_maintenance
    if automatic_restart is not None:
        scheduling_data['automaticRestart'] = automatic_restart
    self.connection.async_request(req, method='POST',
                                  data=scheduling_data)

    # Re-fetch and verify the values actually took effect.
    refreshed = self.ex_get_node(node.name, node.extra['zone'])
    node.extra['scheduling'] = refreshed.extra['scheduling']
    ohm = node.extra['scheduling'].get('onHostMaintenance')
    ar = node.extra['scheduling'].get('automaticRestart')
    return (on_host_maintenance in [None, ohm] and
            automatic_restart in [None, ar])


def deploy_node(self, name, size, image, script, location=None,
                ex_network='default', ex_tags=None,
                ex_service_accounts=None):
    """
    Create a new node and run a script on start-up.

    :param  name: The name of the node to create.
    :type   name: ``str``

    :param  size: The machine type to use.
    :type   size: ``str`` or :class:`GCENodeSize`

    :param  image: The image to use to create the node.
    :type   image: ``str`` or :class:`GCENodeImage`

    :param  script: File path to start-up script
    :type   script: ``str``

    :keyword  location: The location (zone) to create the node in.
    :type     location: ``str`` or :class:`NodeLocation` or
                        :class:`GCEZone` or ``None``

    :keyword  ex_network: The network to associate with the node.
    :type     ex_network: ``str`` or :class:`GCENetwork`

    :keyword  ex_tags: A list of tags to associate with the node.
    :type     ex_tags: ``list`` of ``str`` or ``None``

    :keyword  ex_service_accounts: List of dicts of the form
                                   [{'email': ..., 'scopes': [...]}, ...]
                                   passed through to create_node.
    :type     ex_service_accounts: ``list``

    :return: A Node object for the new node.
    :rtype: :class:`Node`
    """
    with open(script, 'r') as fh:
        contents = fh.read()
    # The script is delivered via instance metadata under the
    # well-known 'startup-script' key.
    # TODO(erjohnso): allow user defined metadata here...
    metadata = {'items': [{'key': 'startup-script',
                           'value': contents}]}
    return self.create_node(name, size, image, location=location,
                            ex_network=ex_network, ex_tags=ex_tags,
                            ex_metadata=metadata,
                            ex_service_accounts=ex_service_accounts)
def attach_volume(self, node, volume, device=None, ex_mode=None,
                  ex_boot=False, ex_type=None, ex_source=None,
                  ex_auto_delete=None, ex_initialize_params=None,
                  ex_licenses=None, ex_interface=None):
    """
    Attach a volume to a node.

    If volume is None, an ex_source URL must be provided.

    :param  node: The node to attach the volume to
    :type   node: :class:`Node` or ``None``

    :param  volume: The volume to attach.
    :type   volume: :class:`StorageVolume` or ``None``

    :keyword  device: The device name to attach the volume as.  Defaults
                      to volume name.
    :type     device: ``str``

    :keyword  ex_mode: Either 'READ_WRITE' or 'READ_ONLY'
    :type     ex_mode: ``str``

    :keyword  ex_boot: If true, disk will be attached as a boot disk
    :type     ex_boot: ``bool``

    :keyword  ex_type: Specify either 'PERSISTENT' (default) or 'SCRATCH'.
    :type     ex_type: ``str``

    :keyword  ex_source: URL (full or partial) of disk source.  Must be
                         present if not using an existing StorageVolume.
    :type     ex_source: ``str`` or ``None``

    :keyword  ex_auto_delete: If set, the disk will be auto-deleted if
                              the parent node/instance is deleted.
    :type     ex_auto_delete: ``bool`` or ``None``

    :keyword  ex_initialize_params: Allow user to pass in full JSON
                                    struct of `initializeParams` as
                                    documented in GCE's API.
    :type     ex_initialize_params: ``dict`` or ``None``

    :keyword  ex_licenses: List of strings representing licenses
                           associated with the volume/disk.
    :type     ex_licenses: ``list`` of ``str``

    :keyword  ex_interface: User can specify either 'SCSI' (default) or
                            'NVME'.
    :type     ex_interface: ``str`` or ``None``

    :return: True if successful
    :rtype: ``bool``
    """
    if volume is None and ex_source is None:
        raise ValueError("Must supply either a StorageVolume or "
                         "set `ex_source` URL for an existing disk.")
    if volume is None and device is None:
        raise ValueError("Must supply either a StorageVolume or "
                         "set `device` name.")

    volume_data = {}
    if ex_initialize_params:
        # Fixed: key was misspelled 'initialzeParams', which the API
        # would silently ignore.
        volume_data['initializeParams'] = ex_initialize_params
    if ex_licenses:
        volume_data['licenses'] = ex_licenses
    if ex_interface:
        volume_data['interface'] = ex_interface
    if ex_type:
        volume_data['type'] = ex_type
    # Fixed: 'is not None' so an explicit autoDelete=False is sent
    # rather than silently dropped (the keyword is documented as
    # ``bool`` or ``None``).
    if ex_auto_delete is not None:
        volume_data['autoDelete'] = ex_auto_delete
    volume_data['source'] = ex_source or volume.extra['selfLink']
    volume_data['mode'] = ex_mode or 'READ_WRITE'
    # Guards above ensure at least one of device/volume is present.
    volume_data['deviceName'] = device if device else volume.name
    volume_data['boot'] = ex_boot

    request = '/zones/%s/instances/%s/attachDisk' % (
        node.extra['zone'].name, node.name)
    self.connection.async_request(request, method='POST',
                                  data=volume_data)
    return True


def detach_volume(self, volume, ex_node=None):
    """
    Detach a volume from a node.

    :param  volume: Volume object to detach
    :type   volume: :class:`StorageVolume`

    :keyword  ex_node: Node object to detach volume from (required)
    :type     ex_node: :class:`Node`

    :return: True if successful, False if no node was given
    :rtype: ``bool``
    """
    if not ex_node:
        return False
    request = '/zones/%s/instances/%s/detachDisk?deviceName=%s' % (
        ex_node.extra['zone'].name, ex_node.name, volume.name)
    self.connection.async_request(request, method='POST', data='ignored')
    return True
def ex_set_volume_auto_delete(self, volume, node, auto_delete=True):
    """
    Set the auto-delete flag for a volume attached to a node.

    :param  volume: Volume object to auto-delete
    :type   volume: :class:`StorageVolume`

    :param  node: Node the volume is attached to
    :type   node: :class:`Node`

    :keyword  auto_delete: Flag to set for the auto-delete value
    :type     auto_delete: ``bool`` (default True)

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/zones/%s/instances/%s/setDiskAutoDelete' % (
        node.extra['zone'].name, node.name)
    delete_params = {
        'deviceName': volume.name,
        'autoDelete': auto_delete,
    }
    self.connection.async_request(request, method='POST',
                                  params=delete_params)
    return True


def ex_destroy_address(self, address):
    """
    Destroy a static address.

    :param  address: Address object (or address name) to destroy
    :type   address: ``str`` or :class:`GCEAddress`

    :return: True if successful
    :rtype: ``bool``
    """
    if not hasattr(address, 'name'):
        address = self.ex_get_address(address)
    # Regional addresses carry a region object; global ones do not.
    if hasattr(address.region, 'name'):
        request = '/regions/%s/addresses/%s' % (address.region.name,
                                                address.name)
    else:
        request = '/global/addresses/%s' % (address.name)
    self.connection.async_request(request, method='DELETE')
    return True


def ex_destroy_backendservice(self, backendservice):
    """
    Destroy a Backend Service.

    :param  backendservice: BackendService object to destroy
    :type   backendservice: :class:`GCEBackendService`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/global/backendServices/%s' % backendservice.name
    self.connection.async_request(request, method='DELETE')
    return True


def ex_delete_image(self, image):
    """
    Delete a specific image resource.

    :param  image: Image object (or image name) to delete
    :type   image: ``str`` or :class:`GCENodeImage`

    :return: True if successful
    :rtype: ``bool``
    """
    if not hasattr(image, 'name'):
        image = self.ex_get_image(image)
    request = '/global/images/%s' % (image.name)
    self.connection.async_request(request, method='DELETE')
    return True


def ex_deprecate_image(self, image, replacement, state=None,
                       deprecated=None, obsolete=None, deleted=None):
    """
    Deprecate a specific image resource.

    :param  image: Image object to deprecate
    :type   image: ``str`` or :class:`GCENodeImage`

    :param  replacement: Image object to use as a replacement
    :type   replacement: ``str`` or :class:`GCENodeImage`

    :param  state: State of the image: one of 'ACTIVE', 'DELETED',
                   'DEPRECATED' or 'OBSOLETE'.  Defaults to 'DEPRECATED'.
    :type   state: ``str``

    :param  deprecated: RFC3339 timestamp to mark DEPRECATED
    :type   deprecated: ``str`` or ``None``

    :param  obsolete: RFC3339 timestamp to mark OBSOLETE
    :type   obsolete: ``str`` or ``None``

    :param  deleted: RFC3339 timestamp to mark DELETED
    :type   deleted: ``str`` or ``None``

    :return: True if successful
    :rtype: ``bool``
    """
    if not hasattr(image, 'name'):
        image = self.ex_get_image(image)
    if not hasattr(replacement, 'name'):
        replacement = self.ex_get_image(replacement)
    if state is None:
        state = 'DEPRECATED'
    possible_states = ['ACTIVE', 'DELETED', 'DEPRECATED', 'OBSOLETE']
    if state not in possible_states:
        raise ValueError('state must be one of %s' %
                         ','.join(possible_states))
    if state == 'ACTIVE':
        image_data = {}
    else:
        image_data = {
            'state': state,
            'replacement': replacement.extra['selfLink'],
        }
    for attribute, value in [('deprecated', deprecated),
                             ('obsolete', obsolete),
                             ('deleted', deleted)]:
        if value is None:
            continue
        try:
            timestamp_to_datetime(value)
        except Exception:
            # Fixed: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise ValueError('%s must be an RFC3339 timestamp' %
                             attribute)
        image_data[attribute] = value
    request = '/global/images/%s/deprecate' % (image.name)
    self.connection.request(request, method='POST',
                            data=image_data).object
    return True


def ex_destroy_healthcheck(self, healthcheck):
    """
    Destroy a healthcheck.

    :param  healthcheck: Health check object to destroy
    :type   healthcheck: :class:`GCEHealthCheck`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/global/httpHealthChecks/%s' % (healthcheck.name)
    self.connection.async_request(request, method='DELETE')
    return True


def ex_destroy_firewall(self, firewall):
    """
    Destroy a firewall.

    :param  firewall: Firewall object to destroy
    :type   firewall: :class:`GCEFirewall`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/global/firewalls/%s' % (firewall.name)
    self.connection.async_request(request, method='DELETE')
    return True


def ex_destroy_forwarding_rule(self, forwarding_rule):
    """
    Destroy a forwarding rule.

    :param  forwarding_rule: Forwarding Rule object to destroy
    :type   forwarding_rule: :class:`GCEForwardingRule`

    :return: True if successful
    :rtype: ``bool``
    """
    if forwarding_rule.region:
        request = '/regions/%s/forwardingRules/%s' % (
            forwarding_rule.region.name, forwarding_rule.name)
    else:
        request = '/global/forwardingRules/%s' % forwarding_rule.name
    self.connection.async_request(request, method='DELETE')
    return True


def ex_destroy_route(self, route):
    """
    Destroy a route.

    :param  route: Route object to destroy
    :type   route: :class:`GCERoute`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/global/routes/%s' % (route.name)
    self.connection.async_request(request, method='DELETE')
    return True


def ex_destroy_network(self, network):
    """
    Destroy a network.

    :param  network: Network object to destroy
    :type   network: :class:`GCENetwork`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/global/networks/%s' % (network.name)
    self.connection.async_request(request, method='DELETE')
    return True


def ex_set_machine_type(self, node, machine_type='n1-standard-1'):
    """
    Set the machine type of a stopped instance.  Accepts the short
    name, a full URL, or a partial URL.

    :param  node: Target node object to change
    :type   node: :class:`Node`

    :keyword  machine_type: Desired machine type
    :type     machine_type: ``str``

    :return: True if successful
    :rtype: ``bool``
    """
    zone_prefix = '/zones/%s' % node.extra['zone'].name
    # Only the last URL component (the type's short name) is needed.
    short_type = machine_type.split('/')[-1]
    body = {'machineType': '%s/machineTypes/%s' % (zone_prefix,
                                                   short_type)}
    request = '%s/instances/%s/setMachineType' % (zone_prefix, node.name)
    self.connection.async_request(request, method='POST', data=body)
    return True


def ex_start_node(self, node):
    """
    Start a node that is stopped and in TERMINATED state.

    :param  node: Node object to start
    :type   node: :class:`Node`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/zones/%s/instances/%s/start' % (node.extra['zone'].name,
                                                node.name)
    self.connection.async_request(request, method='POST')
    return True


def ex_stop_node(self, node):
    """
    Stop a running node.

    :param  node: Node object to stop
    :type   node: :class:`Node`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/zones/%s/instances/%s/stop' % (node.extra['zone'].name,
                                               node.name)
    self.connection.async_request(request, method='POST')
    return True


def ex_destroy_instancegroupmanager(self, manager):
    """
    Destroy a managed instance group.  This destroys all instances that
    belong to the instance group.

    :param  manager: InstanceGroupManager object to destroy.
    :type   manager: :class:`GCEInstanceGroupManager`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/zones/%s/instanceGroupManagers/%s' % (manager.zone.name,
                                                      manager.name)
    self.connection.async_request(request, method='DELETE')
    return True
def ex_destroy_instancetemplate(self, instancetemplate):
    """
    Delete the specified instance template.  If the template is still
    referenced by an instance group, that group will no longer be able
    to create or recreate instances.  Deletion is permanent.

    Scopes needed - one of the following:
    * https://www.googleapis.com/auth/cloud-platform
    * https://www.googleapis.com/auth/compute

    :param  instancetemplate: The instance template to delete.
    :type   instancetemplate: :class:`GCEInstanceTemplate`

    :return: True if successful.
    :rtype: ``bool``
    """
    request = "/global/instanceTemplates/%s" % (instancetemplate.name)
    request_data = {}
    self.connection.async_request(request, method='DELETE',
                                  data=request_data)
    return True


def ex_destroy_autoscaler(self, autoscaler):
    """
    Destroy an Autoscaler.

    :param  autoscaler: Autoscaler object to destroy.
    :type   autoscaler: :class:`GCEAutoscaler`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/zones/%s/autoscalers/%s' % (autoscaler.zone.name,
                                            autoscaler.name)
    self.connection.async_request(request, method='DELETE')
    return True


def destroy_node(self, node, destroy_boot_disk=False):
    """
    Destroy a node.

    :param  node: Node object to destroy
    :type   node: :class:`Node`

    :keyword  destroy_boot_disk: If true, also destroy the node's
                                 boot disk.  (Not accessible from the
                                 node's .destroy() method.)
    :type     destroy_boot_disk: ``bool``

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/zones/%s/instances/%s' % (node.extra['zone'].name,
                                          node.name)
    self.connection.async_request(request, method='DELETE')
    if destroy_boot_disk and node.extra['boot_disk']:
        node.extra['boot_disk'].destroy()
    return True


def ex_destroy_multiple_nodes(self, node_list, ignore_errors=True,
                              destroy_boot_disk=False, poll_interval=2,
                              timeout=DEFAULT_TASK_COMPLETION_TIMEOUT):
    """
    Destroy multiple nodes at once.

    :param  node_list: List of nodes to destroy
    :type   node_list: ``list`` of :class:`Node`

    :keyword  ignore_errors: If true, don't raise an exception if one
                             or more nodes fails to be destroyed.
    :type     ignore_errors: ``bool``

    :keyword  destroy_boot_disk: If true, also destroy the nodes' boot
                                 disks.
    :type     destroy_boot_disk: ``bool``

    :keyword  poll_interval: Number of seconds between status checks.
    :type     poll_interval: ``int``

    :keyword  timeout: Number of seconds to wait for all nodes to be
                       destroyed.
    :type     timeout: ``int``

    :return: A list of boolean values.  One for each node.  True means
             that the node was successfully destroyed.
    :rtype: ``list`` of ``bool``
    """
    status_list = []
    complete = False
    start_time = time.time()

    # Kick off all instance deletions up front; poll for completion below.
    for node in node_list:
        request = '/zones/%s/instances/%s' % (node.extra['zone'].name,
                                              node.name)
        try:
            response = self.connection.request(request,
                                               method='DELETE').object
        except GoogleBaseError:
            self._catch_error(ignore_errors=ignore_errors)
            response = None

        status = {'node': node,
                  'node_success': False,
                  'node_response': response,
                  'disk_success': not destroy_boot_disk,
                  'disk_response': None}

        status_list.append(status)

    while not complete:
        if (time.time() - start_time >= timeout):
            # Fixed: the timeout value was never interpolated into the
            # message (the '%s' was left unformatted).
            raise Exception("Timeout (%s sec) while waiting to delete "
                            "multiple instances" % timeout)
        complete = True
        for status in status_list:
            # If one of the operations is running, check the status
            operation = status['node_response'] or status['disk_response']
            delete_disk = False
            if operation:
                no_errors = True
                try:
                    response = self.connection.request(
                        operation['selfLink']).object
                except GoogleBaseError:
                    self._catch_error(ignore_errors=ignore_errors)
                    no_errors = False
                    response = {'status': 'DONE'}
                if response['status'] == 'DONE':
                    # If a node was deleted, update status and indicate
                    # that the disk is ready to be deleted.
                    if status['node_response']:
                        status['node_response'] = None
                        status['node_success'] = no_errors
                        delete_disk = True
                    else:
                        status['disk_response'] = None
                        status['disk_success'] = no_errors
            # If we are destroying disks, and the node has been
            # deleted, destroy the disk.
            if delete_disk and destroy_boot_disk:
                boot_disk = status['node'].extra['boot_disk']
                if boot_disk:
                    request = '/zones/%s/disks/%s' % (
                        boot_disk.extra['zone'].name, boot_disk.name)
                    try:
                        response = self.connection.request(
                            request, method='DELETE').object
                    except GoogleBaseError:
                        self._catch_error(ignore_errors=ignore_errors)
                        no_errors = False
                        response = None
                    status['disk_response'] = response
                else:
                    # No boot disk to clean up; treat as success.
                    status['disk_success'] = True
            operation = status['node_response'] or status['disk_response']
            if operation:
                time.sleep(poll_interval)
                complete = False

    return [s['node_success'] and s['disk_success'] for s in status_list]


def ex_destroy_targethttpproxy(self, targethttpproxy):
    """
    Destroy a target HTTP proxy.

    :param  targethttpproxy: TargetHttpProxy object to destroy
    :type   targethttpproxy: :class:`GCETargetHttpProxy`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/global/targetHttpProxies/%s' % targethttpproxy.name
    self.connection.async_request(request, method='DELETE')
    return True


def ex_destroy_targethttpsproxy(self, targethttpsproxy):
    """
    Delete the specified TargetHttpsProxy resource.

    Scopes needed - one of the following:
    * https://www.googleapis.com/auth/cloud-platform
    * https://www.googleapis.com/auth/compute

    :param  targethttpsproxy: The TargetHttpsProxy resource to delete.
    :type   targethttpsproxy: :class:`GCETargetHttpsProxy`

    :return: True if successful.
    :rtype: ``bool``
    """
    request = "/global/targetHttpsProxies/%s" % (targethttpsproxy.name)
    request_data = {}
    self.connection.async_request(request, method='DELETE',
                                  data=request_data)
    return True


def ex_destroy_targetinstance(self, targetinstance):
    """
    Destroy a target instance.

    :param  targetinstance: TargetInstance object to destroy
    :type   targetinstance: :class:`GCETargetInstance`

    :return: True if successful
    :rtype: ``bool``
    """
    request = '/zones/%s/targetInstances/%s' % (targetinstance.zone.name,
                                                targetinstance.name)
    self.connection.async_request(request, method='DELETE')
    return True
def ex_destroy_targetpool(self, targetpool):
    """
    Destroy a target pool.

    :param  targetpool: TargetPool object to destroy
    :type   targetpool: :class:`GCETargetPool`

    :return: True if successful
    :rtype: ``bool``
    """
    req = '/regions/%s/targetPools/%s' % (targetpool.region.name,
                                          targetpool.name)
    self.connection.async_request(req, method='DELETE')
    return True


def ex_destroy_urlmap(self, urlmap):
    """
    Destroy a URL map.

    :param  urlmap: UrlMap object to destroy
    :type   urlmap: :class:`GCEUrlMap`

    :return: True if successful
    :rtype: ``bool``
    """
    self.connection.async_request('/global/urlMaps/%s' % urlmap.name,
                                  method='DELETE')
    return True


def destroy_volume(self, volume):
    """
    Destroy a volume.

    :param  volume: Volume object to destroy
    :type   volume: :class:`StorageVolume`

    :return: True if successful
    :rtype: ``bool``
    """
    req = '/zones/%s/disks/%s' % (volume.extra['zone'].name, volume.name)
    self.connection.async_request(req, method='DELETE')
    return True


def destroy_volume_snapshot(self, snapshot):
    """
    Destroy a snapshot.

    :param  snapshot: Snapshot object to destroy
    :type   snapshot: :class:`GCESnapshot`

    :return: True if successful
    :rtype: ``bool``
    """
    self.connection.async_request('/global/snapshots/%s' % snapshot.name,
                                  method='DELETE')
    return True


def ex_get_license(self, project, name):
    """
    Return a License object for the given project and name.

    :param  project: The project to reference when looking up the
                     license.
    :type   project: ``str``

    :param  name: The name of the License
    :type   name: ``str``

    :return: A License object for the name
    :rtype: :class:`GCELicense`
    """
    # Lazily constructed - no API call is made here.
    return GCELicense.lazy(name, project, self)


def ex_get_disktype(self, name, zone=None):
    """
    Return a DiskType object based on a name and optional zone.

    :param  name: The name of the DiskType
    :type   name: ``str``

    :keyword  zone: The zone to search for the DiskType in (set to
                    'all' to search all zones)
    :type     zone: ``str`` :class:`GCEZone` or ``None``

    :return: A DiskType object for the name
    :rtype: :class:`GCEDiskType`
    """
    zone = self._set_zone(zone)
    req = '/zones/%s/diskTypes/%s' % (zone.name, name)
    resp = self.connection.request(req, method='GET').object
    return self._to_disktype(resp)


def ex_get_address(self, name, region=None):
    """
    Return an Address object based on an address name and optional
    region.

    :param  name: The name of the address
    :type   name: ``str``

    :keyword  region: The region to search for the address in (set to
                      'all' to search all regions)
    :type     region: ``str`` :class:`GCERegion` or ``None``

    :return: An Address object for the address
    :rtype: :class:`GCEAddress`
    """
    if region == 'global':
        req = '/global/addresses/%s' % name
    else:
        # Resolve the region, falling back to a search across regions.
        region = self._set_region(region) or self._find_zone_or_region(
            name, 'addresses', region=True, res_name='Address')
        req = '/regions/%s/addresses/%s' % (region.name, name)
    resp = self.connection.request(req, method='GET').object
    return self._to_address(resp)


def ex_get_backendservice(self, name):
    """
    Return a Backend Service object based on name.

    :param  name: The name of the backend service
    :type   name: ``str``

    :return: A BackendService object for the backend service
    :rtype: :class:`GCEBackendService`
    """
    resp = self.connection.request('/global/backendServices/%s' % name,
                                   method='GET').object
    return self._to_backendservice(resp)


def ex_get_healthcheck(self, name):
    """
    Return a HealthCheck object based on the healthcheck name.

    :param  name: The name of the healthcheck
    :type   name: ``str``

    :return: A GCEHealthCheck object
    :rtype: :class:`GCEHealthCheck`
    """
    resp = self.connection.request('/global/httpHealthChecks/%s' % name,
                                   method='GET').object
    return self._to_healthcheck(resp)
def ex_get_firewall(self, name):
    """
    Return a Firewall object based on the firewall name.

    :param  name: The name of the firewall
    :type   name: ``str``

    :return: A GCEFirewall object
    :rtype: :class:`GCEFirewall`
    """
    resp = self.connection.request('/global/firewalls/%s' % name,
                                   method='GET').object
    return self._to_firewall(resp)


def ex_get_forwarding_rule(self, name, region=None, global_rule=False):
    """
    Return a Forwarding Rule object based on the forwarding rule name.

    :param  name: The name of the forwarding rule
    :type   name: ``str``

    :keyword  region: The region to search for the rule in (set to
                      'all' to search all regions).  Ignored when
                      global_rule is True.
    :type     region: ``str`` or ``None``

    :keyword  global_rule: Set to True to get a global forwarding rule.
    :type     global_rule: ``bool``

    :return: A GCEForwardingRule object
    :rtype: :class:`GCEForwardingRule`
    """
    if global_rule:
        req = '/global/forwardingRules/%s' % name
    else:
        region = self._set_region(region) or self._find_zone_or_region(
            name, 'forwardingRules', region=True,
            res_name='ForwardingRule')
        req = '/regions/%s/forwardingRules/%s' % (region.name, name)
    resp = self.connection.request(req, method='GET').object
    return self._to_forwarding_rule(resp)


def ex_get_image(self, partial_name, ex_project_list=None,
                 ex_standard_projects=True):
    """
    Return a GCENodeImage object based on the name or link provided.

    :param  partial_name: The name, partial name, or full path of a GCE
                          image.
    :type   partial_name: ``str``

    :param  ex_project_list: The name of the project to list for images.
                             Examples include: 'debian-cloud'.
    :type   ex_project_list: ``str`` or ``list`` of ``str`` or ``None``

    :param  ex_standard_projects: If true, also check in the standard
                                  public image projects when the image
                                  is not found.
    :type   ex_standard_projects: ``bool``

    :return: GCENodeImage object based on provided information.
    :rtype: :class:`GCENodeImage` or raise ``ResourceNotFoundError``
    """
    if partial_name.startswith('https://'):
        # A full resource URL - fetch it directly.
        resp = self.connection.request(partial_name, method='GET')
        return self._to_node_image(resp.object)
    image = self._match_images(ex_project_list, partial_name)
    if not image and ex_standard_projects:
        # Try the well-known public projects whose image names start
        # with a matching prefix.
        for img_proj, short_list in self.IMAGE_PROJECTS.items():
            for short_name in short_list:
                if partial_name.startswith(short_name):
                    image = self._match_images(img_proj, partial_name)
    if not image:
        raise ResourceNotFoundError('Could not find image \'%s\'' % (
                                    partial_name), None, None)
    return image


def ex_get_image_from_family(self, image_family, ex_project_list=None,
                             ex_standard_projects=True):
    """
    Return the latest GCENodeImage from the given image family.

    :param  image_family: The name of the 'Image Family' to return the
                          latest image from.
    :type   image_family: ``str``

    :param  ex_project_list: The name of the project to list for images.
                             Examples include: 'debian-cloud'.
    :type   ex_project_list: ``list`` of ``str``, or ``None``

    :param  ex_standard_projects: If true, also check in the standard
                                  public image projects when the image
                                  is not found.
    :type   ex_standard_projects: ``bool``

    :return: GCENodeImage object based on provided information.
    :rtype: :class:`GCENodeImage` or raise ``ResourceNotFoundError``
    """
    def _fetch_family(family, project=None):
        # When querying another project, temporarily rewrite the
        # connection's request path and always restore it afterwards.
        req = '/global/images/family/%s' % family
        saved_path = self.connection.request_path
        if project:
            self.connection.request_path = saved_path.replace(
                self.project, project)
        try:
            resp = self.connection.request(req, method='GET')
            found = self._to_node_image(resp.object)
        except ResourceNotFoundError:
            found = None
        finally:
            self.connection.request_path = saved_path
        return found

    if image_family.startswith('https://'):
        resp = self.connection.request(image_family, method='GET')
        return self._to_node_image(resp.object)

    image = None
    if not ex_project_list:
        image = _fetch_family(image_family)
    else:
        for proj in ex_project_list:
            image = _fetch_family(image_family, project=proj)
            if image:
                break
    if not image and ex_standard_projects:
        for proj, prefixes in self.IMAGE_PROJECTS.items():
            for prefix in prefixes:
                if image_family.startswith(prefix):
                    image = _fetch_family(image_family, project=proj)
    if not image:
        raise ResourceNotFoundError('Could not find image for family '
                                    '\'%s\'' % (image_family), None, None)
    return image
:rtype: :class:`GCENodeImage` or raise ``ResourceNotFoundError`` """ def _try_image_family(image_family, project=None): request = '/global/images/family/%s' % (image_family) save_request_path = self.connection.request_path if project: new_request_path = save_request_path.replace(self.project, project) self.connection.request_path = new_request_path try: response = self.connection.request(request, method='GET') image = self._to_node_image(response.object) except ResourceNotFoundError: image = None finally: self.connection.request_path = save_request_path return image image = None if image_family.startswith('https://'): response = self.connection.request(image_family, method='GET') return self._to_node_image(response.object) if not ex_project_list: image = _try_image_family(image_family) else: for img_proj in ex_project_list: image = _try_image_family(image_family, project=img_proj) if image: break if not image and ex_standard_projects: for img_proj, short_list in self.IMAGE_PROJECTS.items(): for short_name in short_list: if image_family.startswith(short_name): image = _try_image_family(image_family, project=img_proj) if not image: raise ResourceNotFoundError('Could not find image for family ' '\'%s\'' % (image_family), None, None) return image def ex_get_route(self, name): """ Return a Route object based on a route name. :param name: The name of the route :type name: ``str`` :return: A Route object for the named route :rtype: :class:`GCERoute` """ request = '/global/routes/%s' % (name) response = self.connection.request(request, method='GET').object return self._to_route(response) def ex_destroy_sslcertificate(self, sslcertificate): """ Deletes the specified SslCertificate resource. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute :param sslcertificate: Name of the SslCertificate resource to delete. :type sslcertificate: ``str`` :return sslCertificate: Return True if successful. 
        :rtype sslCertificate: ``bool``
        """
        request = "/global/sslCertificates/%s" % (sslcertificate.name)
        request_data = {}
        # async_request blocks until the GCE operation finishes; failures
        # surface as exceptions, so reaching the return means success.
        self.connection.async_request(request, method='DELETE',
                                      data=request_data)
        return True

    def ex_destroy_subnetwork(self, name, region=None):
        """
        Delete a Subnetwork object based on name and region.

        :param  name: The name, URL or object of the subnetwork
        :type   name: ``str`` or :class:`GCESubnetwork`

        :keyword  region: The region object, name, or URL of the subnetwork
        :type     region: ``str`` or :class:`GCERegion` or ``None``

        :return:  True if successful
        :rtype:   ``bool``
        """
        region_name = None
        subnet_name = None
        # Normalize the optional 'region' argument (object, self-link URL or
        # plain name) down to a bare region name.
        if region:
            if isinstance(region, GCERegion):
                region_name = region.name
            else:
                if region.startswith('https://'):
                    region_name = region.split('/')[-1]
                else:
                    region_name = region
        # Normalize 'name' (object, self-link URL or plain name) to a bare
        # subnetwork name, inferring the region from the object/URL when the
        # caller did not supply one explicitly.
        if isinstance(name, GCESubnetwork):
            subnet_name = name.name
            if not region_name:
                region_name = name.region.name
        else:
            if name.startswith('https://'):
                url_parts = self._get_components_from_path(name)
                subnet_name = url_parts['name']
                if not region_name:
                    region_name = url_parts['region']
            else:
                subnet_name = name
                if not region_name:
                    # Last resort: fall back to the driver's default region.
                    region = self._set_region(region)
                    if not region:
                        raise ValueError(
                            "Could not determine region for subnetwork.")
                    else:
                        region_name = region.name

        request = '/regions/%s/subnetworks/%s' % (region_name, subnet_name)
        self.connection.async_request(request, method='DELETE').object
        return True

    def ex_get_subnetwork(self, name, region=None):
        """
        Return a Subnetwork object based on name and region.
:param name: The name or URL of the subnetwork :type name: ``str`` :keyword region: The region of the subnetwork :type region: ``str`` or :class:`GCERegion` or ``None`` :return: A Subnetwork object :rtype: :class:`GCESubnetwork` """ region_name = None if name.startswith('https://'): parts = self._get_components_from_path(name) name = parts['name'] region_name = parts['region'] else: if isinstance(region, GCERegion): region_name = region.name elif isinstance(region, str): if region.startswith('https://'): region_name = region.split('/')[-1] else: region_name = region if not region_name: region = self._set_region(region) if not region: raise ValueError("Could not determine region for subnetwork.") else: region_name = region.name request = '/regions/%s/subnetworks/%s' % (region_name, name) response = self.connection.request(request, method='GET').object return self._to_subnetwork(response) def ex_get_network(self, name): """ Return a Network object based on a network name. :param name: The name of the network :type name: ``str`` :return: A Network object for the network :rtype: :class:`GCENetwork` """ request = '/global/networks/%s' % (name) response = self.connection.request(request, method='GET').object return self._to_network(response) def ex_get_node(self, name, zone=None): """ Return a Node object based on a node name and optional zone. :param name: The name of the node :type name: ``str`` :keyword zone: The zone to search for the node in. If set to 'all', search all zones for the instance. :type zone: ``str`` or :class:`GCEZone` or :class:`NodeLocation` or ``None`` :return: A Node object for the node :rtype: :class:`Node` """ zone = self._set_zone(zone) or self._find_zone_or_region( name, 'instances', res_name='Node') request = '/zones/%s/instances/%s' % (zone.name, name) response = self.connection.request(request, method='GET').object return self._to_node(response) def ex_get_project(self): """ Return a Project object with project-wide information. 
        :return:  A GCEProject object
        :rtype:   :class:`GCEProject`
        """
        # An empty request path resolves to the project-level endpoint.
        response = self.connection.request('', method='GET').object
        return self._to_project(response)

    def ex_get_size(self, name, zone=None):
        """
        Return a size object based on a machine type name and zone.

        :param  name: The name of the node
        :type   name: ``str``

        :keyword  zone: The zone to search for the machine type in
        :type     zone: ``str`` or :class:`GCEZone` or
                        :class:`NodeLocation` or ``None``

        :return:  A GCENodeSize object for the machine type
        :rtype:   :class:`GCENodeSize`
        """
        # Accept a zone object, a zone name, or None (driver default);
        # anything without a .name attribute is resolved via the API.
        zone = zone or self.zone
        if not hasattr(zone, 'name'):
            zone = self.ex_get_zone(zone)
        request = '/zones/%s/machineTypes/%s' % (zone.name, name)
        response = self.connection.request(request, method='GET').object
        return self._to_node_size(response)

    def ex_get_snapshot(self, name):
        """
        Return a Snapshot object based on snapshot name.

        :param  name: The name of the snapshot
        :type   name: ``str``

        :return:  A GCESnapshot object for the snapshot
        :rtype:   :class:`GCESnapshot`
        """
        request = '/global/snapshots/%s' % (name)
        response = self.connection.request(request, method='GET').object
        return self._to_snapshot(response)

    def ex_get_volume(self, name, zone=None, use_cache=False):
        """
        Return a Volume object based on a volume name and optional zone.

        To improve performance, we request all disks and allow the user
        to consult the cache dictionary rather than making an API call.

        :param  name: The name of the volume
        :type   name: ``str``

        :keyword  zone: The zone to search for the volume in (set to 'all' to
                        search all zones)
        :type     zone: ``str`` or :class:`GCEZone` or :class:`NodeLocation`
                        or ``None``

        :keyword  use_cache: Search for the volume in the existing cache of
                             volumes.  If True, we omit the API call and
                             search self.volumes_dict.  If False, a call to
                             disks/aggregatedList is made prior to searching
                             self._ex_volume_dict.
:type use_cache: ``bool`` :return: A StorageVolume object for the volume :rtype: :class:`StorageVolume` """ if not self._ex_volume_dict or use_cache is False: # Make the API call and build volume dictionary self._ex_populate_volume_dict() try: # if zone is of class GCEZone or NodeLocation, get name instead zone = zone.name except AttributeError: pass return self._ex_lookup_volume(name, zone) def ex_get_region(self, name): """ Return a Region object based on the region name. :param name: The name of the region. :type name: ``str`` :return: A GCERegion object for the region :rtype: :class:`GCERegion` """ if name.startswith('https://'): short_name = self._get_components_from_path(name)['name'] request = name else: short_name = name request = '/regions/%s' % (name) # Check region cache first if short_name in self.region_dict: return self.region_dict[short_name] # Otherwise, look up region information response = self.connection.request(request, method='GET').object return self._to_region(response) def ex_get_sslcertificate(self, name): """ Returns the specified SslCertificate resource. Get a list of available SSL certificates by making a list() request. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute * https://www.googleapis.com/auth/compute.readonly :param name: Name of the SslCertificate resource to return. :type name: ``str`` :return: `GCESslCertificate` object. :rtype: :class:`GCESslCertificate` """ request = "/global/sslCertificates/%s" % (name) response = self.connection.request(request, method='GET').object return self._to_sslcertificate(response) def ex_get_targethttpproxy(self, name): """ Return a Target HTTP Proxy object based on its name. :param name: The name of the target HTTP proxy. 
:type name: ``str`` :return: A Target HTTP Proxy object for the pool :rtype: :class:`GCETargetHttpProxy` """ request = '/global/targetHttpProxies/%s' % name response = self.connection.request(request, method='GET').object return self._to_targethttpproxy(response) def ex_get_targethttpsproxy(self, name): """ Returns the specified TargetHttpsProxy resource. Get a list of available target HTTPS proxies by making a list() request. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute * https://www.googleapis.com/auth/compute.readonly :param name: Name of the TargetHttpsProxy resource to return. :type name: ``str`` :return: `GCETargetHttpsProxy` object. :rtype: :class:`GCETargetHttpsProxy` """ request = "/global/targetHttpsProxies/%s" % (name) response = self.connection.request(request, method='GET').object return self._to_targethttpsproxy(response) def ex_get_targetinstance(self, name, zone=None): """ Return a TargetInstance object based on a name and optional zone. :param name: The name of the target instance :type name: ``str`` :keyword zone: The zone to search for the target instance in (set to 'all' to search all zones). :type zone: ``str`` or :class:`GCEZone` or ``None`` :return: A TargetInstance object for the instance :rtype: :class:`GCETargetInstance` """ zone = self._set_zone(zone) or self._find_zone_or_region( name, 'targetInstances', res_name='TargetInstance') request = '/zones/%s/targetInstances/%s' % (zone.name, name) response = self.connection.request(request, method='GET').object return self._to_targetinstance(response) def ex_get_targetpool(self, name, region=None): """ Return a TargetPool object based on a name and optional region. :param name: The name of the target pool :type name: ``str`` :keyword region: The region to search for the target pool in (set to 'all' to search all regions). 
:type region: ``str`` or :class:`GCERegion` or ``None`` :return: A TargetPool object for the pool :rtype: :class:`GCETargetPool` """ region = self._set_region(region) or self._find_zone_or_region( name, 'targetPools', region=True, res_name='TargetPool') request = '/regions/%s/targetPools/%s' % (region.name, name) response = self.connection.request(request, method='GET').object return self._to_targetpool(response) def ex_get_urlmap(self, name): """ Return a URL Map object based on name :param name: The name of the url map :type name: ``str`` :return: A URL Map object for the backend service :rtype: :class:`GCEUrlMap` """ request = '/global/urlMaps/%s' % name response = self.connection.request(request, method='GET').object return self._to_urlmap(response) def ex_get_instancegroup(self, name, zone=None): """ Returns the specified Instance Group. Get a list of available instance groups by making a list() request. Scopes needed - one of the following: * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/compute * https://www.googleapis.com/auth/compute.readonly :param name: The name of the instance group. :type name: ``str`` :param zone: The name of the zone where the instance group is located. :type zone: ``str`` :return: `GCEInstanceGroup` object. :rtype: :class:`GCEInstanceGroup` """ zone = self._set_zone(zone) or self._find_zone_or_region( name, 'instanceGroups', region=False, res_name='Instancegroup') request = "/zones/%s/instanceGroups/%s" % (zone.name, name) response = self.connection.request(request, method='GET').object return self._to_instancegroup(response) def ex_get_instancegroupmanager(self, name, zone=None): """ Return a InstanceGroupManager object based on a name and optional zone. :param name: The name of the Instance Group Manager. :type name: ``str`` :keyword zone: The zone to search for the Instance Group Manager. Set to 'all' to search all zones. 
:type zone: ``str`` or :class:`GCEZone` or ``None`` :return: An Instance Group Manager object. :rtype: :class:`GCEInstanceGroupManager` """ zone = self._set_zone(zone) or self._find_zone_or_region( name, 'instanceGroupManagers', region=False, res_name='Instancegroupmanager') request = '/zones/%s/instanceGroupManagers/%s' % (zone.name, name) response = self.connection.request(request, method='GET').object return self._to_instancegroupmanager(response) def ex_get_instancetemplate(self, name): """ Return an InstanceTemplate object based on a name and optional zone. :param name: The name of the Instance Template. :type name: ``str`` :return: An Instance Template object. :rtype: :class:`GCEInstanceTemplate` """ request = '/global/instanceTemplates/%s' % (name) response = self.connection.request(request, method='GET').object return self._to_instancetemplate(response) def ex_get_autoscaler(self, name, zone=None): """ Return an Autoscaler object based on a name and optional zone. :param name: The name of the Autoscaler. :type name: ``str`` :keyword zone: The zone to search for the Autoscaler. Set to 'all' to search all zones. :type zone: ``str`` or :class:`GCEZone` or ``None`` :return: An Autoscaler object. :rtype: :class:`GCEAutoscaler` """ zone = self._set_zone(zone) or self._find_zone_or_region( name, 'Autoscalers', region=False, res_name='Autoscalers') request = '/zones/%s/autoscalers/%s' % (zone.name, name) response = self.connection.request(request, method='GET').object return self._to_autoscaler(response) def ex_get_zone(self, name): """ Return a Zone object based on the zone name. :param name: The name of the zone. 
        :type   name: ``str``

        :return:  A GCEZone object for the zone or None if not found
        :rtype:   :class:`GCEZone` or ``None``
        """
        # A self-link URL can be requested directly; a bare name is turned
        # into a relative request path.
        if name.startswith('https://'):
            short_name = self._get_components_from_path(name)['name']
            request = name
        else:
            short_name = name
            request = '/zones/%s' % (name)
        # Check zone cache first
        if short_name in self.zone_dict:
            return self.zone_dict[short_name]
        # Otherwise, look up zone information
        try:
            response = self.connection.request(request, method='GET').object
        except ResourceNotFoundError:
            # Unlike most getters, this one returns None instead of raising.
            return None
        return self._to_zone(response)

    def _ex_connection_class_kwargs(self):
        # Extra kwargs passed through to the connection class constructor.
        return {'auth_type': self.auth_type,
                'project': self.project,
                'scopes': self.scopes,
                'credential_file': self.credential_file}

    def _build_volume_dict(self, zone_dict):
        """
        Build a dictionary in [name][zone]=disk format.

        :param  zone_dict: dict in the format of:
                           { items: {key: {api_name:[], key2: api_name:[]}} }
        :type   zone_dict: ``dict``

        :return:  dict of volumes, organized by name, then zone
                  Format:
                  { 'disk_name': {'zone_name1': disk_info,
                                  'zone_name2': disk_info} }
        :rtype: ``dict``
        """
        name_zone_dict = {}
        for k, v in zone_dict.items():
            # Aggregated-list keys look like 'zones/<zone-name>'.
            zone_name = k.replace('zones/', '')
            disks = v.get('disks', [])
            for disk in disks:
                n = disk['name']
                name_zone_dict.setdefault(n, {})
                name_zone_dict[n].update({zone_name: disk})
        return name_zone_dict

    def _ex_lookup_volume(self, volume_name, zone=None):
        """
        Look up volume by name and zone in volume dict.

        If zone isn't specified or equals 'all', we return the volume for the
        first zone, as determined alphabetically.

        :param   volume_name: The name of the volume.
        :type    volume_name: ``str``

        :keyword  zone: The zone to search for the volume in (set to 'all' to
                        search all zones)
        :type     zone: ``str`` or ``None``

        :return:  A StorageVolume object for the volume.
        :rtype:   :class:`StorageVolume` or raise ``ResourceNotFoundError``.
        """
        if volume_name not in self._ex_volume_dict:
            # Possibly added through another thread/process, so re-populate
            # _volume_dict and try again. If still not found, raise exception.
            self._ex_populate_volume_dict()
            if volume_name not in self._ex_volume_dict:
                raise ResourceNotFoundError(
                    'Volume name: \'%s\' not found. Zone: %s' % (
                        volume_name, zone), None, None)
        # Disk names are not unique across zones, so if zone is None or
        # 'all', we return the first one we find for that disk name.  For
        # consistency, we sort by keys and set the zone to the first key.
        if zone is None or zone == 'all':
            zone = sorted(self._ex_volume_dict[volume_name])[0]

        volume = self._ex_volume_dict[volume_name].get(zone, None)
        if not volume:
            raise ResourceNotFoundError(
                'Volume \'%s\' not found for zone %s.' % (volume_name, zone),
                None, None)
        return self._to_storage_volume(volume)

    def _ex_populate_volume_dict(self):
        """
        Fetch the volume information using disks/aggregatedList
        and store it in _ex_volume_dict.

        return:  ``None``
        """
        # fill the volume dict by making an aggegatedList call to disks.
        aggregated_items = self.connection.request_aggregated_items(
            "disks")

        # _ex_volume_dict is in the format of:
        # { 'disk_name' : { 'zone1': disk, 'zone2': disk, ... }}
        self._ex_volume_dict = self._build_volume_dict(
            aggregated_items['items'])

        return None

    def _catch_error(self, ignore_errors=False):
        """
        Catch an exception and raise it unless asked to ignore it.

        :keyword  ignore_errors: If true, just return the error.  Otherwise,
                                 raise the error.
        :type     ignore_errors: ``bool``

        :return:  The exception that was raised.
        :rtype:   :class:`Exception`
        """
        # Must be called from inside an 'except' block so sys.exc_info()
        # still refers to the in-flight exception.
        e = sys.exc_info()[1]
        if ignore_errors:
            return e
        else:
            raise e

    def _get_components_from_path(self, path):
        """
        Return a dictionary containing name & zone/region from a request path.

        :param  path: HTTP request path (e.g.
                '/project/pjt-name/zones/us-central1-a/instances/mynode')
        :type   path: ``str``

        :return:  Dictionary containing name and zone/region of resource
        :rtype:   ``dict``
        """
        region = None
        zone = None
        glob = False
        components = path.split('/')
        name = components[-1]
        # Resource paths end in .../{regions|zones}/<scope>/<type>/<name> or
        # .../global/<type>/<name>, so index from the end to find the scope.
        if components[-4] == 'regions':
            region = components[-3]
        elif components[-4] == 'zones':
            zone = components[-3]
        elif components[-3] == 'global':
            glob = True

        return {'name': name, 'region': region, 'zone': zone, 'global': glob}

    def _get_object_by_kind(self, url):
        """
        Fetch a resource and return its object representation by mapping its
        'kind' parameter to the appropriate class.

        Returns ``None`` if url is ``None``

        :param  url: fully qualified URL of the resource to request from GCE
        :type   url: ``str``

        :return:  Object representation of the requested resource.
        :rtype: :class:`object` or ``None``
        """
        if not url:
            return None

        # Relies on GoogleBaseConnection.morph_action_hook to rewrite
        # the URL to a request
        response = self.connection.request(url, method='GET').object
        return GCENodeDriver.KIND_METHOD_MAP[response['kind']](self, response)

    def _get_region_from_zone(self, zone):
        """
        Return the Region object that contains the given Zone object.

        :param  zone: Zone object
        :type   zone: :class:`GCEZone`

        :return:  Region object that contains the zone
        :rtype:   :class:`GCERegion`
        """
        # Implicitly returns None when the zone is in no known region.
        for region in self.region_list:
            zones = [z.name for z in region.zones]
            if zone.name in zones:
                return region

    def _find_zone_or_region(self, name, res_type, region=False,
                             res_name=None):
        """
        Find the zone or region for a named resource.

        :param  name: Name of resource to find
        :type   name: ``str``

        :param  res_type: Type of resource to find.
                          Examples include: 'disks', 'instances' or
                          'addresses'
        :type   res_type: ``str``

        :keyword  region: If True, search regions instead of zones
        :type     region: ``bool``

        :keyword  res_name: The name of the resource type for error messages.
Examples: 'Volume', 'Node', 'Address' :keyword res_name: ``str`` :return: Zone/Region object for the zone/region for the resource. :rtype: :class:`GCEZone` or :class:`GCERegion` """ if region: rz = 'region' else: rz = 'zone' rz_name = None res_name = res_name or res_type request = '/aggregated/%s' % (res_type) res_list = self.connection.request(request).object for k, v in res_list['items'].items(): for res in v.get(res_type, []): if res['name'] == name: rz_name = k.replace('%ss/' % (rz), '') break if not rz_name: raise ResourceNotFoundError('%s \'%s\' not found in any %s.' % (res_name, name, rz), None, None) else: getrz = getattr(self, 'ex_get_%s' % (rz)) return getrz(rz_name) def _match_images(self, project, partial_name): """ Find the latest image, given a partial name. For example, providing 'debian-7' will return the image object for the most recent image with a name that starts with 'debian-7' in the supplied project. If no project is given, it will search your own project. :param project: The name of the project to search for images. Examples include: 'debian-cloud' and 'centos-cloud'. :type project: ``str``, ``list`` of ``str``, or ``None`` :param partial_name: The full name or beginning of a name for an image. :type partial_name: ``str`` :return: The latest image object that matches the partial name or None if no matching image is found. :rtype: :class:`GCENodeImage` or ``None`` """ project_images_pages = self.ex_list( self.list_images, ex_project=project, ex_include_deprecated=True) partial_match = [] for page in project_images_pages: for image in page: if image.name == partial_name: return image if image.name.startswith(partial_name): ts = timestamp_to_datetime( image.extra['creationTimestamp']) if not partial_match or partial_match[0] < ts: partial_match = [ts, image] if partial_match: return partial_match[1] def _set_region(self, region): """ Return the region to use for listing resources. 
:param region: A name, region object, None, or 'all' :type region: ``str`` or :class:`GCERegion` or ``None`` :return: A region object or None if all regions should be considered :rtype: :class:`GCERegion` or ``None`` """ region = region or self.region if region == 'all' or region is None: return None if not hasattr(region, 'name'): region = self.ex_get_region(region) return region def _set_zone(self, zone): """ Return the zone to use for listing resources. :param zone: A name, zone object, None, or 'all' :type zone: ``str`` or :class:`GCEZone` or ``None`` :return: A zone object or None if all zones should be considered :rtype: :class:`GCEZone` or ``None`` """ zone = zone or self.zone if zone == 'all' or zone is None: return None if not hasattr(zone, 'name'): zone = self.ex_get_zone(zone) return zone def _create_node_req( self, name, size, image, location, network=None, tags=None, metadata=None, boot_disk=None, external_ip='ephemeral', ex_disk_type='pd-standard', ex_disk_auto_delete=True, ex_service_accounts=None, description=None, ex_can_ip_forward=None, ex_disks_gce_struct=None, ex_nic_gce_struct=None, ex_on_host_maintenance=None, ex_automatic_restart=None, ex_preemptible=None, ex_subnetwork=None): """ Returns a request and body to create a new node. This is a helper method to support both :class:`create_node` and :class:`ex_create_multiple_nodes`. :param name: The name of the node to create. :type name: ``str`` :param size: The machine type to use. :type size: :class:`GCENodeSize` :param image: The image to use to create the node (or, if using a persistent disk, the image the disk was created from). :type image: :class:`GCENodeImage` or ``None`` :param location: The location (zone) to create the node in. :type location: :class:`NodeLocation` or :class:`GCEZone` :param network: The network to associate with the node. :type network: :class:`GCENetwork` :keyword tags: A list of tags to associate with the node. 
:type tags: ``list`` of ``str`` :keyword metadata: Metadata dictionary for instance. :type metadata: ``dict`` :keyword boot_disk: Persistent boot disk to attach. :type :class:`StorageVolume` or ``None`` :keyword external_ip: The external IP address to use. If 'ephemeral' (default), a new non-static address will be used. If 'None', then no external address will be used. To use an existing static IP address, a GCEAddress object should be passed in. This param will be ignored if also using the ex_nic_gce_struct param. :type external_ip: :class:`GCEAddress` or ``str`` or None :keyword ex_disk_type: Specify a pd-standard (default) disk or pd-ssd for an SSD disk. :type ex_disk_type: ``str`` or :class:`GCEDiskType` or ``None`` :keyword ex_disk_auto_delete: Indicate that the boot disk should be deleted when the Node is deleted. Set to True by default. :type ex_disk_auto_delete: ``bool`` :keyword ex_service_accounts: Specify a list of serviceAccounts when creating the instance. The format is a list of dictionaries containing email and list of scopes, e.g. [{'email':'default', 'scopes':['compute', ...]}, ...] Scopes can either be full URLs or short names. If not provided, use the 'default' service account email and a scope of 'devstorage.read_only'. Also accepts the aliases defined in 'gcloud compute'. :type ex_service_accounts: ``list`` :keyword description: The description of the node (instance). :type description: ``str`` or ``None`` :keyword ex_can_ip_forward: Set to ``True`` to allow this node to send/receive non-matching src/dst packets. :type ex_can_ip_forward: ``bool`` or ``None`` :keyword ex_disks_gce_struct: Support for passing in the GCE-specific formatted disks[] structure. No attempt is made to ensure proper formatting of the disks[] structure. Using this structure obviates the need of using other disk params like 'boot_disk', etc. See the GCE docs for specific details. 
:type ex_disks_gce_struct: ``list`` or ``None`` :keyword ex_nic_gce_struct: Support passing in the GCE-specific formatted networkInterfaces[] structure. No attempt is made to ensure proper formatting of the networkInterfaces[] data. Using this structure obviates the need of using 'external_ip' and 'ex_network'. See the GCE docs for details. :type ex_nic_gce_struct: ``list`` or ``None`` :keyword ex_on_host_maintenance: Defines whether node should be terminated or migrated when host machine goes down. Acceptable values are: 'MIGRATE' or 'TERMINATE' (If not supplied, value will be reset to GCE default value for the instance type.) :type ex_on_host_maintenance: ``str`` or ``None`` :keyword ex_automatic_restart: Defines whether the instance should be automatically restarted when it is terminated by Compute Engine. (If not supplied, value will be set to the GCE default value for the instance type.) :type ex_automatic_restart: ``bool`` or ``None`` :keyword ex_preemptible: Defines whether the instance is preemptible. (If not supplied, the instance will not be preemptible) :type ex_preemptible: ``bool`` or ``None`` :param ex_subnetwork: The network to associate with the node. :type ex_subnetwork: :class:`GCESubnetwork` :return: A tuple containing a request string and a node_data dict. :rtype: ``tuple`` of ``str`` and ``dict`` """ # build disks if not image and not boot_disk and not ex_disks_gce_struct: raise ValueError("Missing root device or image. Must specify an " "'image', existing 'boot_disk', or use the " "'ex_disks_gce_struct'.") if boot_disk and ex_disks_gce_struct: raise ValueError("Cannot specify both 'boot_disk' and " "'ex_disks_gce_struct'. 
Use one or the other.") use_selflinks = True source = None if boot_disk: source = boot_disk node_data = self._create_instance_properties( name, node_size=size, image=image, source=source, disk_type=ex_disk_type, disk_auto_delete=ex_disk_auto_delete, external_ip=external_ip, network=network, subnetwork=ex_subnetwork, can_ip_forward=ex_can_ip_forward, service_accounts=ex_service_accounts, on_host_maintenance=ex_on_host_maintenance, automatic_restart=ex_automatic_restart, preemptible=ex_preemptible, tags=tags, metadata=metadata, description=description, disks_gce_struct=ex_disks_gce_struct, nic_gce_struct=ex_nic_gce_struct, use_selflinks=use_selflinks) node_data['name'] = name request = '/zones/%s/instances' % (location.name) return request, node_data def _multi_create_disk(self, status, node_attrs): """Create disk for ex_create_multiple_nodes. :param status: Dictionary for holding node/disk creation status. (This dictionary is modified by this method) :type status: ``dict`` :param node_attrs: Dictionary for holding node attribute information. (size, image, location, ex_disk_type, etc.) :type node_attrs: ``dict`` """ disk = None # Check for existing disk if node_attrs['use_existing_disk']: try: disk = self.ex_get_volume(status['name'], node_attrs['location']) except ResourceNotFoundError: pass if disk: status['disk'] = disk else: # Create disk and return response object back in the status dict. # Or, if there is an error, mark as failed. 
disk_req, disk_data, disk_params = self._create_vol_req( None, status['name'], location=node_attrs['location'], image=node_attrs['image'], ex_disk_type=node_attrs['ex_disk_type']) try: disk_res = self.connection.request(disk_req, method='POST', data=disk_data, params=disk_params).object except GoogleBaseError: e = self._catch_error( ignore_errors=node_attrs['ignore_errors']) error = e.value code = e.code disk_res = None status['disk'] = GCEFailedDisk(status['name'], error, code) status['disk_response'] = disk_res def _multi_check_disk(self, status, node_attrs): """Check disk status for ex_create_multiple_nodes. :param status: Dictionary for holding node/disk creation status. (This dictionary is modified by this method) :type status: ``dict`` :param node_attrs: Dictionary for holding node attribute information. (size, image, location, etc.) :type node_attrs: ``dict`` """ error = None try: response = self.connection.request(status['disk_response'][ 'selfLink']).object except GoogleBaseError: e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) error = e.value code = e.code response = {'status': 'DONE'} if response['status'] == 'DONE': status['disk_response'] = None if error: status['disk'] = GCEFailedDisk(status['name'], error, code) else: status['disk'] = self.ex_get_volume(status['name'], node_attrs['location']) def _multi_create_node(self, status, node_attrs): """Create node for ex_create_multiple_nodes. :param status: Dictionary for holding node creation status. (This dictionary is modified by this method) :type status: ``dict`` :param node_attrs: Dictionary for holding node attribute information. (size, image, location, etc.) :type node_attrs: ``dict`` """ # Create node and return response object in status dictionary. # Or, if there is an error, mark as failed. 
request, node_data = self._create_node_req( status['name'], node_attrs['size'], node_attrs['image'], node_attrs['location'], node_attrs['network'], node_attrs['tags'], node_attrs['metadata'], external_ip=node_attrs['external_ip'], ex_service_accounts=node_attrs['ex_service_accounts'], description=node_attrs['description'], ex_can_ip_forward=node_attrs['ex_can_ip_forward'], ex_disk_auto_delete=node_attrs['ex_disk_auto_delete'], ex_disks_gce_struct=node_attrs['ex_disks_gce_struct'], ex_nic_gce_struct=node_attrs['ex_nic_gce_struct'], ex_on_host_maintenance=node_attrs['ex_on_host_maintenance'], ex_automatic_restart=node_attrs['ex_automatic_restart'], ex_subnetwork=node_attrs['subnetwork'], ex_preemptible=node_attrs['ex_preemptible']) try: node_res = self.connection.request(request, method='POST', data=node_data).object except GoogleBaseError: e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) error = e.value code = e.code node_res = None status['node'] = GCEFailedNode(status['name'], error, code) status['node_response'] = node_res def _multi_check_node(self, status, node_attrs): """Check node status for ex_create_multiple_nodes. :param status: Dictionary for holding node/disk creation status. (This dictionary is modified by this method) :type status: ``dict`` :param node_attrs: Dictionary for holding node attribute information. (size, image, location, etc.) 
:type node_attrs: ``dict`` """ error = None try: response = self.connection.request(status['node_response'][ 'selfLink']).object except GoogleBaseError: e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) error = e.value code = e.code response = {'status': 'DONE'} if response['status'] == 'DONE': status['node_response'] = None if error: status['node'] = GCEFailedNode(status['name'], error, code) else: status['node'] = self.ex_get_node(status['name'], node_attrs['location']) def _create_vol_req(self, size, name, location=None, snapshot=None, image=None, ex_disk_type='pd-standard'): """ Assemble the request/data for creating a volume. Used by create_volume and ex_create_multiple_nodes :param size: Size of volume to create (in GB). Can be None if image or snapshot is supplied. :type size: ``int`` or ``str`` or ``None`` :param name: Name of volume to create :type name: ``str`` :keyword location: Location (zone) to create the volume in :type location: ``str`` or :class:`GCEZone` or :class:`NodeLocation` or ``None`` :keyword snapshot: Snapshot to create image from :type snapshot: :class:`GCESnapshot` or ``str`` or ``None`` :keyword image: Image to create disk from. 
:type image: :class:`GCENodeImage` or ``str`` or ``None`` :keyword ex_disk_type: Specify pd-standard (default) or pd-ssd :type ex_disk_type: ``str`` or :class:`GCEDiskType` :return: Tuple containing the request string, the data dictionary and the URL parameters :rtype: ``tuple`` """ volume_data = {} params = None volume_data['name'] = name if size: volume_data['sizeGb'] = str(size) if image: if not hasattr(image, 'name'): image = self.ex_get_image(image) params = {'sourceImage': image.extra['selfLink']} volume_data['description'] = 'Image: %s' % ( image.extra['selfLink']) if snapshot: if not hasattr(snapshot, 'name'): # Check for full URI to not break backward-compatibility if snapshot.startswith('https'): snapshot = self._get_components_from_path(snapshot)['name'] snapshot = self.ex_get_snapshot(snapshot) snapshot_link = snapshot.extra['selfLink'] volume_data['sourceSnapshot'] = snapshot_link volume_data['description'] = 'Snapshot: %s' % (snapshot_link) location = location or self.zone if not hasattr(location, 'name'): location = self.ex_get_zone(location) if hasattr(ex_disk_type, 'name'): volume_data['type'] = ex_disk_type.extra['selfLink'] elif ex_disk_type.startswith('https'): volume_data['type'] = ex_disk_type else: volume_data['type'] = 'https://www.googleapis.com/compute/' volume_data['type'] += '%s/projects/%s/zones/%s/diskTypes/%s' % ( API_VERSION, self.project, location.name, ex_disk_type) request = '/zones/%s/disks' % (location.name) return request, volume_data, params def _to_disktype(self, disktype): """ Return a DiskType object from the JSON-response dictionary. :param disktype: The dictionary describing the disktype. 
:type disktype: ``dict`` :return: DiskType object :rtype: :class:`GCEDiskType` """ extra = {} zone = self.ex_get_zone(disktype['zone']) extra['selfLink'] = disktype.get('selfLink') extra['creationTimestamp'] = disktype.get('creationTimestamp') extra['description'] = disktype.get('description') extra['valid_disk_size'] = disktype.get('validDiskSize') extra['default_disk_size_gb'] = disktype.get('defaultDiskSizeGb') type_id = "%s:%s" % (zone.name, disktype['name']) return GCEDiskType(id=type_id, name=disktype['name'], zone=zone, driver=self, extra=extra) def _to_address(self, address): """ Return an Address object from the JSON-response dictionary. :param address: The dictionary describing the address. :type address: ``dict`` :return: Address object :rtype: :class:`GCEAddress` """ extra = {} if 'region' in address: region = self.ex_get_region(address['region']) else: region = 'global' extra['selfLink'] = address.get('selfLink') extra['status'] = address.get('status') extra['description'] = address.get('description', None) if address.get('users', None) is not None: extra['users'] = address.get('users') extra['creationTimestamp'] = address.get('creationTimestamp') return GCEAddress(id=address['id'], name=address['name'], address=address['address'], region=region, driver=self, extra=extra) def _to_backendservice(self, backendservice): """ Return a Backend Service object from the JSON-response dictionary. :param backendservice: The dictionary describing the backend service. 
:type backendservice: ``dict`` :return: BackendService object :rtype: :class:`GCEBackendService` """ extra = {} for extra_key in ('selfLink', 'creationTimestamp', 'fingerprint', 'description'): extra[extra_key] = backendservice.get(extra_key) backends = backendservice.get('backends', []) healthchecks = [self._get_object_by_kind(h) for h in backendservice.get('healthChecks', [])] return GCEBackendService( id=backendservice['id'], name=backendservice['name'], backends=backends, healthchecks=healthchecks, port=backendservice['port'], port_name=backendservice['portName'], protocol=backendservice['protocol'], timeout=backendservice['timeoutSec'], driver=self, extra=extra) def _to_healthcheck(self, healthcheck): """ Return a HealthCheck object from the JSON-response dictionary. :param healthcheck: The dictionary describing the healthcheck. :type healthcheck: ``dict`` :return: HealthCheck object :rtype: :class:`GCEHealthCheck` """ extra = {} extra['selfLink'] = healthcheck.get('selfLink') extra['creationTimestamp'] = healthcheck.get('creationTimestamp') extra['description'] = healthcheck.get('description') extra['host'] = healthcheck.get('host') return GCEHealthCheck( id=healthcheck['id'], name=healthcheck['name'], path=healthcheck.get('requestPath'), port=healthcheck.get('port'), interval=healthcheck.get('checkIntervalSec'), timeout=healthcheck.get('timeoutSec'), unhealthy_threshold=healthcheck.get('unhealthyThreshold'), healthy_threshold=healthcheck.get('healthyThreshold'), driver=self, extra=extra) def _to_firewall(self, firewall): """ Return a Firewall object from the JSON-response dictionary. :param firewall: The dictionary describing the firewall. 
:type firewall: ``dict`` :return: Firewall object :rtype: :class:`GCEFirewall` """ extra = {} extra['selfLink'] = firewall.get('selfLink') extra['creationTimestamp'] = firewall.get('creationTimestamp') extra['description'] = firewall.get('description') extra['network_name'] = self._get_components_from_path(firewall[ 'network'])['name'] network = self.ex_get_network(extra['network_name']) source_ranges = firewall.get('sourceRanges') source_tags = firewall.get('sourceTags') target_tags = firewall.get('targetTags') return GCEFirewall(id=firewall['id'], name=firewall['name'], allowed=firewall.get('allowed'), network=network, source_ranges=source_ranges, source_tags=source_tags, target_tags=target_tags, driver=self, extra=extra) def _to_forwarding_rule(self, forwarding_rule): """ Return a Forwarding Rule object from the JSON-response dictionary. :param forwarding_rule: The dictionary describing the rule. :type forwarding_rule: ``dict`` :return: ForwardingRule object :rtype: :class:`GCEForwardingRule` """ extra = {} extra['selfLink'] = forwarding_rule.get('selfLink') extra['portRange'] = forwarding_rule.get('portRange') extra['creationTimestamp'] = forwarding_rule.get('creationTimestamp') extra['description'] = forwarding_rule.get('description') region = forwarding_rule.get('region') if region: region = self.ex_get_region(region) target = self._get_object_by_kind(forwarding_rule['target']) return GCEForwardingRule(id=forwarding_rule['id'], name=forwarding_rule['name'], region=region, address=forwarding_rule.get('IPAddress'), protocol=forwarding_rule.get('IPProtocol'), targetpool=target, driver=self, extra=extra) def _to_sslcertificate(self, sslcertificate): """ Return the SslCertificate object from the JSON-response. :param sslcertificate: Dictionary describing SslCertificate :type sslcertificate: ``dict`` :return: Return SslCertificate object. 
:rtype: :class:`GCESslCertificate` """ extra = {} if 'description' in sslcertificate: extra['description'] = sslcertificate['description'] extra['selfLink'] = sslcertificate['selfLink'] return GCESslCertificate(id=sslcertificate['id'], name=sslcertificate['name'], certificate=sslcertificate['certificate'], driver=self, extra=extra) def _to_subnetwork(self, subnetwork): """ Return a Subnetwork object from the JSON-response dictionary. :param subnetwork: The dictionary describing the subnetwork. :type subnetwork: ``dict`` :return: Subnetwork object :rtype: :class:`GCESubnetwork` """ extra = {} extra['creationTimestamp'] = subnetwork.get('creationTimestamp') extra['description'] = subnetwork.get('description') extra['gatewayAddress'] = subnetwork.get('gatewayAddress') extra['ipCidrRange'] = subnetwork.get('ipCidrRange') extra['network'] = subnetwork.get('network') extra['region'] = subnetwork.get('region') extra['selfLink'] = subnetwork.get('selfLink') network = self._get_object_by_kind(subnetwork.get('network')) region = self._get_object_by_kind(subnetwork.get('region')) return GCESubnetwork(id=subnetwork['id'], name=subnetwork['name'], cidr=subnetwork.get('ipCidrRange'), network=network, region=region, driver=self, extra=extra) def _to_network(self, network): """ Return a Network object from the JSON-response dictionary. :param network: The dictionary describing the network. 
:type network: ``dict`` :return: Network object :rtype: :class:`GCENetwork` """ extra = {} extra['selfLink'] = network.get('selfLink') extra['description'] = network.get('description') extra['creationTimestamp'] = network.get('creationTimestamp') # 'legacy' extra['gatewayIPv4'] = network.get('gatewayIPv4') extra['IPv4Range'] = network.get('IPv4Range') # 'auto' or 'custom' extra['autoCreateSubnetworks'] = network.get('autoCreateSubnetworks') extra['subnetworks'] = network.get('subnetworks') # match Cloud SDK 'gcloud' if 'autoCreateSubnetworks' in network: if network['autoCreateSubnetworks']: extra['mode'] = 'auto' else: extra['mode'] = 'custom' else: extra['mode'] = 'legacy' return GCENetwork(id=network['id'], name=network['name'], cidr=network.get('IPv4Range'), driver=self, extra=extra) def _to_route(self, route): """ Return a Route object from the JSON-response dictionary. :param route: The dictionary describing the route. :type route: ``dict`` :return: Route object :rtype: :class:`GCERoute` """ extra = {} extra['selfLink'] = route.get('selfLink') extra['description'] = route.get('description') extra['creationTimestamp'] = route.get('creationTimestamp') network = route.get('network') priority = route.get('priority') if 'nextHopInstance' in route: extra['nextHopInstance'] = route['nextHopInstance'] if 'nextHopIp' in route: extra['nextHopIp'] = route['nextHopIp'] if 'nextHopNetwork' in route: extra['nextHopNetwork'] = route['nextHopNetwork'] if 'nextHopGateway' in route: extra['nextHopGateway'] = route['nextHopGateway'] if 'warnings' in route: extra['warnings'] = route['warnings'] return GCERoute(id=route['id'], name=route['name'], dest_range=route.get('destRange'), priority=priority, network=network, tags=route.get('tags'), driver=self, extra=extra) def _to_node_image(self, image): """ Return an Image object from the JSON-response dictionary. :param image: The dictionary describing the image. 
:type image: ``dict`` :return: Image object :rtype: :class:`GCENodeImage` """ extra = {} if 'preferredKernel' in image: extra['preferredKernel'] = image.get('preferredKernel', None) extra['description'] = image.get('description', None) extra['family'] = image.get('family', None) extra['creationTimestamp'] = image.get('creationTimestamp') extra['selfLink'] = image.get('selfLink') if 'deprecated' in image: extra['deprecated'] = image.get('deprecated', None) extra['sourceType'] = image.get('sourceType', None) extra['rawDisk'] = image.get('rawDisk', None) extra['status'] = image.get('status', None) extra['archiveSizeBytes'] = image.get('archiveSizeBytes', None) extra['diskSizeGb'] = image.get('diskSizeGb', None) if 'guestOsFeatures' in image: extra['guestOsFeatures'] = image.get('guestOsFeatures', []) if 'sourceDisk' in image: extra['sourceDisk'] = image.get('sourceDisk', None) if 'sourceDiskId' in image: extra['sourceDiskId'] = image.get('sourceDiskId', None) if 'licenses' in image: lic_objs = self._licenses_from_urls(licenses=image['licenses']) extra['licenses'] = lic_objs return GCENodeImage(id=image['id'], name=image['name'], driver=self, extra=extra) def _to_node_location(self, location): """ Return a Location object from the JSON-response dictionary. :param location: The dictionary describing the location. :type location: ``dict`` :return: Location object :rtype: :class:`NodeLocation` """ return NodeLocation(id=location['id'], name=location['name'], country=location['name'].split('-')[0], driver=self) def _to_node(self, node, use_disk_cache=False): """ Return a Node object from the JSON-response dictionary. :param node: The dictionary describing the node. :type node: ``dict`` :keyword use_disk_cache: If true, ex_get_volume call will use cache. 
:type use_disk_cache: ``bool`` :return: Node object :rtype: :class:`Node` """ public_ips = [] private_ips = [] extra = {} extra['status'] = node.get('status', "UNKNOWN") extra['statusMessage'] = node.get('statusMessage') extra['description'] = node.get('description') extra['zone'] = self.ex_get_zone(node['zone']) extra['image'] = node.get('image') extra['machineType'] = node.get('machineType') extra['disks'] = node.get('disks', []) extra['networkInterfaces'] = node.get('networkInterfaces') extra['id'] = node['id'] extra['selfLink'] = node.get('selfLink') extra['kind'] = node.get('kind') extra['creationTimestamp'] = node.get('creationTimestamp') extra['name'] = node['name'] extra['metadata'] = node.get('metadata', {}) extra['tags_fingerprint'] = node['tags']['fingerprint'] extra['scheduling'] = node.get('scheduling', {}) extra['deprecated'] = True if node.get('deprecated', None) else False extra['canIpForward'] = node.get('canIpForward') extra['serviceAccounts'] = node.get('serviceAccounts', []) extra['scheduling'] = node.get('scheduling', {}) extra['boot_disk'] = None for disk in extra['disks']: if disk.get('boot') and disk.get('type') == 'PERSISTENT': bd = self._get_components_from_path(disk['source']) extra['boot_disk'] = self.ex_get_volume( bd['name'], bd['zone'], use_cache=use_disk_cache) if 'items' in node['tags']: tags = node['tags']['items'] else: tags = [] extra['tags'] = tags for network_interface in node.get('networkInterfaces', []): private_ips.append(network_interface.get('networkIP')) for access_config in network_interface.get('accessConfigs', []): public_ips.append(access_config.get('natIP')) # For the node attributes, use just machine and image names, not full # paths. Full paths are available in the "extra" dict. 
image = None if extra['image']: image = self._get_components_from_path(extra['image'])['name'] else: if extra['boot_disk'] and \ hasattr(extra['boot_disk'], 'extra') and \ 'sourceImage' in extra['boot_disk'].extra and \ extra['boot_disk'].extra['sourceImage'] is not None: src_image = extra['boot_disk'].extra['sourceImage'] image = self._get_components_from_path(src_image)['name'] extra['image'] = image size = self._get_components_from_path(node['machineType'])['name'] return Node(id=node['id'], name=node['name'], state=self.NODE_STATE_MAP[node['status']], public_ips=public_ips, private_ips=private_ips, driver=self, size=size, image=image, extra=extra) def _to_node_size(self, machine_type): """ Return a Size object from the JSON-response dictionary. :param machine_type: The dictionary describing the machine. :type machine_type: ``dict`` :return: Size object :rtype: :class:`GCENodeSize` """ extra = {} extra['selfLink'] = machine_type.get('selfLink') extra['zone'] = self.ex_get_zone(machine_type['zone']) extra['description'] = machine_type.get('description') extra['guestCpus'] = machine_type.get('guestCpus') extra['creationTimestamp'] = machine_type.get('creationTimestamp') try: orig_api_name = self.api_name self.api_name = "%s_%s" % (self.api_name, extra['zone'].name.split("-")[0]) price = self._get_size_price(size_id=machine_type['name']) self.api_name = orig_api_name except: price = None return GCENodeSize(id=machine_type['id'], name=machine_type['name'], ram=machine_type.get('memoryMb'), disk=machine_type.get('imageSpaceGb'), bandwidth=0, price=price, driver=self, extra=extra) def _to_project(self, project): """ Return a Project object from the JSON-response dictionary. :param project: The dictionary describing the project. 
:type project: ``dict`` :return: Project object :rtype: :class:`GCEProject` """ extra = {} extra['selfLink'] = project.get('selfLink') extra['creationTimestamp'] = project.get('creationTimestamp') extra['description'] = project.get('description') metadata = project['commonInstanceMetadata'].get('items') if 'commonInstanceMetadata' in project: # add this struct to get 'fingerprint' too extra['commonInstanceMetadata'] = project['commonInstanceMetadata'] if 'usageExportLocation' in project: extra['usageExportLocation'] = project['usageExportLocation'] return GCEProject(id=project['id'], name=project['name'], metadata=metadata, quotas=project.get('quotas'), driver=self, extra=extra) def _to_region(self, region): """ Return a Region object from the JSON-response dictionary. :param region: The dictionary describing the region. :type region: ``dict`` :return: Region object :rtype: :class:`GCERegion` """ extra = {} extra['selfLink'] = region.get('selfLink') extra['creationTimestamp'] = region.get('creationTimestamp') extra['description'] = region.get('description') quotas = region.get('quotas') zones = [self.ex_get_zone(z) for z in region.get('zones', [])] # Work around a bug that will occasionally list missing zones in the # region output zones = [z for z in zones if z is not None] deprecated = region.get('deprecated') return GCERegion(id=region['id'], name=region['name'], status=region.get('status'), zones=zones, quotas=quotas, deprecated=deprecated, driver=self, extra=extra) def _to_snapshot(self, snapshot): """ Return a Snapshot object from the JSON-response dictionary. 
:param snapshot: The dictionary describing the snapshot :type snapshot: ``dict`` :return: Snapshot object :rtype: :class:`VolumeSnapshot` """ extra = {} extra['selfLink'] = snapshot.get('selfLink') extra['creationTimestamp'] = snapshot.get('creationTimestamp') extra['sourceDisk'] = snapshot.get('sourceDisk') if 'description' in snapshot: extra['description'] = snapshot['description'] if 'sourceDiskId' in snapshot: extra['sourceDiskId'] = snapshot['sourceDiskId'] if 'storageBytes' in snapshot: extra['storageBytes'] = snapshot['storageBytes'] if 'storageBytesStatus' in snapshot: extra['storageBytesStatus'] = snapshot['storageBytesStatus'] if 'licenses' in snapshot: lic_objs = self._licenses_from_urls(licenses=snapshot['licenses']) extra['licenses'] = lic_objs try: created = parse_date(snapshot.get('creationTimestamp')) except ValueError: created = None return GCESnapshot(id=snapshot['id'], name=snapshot['name'], size=snapshot['diskSizeGb'], status=snapshot.get('status'), driver=self, extra=extra, created=created) def _to_storage_volume(self, volume): """ Return a Volume object from the JSON-response dictionary. :param volume: The dictionary describing the volume. 
:type volume: ``dict`` :return: Volume object :rtype: :class:`StorageVolume` """ extra = {} extra['selfLink'] = volume.get('selfLink') extra['zone'] = self.ex_get_zone(volume['zone']) extra['status'] = volume.get('status') extra['creationTimestamp'] = volume.get('creationTimestamp') extra['description'] = volume.get('description') extra['sourceImage'] = volume.get('sourceImage') extra['sourceImageId'] = volume.get('sourceImageId') extra['sourceSnapshot'] = volume.get('sourceSnapshot') extra['sourceSnapshotId'] = volume.get('sourceSnapshotId') extra['options'] = volume.get('options') if 'licenses' in volume: lic_objs = self._licenses_from_urls(licenses=volume['licenses']) extra['licenses'] = lic_objs extra['type'] = volume.get('type', 'pd-standard').split('/')[-1] return StorageVolume(id=volume['id'], name=volume['name'], size=volume['sizeGb'], driver=self, extra=extra) def _to_targethttpproxy(self, targethttpproxy): """ Return a Target HTTP Proxy object from the JSON-response dictionary. :param targethttpproxy: The dictionary describing the proxy. :type targethttpproxy: ``dict`` :return: Target HTTP Proxy object :rtype: :class:`GCETargetHttpProxy` """ extra = dict( [(k, targethttpproxy.get(k)) for k in ('creationTimestamp', 'description', 'selfLink')]) urlmap = self._get_object_by_kind(targethttpproxy.get('urlMap')) return GCETargetHttpProxy(id=targethttpproxy['id'], name=targethttpproxy['name'], urlmap=urlmap, driver=self, extra=extra) def _to_targethttpsproxy(self, targethttpsproxy): """ Return the TargetHttpsProxy object from the JSON-response. :param targethttpsproxy: Dictionary describing TargetHttpsProxy :type targethttpsproxy: ``dict`` :return: Return TargetHttpsProxy object. 
:rtype: :class:`GCETargetHttpsProxy` """ extra = {} if 'description' in targethttpsproxy: extra['description'] = targethttpsproxy['description'] extra['selfLink'] = targethttpsproxy['selfLink'] sslcertificates = [ self._get_object_by_kind(x) for x in targethttpsproxy.get('sslCertificates', []) ] obj_name = self._get_components_from_path(targethttpsproxy['urlMap'])[ 'name'] urlmap = self.ex_get_urlmap(obj_name) return GCETargetHttpsProxy(id=targethttpsproxy['id'], name=targethttpsproxy['name'], sslcertificates=sslcertificates, urlmap=urlmap, driver=self, extra=extra) def _to_targetinstance(self, targetinstance): """ Return a Target Instance object from the JSON-response dictionary. :param targetinstance: The dictionary describing the target instance. :type targetinstance: ``dict`` :return: Target Instance object :rtype: :class:`GCETargetInstance` """ node = None extra = {} extra['selfLink'] = targetinstance.get('selfLink') extra['description'] = targetinstance.get('description') extra['natPolicy'] = targetinstance.get('natPolicy') zone = self.ex_get_zone(targetinstance['zone']) if 'instance' in targetinstance: node_name = targetinstance['instance'].split('/')[-1] try: node = self.ex_get_node(node_name, zone) except ResourceNotFoundError: node = targetinstance['instance'] return GCETargetInstance(id=targetinstance['id'], name=targetinstance['name'], zone=zone, node=node, driver=self, extra=extra) def _to_targetpool(self, targetpool): """ Return a Target Pool object from the JSON-response dictionary. :param targetpool: The dictionary describing the volume. 
:type targetpool: ``dict`` :return: Target Pool object :rtype: :class:`GCETargetPool` """ extra = {} extra['selfLink'] = targetpool.get('selfLink') extra['description'] = targetpool.get('description') extra['sessionAffinity'] = targetpool.get('sessionAffinity') region = self.ex_get_region(targetpool['region']) healthcheck_list = [self.ex_get_healthcheck(h.split('/')[-1]) for h in targetpool.get('healthChecks', [])] node_list = [] for n in targetpool.get('instances', []): # Nodes that do not exist can be part of a target pool. If the # node does not exist, use the URL of the node instead of the node # object. comp = self._get_components_from_path(n) try: node = self.ex_get_node(comp['name'], comp['zone']) except ResourceNotFoundError: node = n node_list.append(node) if 'failoverRatio' in targetpool: extra['failoverRatio'] = targetpool['failoverRatio'] if 'backupPool' in targetpool: tp_split = targetpool['backupPool'].split('/') extra['backupPool'] = self.ex_get_targetpool(tp_split[10], tp_split[8]) return GCETargetPool(id=targetpool['id'], name=targetpool['name'], region=region, healthchecks=healthcheck_list, nodes=node_list, driver=self, extra=extra) def _to_instancegroup(self, instancegroup): """ Return the InstanceGroup object from the JSON-response. :param instancegroup: Dictionary describing InstanceGroup :type instancegroup: ``dict`` :return: InstanceGroup object. :rtype: :class:`GCEInstanceGroup` """ extra = {} extra['description'] = instancegroup.get('description', None) extra['selfLink'] = instancegroup['selfLink'] extra['namedPorts'] = instancegroup.get('namedPorts', []) extra['fingerprint'] = instancegroup.get('fingerprint', None) zone = self.ex_get_zone(instancegroup['zone']) # Note: network/subnetwork will not be available if the Instance Group # does not contain instances. 
network = instancegroup.get('network', None) if network: obj_name = self._get_components_from_path(network)['name'] network = self.ex_get_network(obj_name) subnetwork = instancegroup.get('subnetwork', None) if subnetwork: parts = self._get_components_from_path(subnetwork) subnetwork = self.ex_get_subnetwork(parts['name'], parts['region']) return GCEInstanceGroup( id=instancegroup['id'], name=instancegroup['name'], zone=zone, network=network, subnetwork=subnetwork, named_ports=instancegroup.get('namedPorts', []), driver=self, extra=extra) def _to_instancegroupmanager(self, manager): """ Return a Instance Group Manager object from the JSON-response. :param instancegroupmanager: dictionary describing the Instance Group Manager. :type instancegroupmanager: ``dict`` :return: Instance Group Manager object. :rtype: :class:`GCEInstanceGroupManager` """ zone = self.ex_get_zone(manager['zone']) extra = {} extra['selfLink'] = manager.get('selfLink') extra['description'] = manager.get('description') extra['currentActions'] = manager.get('currentActions') extra['baseInstanceName'] = manager.get('baseInstanceName') extra['namedPorts'] = manager.get('namedPorts', []) template_name = self._get_components_from_path(manager[ 'instanceTemplate'])['name'] template = self.ex_get_instancetemplate(template_name) ig_name = self._get_components_from_path(manager['instanceGroup'])[ 'name'] instance_group = self.ex_get_instancegroup(ig_name, zone) return GCEInstanceGroupManager( id=manager['id'], name=manager['name'], zone=zone, size=manager['targetSize'], instance_group=instance_group, template=template, driver=self, extra=extra) def _to_instancetemplate(self, instancetemplate): """ Return a Instance Template object from the JSON-response. :param instancetemplate: dictionary describing the Instance Template. :type instancetemplate: ``dict`` :return: Instance Template object. 
:rtype: :class:`GCEInstanceTemplate` """ extra = {} extra['selfLink'] = instancetemplate.get('selfLink') extra['description'] = instancetemplate.get('description') extra['properties'] = instancetemplate.get('properties') return GCEInstanceTemplate(id=instancetemplate['id'], name=instancetemplate['name'], driver=self, extra=extra) def _to_autoscaler(self, autoscaler): """ Return an Autoscaler object from the JSON-response. :param autoscaler: dictionary describing the Autoscaler. :type autoscaler: ``dict`` :return: Autoscaler object. :rtype: :class:`GCEAutoscaler` """ extra = {} extra['selfLink'] = autoscaler.get('selfLink') extra['description'] = autoscaler.get('description') zone = self.ex_get_zone(autoscaler.get('zone')) ig_name = self._get_components_from_path(autoscaler.get('target'))[ 'name'] target = self.ex_get_instancegroupmanager(ig_name, zone) return GCEAutoscaler(id=autoscaler['id'], name=autoscaler['name'], zone=zone, target=target, policy=autoscaler['autoscalingPolicy'], driver=self, extra=extra) def _format_metadata(self, fingerprint, metadata=None): """ Convert various data formats into the metadata format expected by Google Compute Engine and suitable for passing along to the API. Can accept the following formats: (a) [{'key': 'k1', 'value': 'v1'}, ...] (b) [{'k1': 'v1'}, ...] (c) {'key': 'k1', 'value': 'v1'} (d) {'k1': 'v1', 'k2': v2', ...} (e) {'items': [...]} # does not check for valid list contents The return value is a 'dict' that GCE expects, e.g. {'fingerprint': 'xx...', 'items': [{'key': 'key1', 'value': 'val1'}, {'key': 'key2', 'value': 'val2'}, ..., ] } :param fingerprint: Current metadata fingerprint :type fingerprint: ``str`` :param metadata: Variety of input formats. :type metadata: ``list``, ``dict``, or ``None`` :return: GCE-friendly metadata dict :rtype: ``dict`` """ if not metadata: return {'fingerprint': fingerprint, 'items': []} md = {'fingerprint': fingerprint} # Check `list` format. 
Can support / convert the following: # (a) [{'key': 'k1', 'value': 'v1'}, ...] # (b) [{'k1': 'v1'}, ...] if isinstance(metadata, list): item_list = [] for i in metadata: if isinstance(i, dict): # check (a) if 'key' in i and 'value' in i and len(i) == 2: item_list.append(i) # check (b) elif len(i) == 1: item_list.append({'key': list(i.keys())[0], 'value': list(i.values())[0]}) else: raise ValueError("Unsupported metadata format.") else: raise ValueError("Unsupported metadata format.") md['items'] = item_list # Check `dict` format. Can support / convert the following: # (c) {'key': 'k1', 'value': 'v1'} # (d) {'k1': 'v1', 'k2': 'v2', ...} # (e) {'items': [...]} if isinstance(metadata, dict): # Check (c) if 'key' in metadata and 'value' in metadata and \ len(metadata) == 2: md['items'] = [metadata] # check (d) elif len(metadata) == 1: if 'items' in metadata: # check (e) if isinstance(metadata['items'], list): md['items'] = metadata['items'] else: raise ValueError("Unsupported metadata format.") else: md['items'] = [{'key': list(metadata.keys())[0], 'value': list(metadata.values())[0]}] else: # check (d) md['items'] = [] for k, v in metadata.items(): md['items'].append({'key': k, 'value': v}) if 'items' not in md: raise ValueError("Unsupported metadata format.") return md def _to_urlmap(self, urlmap): """ Return a UrlMap object from the JSON-response dictionary. :param zone: The dictionary describing the url-map. 
:type zone: ``dict`` :return: UrlMap object :rtype: :class:`GCEUrlMap` """ extra = dict([(k, urlmap.get(k)) for k in ('creationTimestamp', 'description', 'fingerprint', 'selfLink')]) default_service = self._get_object_by_kind( urlmap.get('defaultService')) host_rules = urlmap.get('hostRules', []) path_matchers = urlmap.get('pathMatchers', []) tests = urlmap.get('tests', []) return GCEUrlMap(id=urlmap['id'], name=urlmap['name'], default_service=default_service, host_rules=host_rules, path_matchers=path_matchers, tests=tests, driver=self, extra=extra) def _to_zone(self, zone): """ Return a Zone object from the JSON-response dictionary. :param zone: The dictionary describing the zone. :type zone: ``dict`` :return: Zone object :rtype: :class:`GCEZone` """ extra = {} extra['selfLink'] = zone.get('selfLink') extra['creationTimestamp'] = zone.get('creationTimestamp') extra['description'] = zone.get('description') extra['region'] = zone.get('region') deprecated = zone.get('deprecated') return GCEZone(id=zone['id'], name=zone['name'], status=zone['status'], maintenance_windows=zone.get('maintenanceWindows'), deprecated=deprecated, driver=self, extra=extra) def _set_project_metadata(self, metadata=None, force=False, current_keys=""): """ Return the GCE-friendly dictionary of metadata with/without an entry for 'sshKeys' based on params for 'force' and 'current_keys'. This method was added to simplify the set_common_instance_metadata method and make it easier to test. :param metadata: The GCE-formatted dict (e.g. 'items' list of dicts) :type metadata: ``dict`` or ``None`` :param force: Flag to specify user preference for keeping current_keys :type force: ``bool`` :param current_keys: The value, if any, of existing 'sshKeys' :type current_keys: ``str`` :return: GCE-friendly metadata dict :rtype: ``dict`` """ if metadata is None: # User wants to delete metdata, but if 'force' is False # and we already have sshKeys, we should retain them. # Otherwise, delete ALL THE THINGS! 
if not force and current_keys: new_md = [{'key': 'sshKeys', 'value': current_keys}] else: new_md = [] else: # User is providing new metadata. If 'force' is False, they # want to preserve existing sshKeys, otherwise 'force' is True # and the user wants to add/replace sshKeys. new_md = metadata['items'] if not force and current_keys: # not sure how duplicate keys would be resolved, so ensure # existing 'sshKeys' entry is removed. updated_md = [] for d in new_md: if d['key'] != 'sshKeys': updated_md.append({'key': d['key'], 'value': d['value']}) new_md = updated_md new_md.append({'key': 'sshKeys', 'value': current_keys}) return new_md def _licenses_from_urls(self, licenses): """ Convert a list of license selfLinks into a list of :class:`GCELicense` objects. :param licenses: A list of GCE license selfLink URLs. :type licenses: ``list`` of ``str`` :return: List of :class:`GCELicense` objects. :rtype: ``list`` """ return_list = [] for license in licenses: selfLink_parts = license.split('/') lic_proj = selfLink_parts[6] lic_name = selfLink_parts[-1] return_list.append( self.ex_get_license(project=lic_proj, name=lic_name)) return return_list KIND_METHOD_MAP = { 'compute#address': _to_address, 'compute#backendService': _to_backendservice, 'compute#disk': _to_storage_volume, 'compute#firewall': _to_firewall, 'compute#forwardingRule': _to_forwarding_rule, 'compute#httpHealthCheck': _to_healthcheck, 'compute#image': _to_node_image, 'compute#instance': _to_node, 'compute#machineType': _to_node_size, 'compute#network': _to_network, 'compute#project': _to_project, 'compute#region': _to_region, 'compute#snapshot': _to_snapshot, 'compute#sslCertificate': _to_sslcertificate, 'compute#targetHttpProxy': _to_targethttpproxy, 'compute#targetHttpsProxy': _to_targethttpsproxy, 'compute#targetInstance': _to_targetinstance, 'compute#targetPool': _to_targetpool, 'compute#urlMap': _to_urlmap, 'compute#zone': _to_zone, } 
class IndosatNodeDriver(DimensionDataNodeDriver):
    """
    Indosat node driver, based on Dimension Data driver
    """

    selected_region = None
    connectionCls = DimensionDataConnection
    name = 'Indosat'
    website = 'http://www.indosat.com/'
    type = Provider.INDOSAT
    features = {'create_node': ['password']}
    api_version = 1.0

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=DEFAULT_REGION, **kwargs):
        """
        Instantiate the driver for the given region.

        :param  region: One of the keys of ``API_ENDPOINTS``
        :type   region: ``str``

        :raises ValueError: If ``region`` is not a known endpoint.
        """
        endpoint = API_ENDPOINTS.get(region)
        if endpoint is None:
            raise ValueError('Invalid region: %s' % (region))
        self.selected_region = endpoint
        super(IndosatNodeDriver, self).__init__(
            key=key, secret=secret, secure=secure, host=host, port=port,
            api_version=api_version, region=region, **kwargs)
kamikami00000000000000# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad
# Complutense de Madrid (dsa-research.org)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenNebula.org driver.

Talks to the OpenNebula OCCI (HTTP/XML) interface; the concrete driver
class is selected by API version in ``OpenNebulaNodeDriver.__new__``.
"""

__docformat__ = 'epytext'

from base64 import b64encode
import hashlib

from libcloud.utils.py3 import ET
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import next
from libcloud.utils.py3 import b

from libcloud.compute.base import NodeState, NodeDriver, Node, NodeLocation
from libcloud.common.base import ConnectionUserAndKey, XmlResponse
from libcloud.compute.base import NodeImage, NodeSize, StorageVolume
from libcloud.common.types import InvalidCredsError
from libcloud.compute.providers import Provider

# NOTE(review): OpenNebula_3_6_NodeDriver is defined below but not exported
# here — confirm the omission is intentional.
__all__ = [
    'ACTION',
    'OpenNebulaResponse',
    'OpenNebulaConnection',
    'OpenNebulaNodeSize',
    'OpenNebulaNetwork',
    'OpenNebulaNodeDriver',
    'OpenNebula_1_4_NodeDriver',
    'OpenNebula_2_0_NodeDriver',
    'OpenNebula_3_0_NodeDriver',
    'OpenNebula_3_2_NodeDriver',
    'OpenNebula_3_8_NodeDriver']

# Default connection settings; individual drivers/connections may override.
# API_PORT holds the (plain, secure) port pair consumed by the connection
# machinery.
API_HOST = ''
API_PORT = (4567, 443)
API_SECURE = True
API_PLAIN_AUTH = False
DEFAULT_API_VERSION = '3.2'


class ACTION(object):
    """
    All actions, except RESUME, only apply when the VM is in the "Running"
    state.

    The values are the literal STATE strings sent to the OCCI interface by
    :meth:`OpenNebulaNodeDriver.ex_node_action`.
    """

    STOP = 'STOPPED'
    """
    The VM is stopped, and its memory state stored to a checkpoint file. VM
    state, and disk image, are transferred back to the front-end. Resuming
    the VM requires the VM instance to be re-scheduled.
    """

    SUSPEND = 'SUSPENDED'
    """
    The VM is stopped, and its memory state stored to a checkpoint file. The
    VM state, and disk image, are left on the host to be resumed later.
    Resuming the VM does not require the VM to be re-scheduled. Rather, after
    suspending, the VM resources are reserved for later resuming.
    """

    RESUME = 'RESUME'
    """
    The VM is resumed using the saved memory state from the checkpoint file,
    and the VM's disk image. The VM is either started immediately, or
    re-scheduled depending on how it was suspended.
    """

    CANCEL = 'CANCEL'
    """
    The VM is forcibly shutdown, its memory state is deleted. If a persistent
    disk image was used, that disk image is transferred back to the front-end.
    Any non-persistent disk images are deleted.
    """

    SHUTDOWN = 'SHUTDOWN'
    """
    The VM is gracefully shutdown by sending the ACPI signal. If the VM does
    not shutdown, then it is considered to still be running. If successfully
    shutdown, its memory state is deleted. If a persistent disk image was
    used, that disk image is transferred back to the front-end. Any
    non-persistent disk images are deleted.
    """

    REBOOT = 'REBOOT'
    """
    Introduced in OpenNebula v3.2.

    The VM is gracefully restarted by sending the ACPI signal.
    """

    DONE = 'DONE'
    """
    The VM is forcibly shutdown, its memory state is deleted. If a persistent
    disk image was used, that disk image is transferred back to the front-end.
    Any non-persistent disk images are deleted.
    """


class OpenNebulaResponse(XmlResponse):
    """
    XmlResponse class for the OpenNebula.org driver.
    """

    def success(self):
        """
        Check if response has the appropriate HTTP response code to be a
        success.

        :rtype:  ``bool``
        :return: True if success, else False.
""" i = int(self.status) return i >= 200 and i <= 299 def parse_error(self): """ Check if response contains any errors. @raise: :class:`InvalidCredsError` :rtype: :class:`ElementTree` :return: Contents of HTTP response body. """ if int(self.status) == httplib.UNAUTHORIZED: raise InvalidCredsError(self.body) return self.body class OpenNebulaConnection(ConnectionUserAndKey): """ Connection class for the OpenNebula.org driver. with plain_auth support """ host = API_HOST port = API_PORT secure = API_SECURE plain_auth = API_PLAIN_AUTH responseCls = OpenNebulaResponse def __init__(self, *args, **kwargs): if 'plain_auth' in kwargs: self.plain_auth = kwargs.pop('plain_auth') super(OpenNebulaConnection, self).__init__(*args, **kwargs) def add_default_headers(self, headers): """ Add headers required by the OpenNebula.org OCCI interface. Includes adding Basic HTTP Authorization headers for authenticating against the OpenNebula.org OCCI interface. :type headers: ``dict`` :param headers: Dictionary containing HTTP headers. :rtype: ``dict`` :return: Dictionary containing updated headers. """ if self.plain_auth: passwd = self.key else: passwd = hashlib.sha1(b(self.key)).hexdigest() headers['Authorization'] =\ ('Basic %s' % b64encode(b('%s:%s' % (self.user_id, passwd))).decode('utf-8')) return headers class OpenNebulaNodeSize(NodeSize): """ NodeSize class for the OpenNebula.org driver. """ def __init__(self, id, name, ram, disk, bandwidth, price, driver, cpu=None, vcpu=None): super(OpenNebulaNodeSize, self).__init__(id=id, name=name, ram=ram, disk=disk, bandwidth=bandwidth, price=price, driver=driver) self.cpu = cpu self.vcpu = vcpu def __repr__(self): return (('') % (self.id, self.name, self.ram, self.disk, self.bandwidth, self.price, self.driver.name, self.cpu, self.vcpu)) class OpenNebulaNetwork(object): """ Provide a common interface for handling networks of all types. Network objects are analogous to physical switches connecting two or more physical nodes together. 
The Network object provides the interface in libcloud through which we can manipulate networks in different cloud providers in the same way. Network objects don't actually do much directly themselves, instead the network driver handles the connection to the network. You don't normally create a network object yourself; instead you use a driver and then have that create the network for you. >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver() >>> network = driver.create_network() >>> network = driver.list_networks()[0] >>> network.name 'dummy-1' """ def __init__(self, id, name, address, size, driver, extra=None): self.id = str(id) self.name = name self.address = address self.size = size self.driver = driver self.uuid = self.get_uuid() self.extra = extra or {} def get_uuid(self): """ Unique hash for this network. The hash is a function of an SHA1 hash of the network's ID and its driver which means that it should be unique between all networks. In some subclasses (e.g. GoGrid) there is no ID available so the public IP address is used. This means that, unlike a properly done system UUID, the same UUID may mean a different system install at a different time >>> from libcloud.network.drivers.dummy import DummyNetworkDriver >>> driver = DummyNetworkDriver() >>> network = driver.create_network() >>> network.get_uuid() 'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f' Note, for example, that this example will always produce the same UUID! :rtype: ``str`` :return: Unique identifier for this instance. """ return hashlib.sha1(b("%s:%s" % (self.id, self.driver.type))).hexdigest() def __repr__(self): return (('') % (self.uuid, self.name, self.address, self.size, self.driver.name)) class OpenNebulaNodeDriver(NodeDriver): """ OpenNebula.org node driver. 
""" connectionCls = OpenNebulaConnection name = 'OpenNebula' website = 'http://opennebula.org/' type = Provider.OPENNEBULA NODE_STATE_MAP = { 'INIT': NodeState.PENDING, 'PENDING': NodeState.PENDING, 'HOLD': NodeState.PENDING, 'ACTIVE': NodeState.RUNNING, 'STOPPED': NodeState.TERMINATED, 'SUSPENDED': NodeState.PENDING, 'DONE': NodeState.TERMINATED, 'FAILED': NodeState.TERMINATED} def __new__(cls, key, secret=None, api_version=DEFAULT_API_VERSION, **kwargs): if cls is OpenNebulaNodeDriver: if api_version in ['1.4']: cls = OpenNebula_1_4_NodeDriver elif api_version in ['2.0', '2.2']: cls = OpenNebula_2_0_NodeDriver elif api_version in ['3.0']: cls = OpenNebula_3_0_NodeDriver elif api_version in ['3.2']: cls = OpenNebula_3_2_NodeDriver elif api_version in ['3.6']: cls = OpenNebula_3_6_NodeDriver elif api_version in ['3.8']: cls = OpenNebula_3_8_NodeDriver if 'plain_auth' not in kwargs: kwargs['plain_auth'] = cls.plain_auth else: cls.plain_auth = kwargs['plain_auth'] else: raise NotImplementedError( "No OpenNebulaNodeDriver found for API version %s" % (api_version)) return super(OpenNebulaNodeDriver, cls).__new__(cls) def create_node(self, **kwargs): """ Create a new OpenNebula node. @inherits: :class:`NodeDriver.create_node` :keyword networks: List of virtual networks to which this node should connect. 
(optional) :type networks: :class:`OpenNebulaNetwork` or ``list`` of :class:`OpenNebulaNetwork` """ compute = ET.Element('COMPUTE') name = ET.SubElement(compute, 'NAME') name.text = kwargs['name'] instance_type = ET.SubElement(compute, 'INSTANCE_TYPE') instance_type.text = kwargs['size'].name storage = ET.SubElement(compute, 'STORAGE') ET.SubElement(storage, 'DISK', {'image': '%s' % (str(kwargs['image'].id))}) if 'networks' in kwargs: if not isinstance(kwargs['networks'], list): kwargs['networks'] = [kwargs['networks']] networkGroup = ET.SubElement(compute, 'NETWORK') for network in kwargs['networks']: if network.address: ET.SubElement(networkGroup, 'NIC', {'network': '%s' % (str(network.id)), 'ip': network.address}) else: ET.SubElement(networkGroup, 'NIC', {'network': '%s' % (str(network.id))}) xml = ET.tostring(compute) node = self.connection.request('/compute', method='POST', data=xml).object return self._to_node(node) def destroy_node(self, node): url = '/compute/%s' % (str(node.id)) resp = self.connection.request(url, method='DELETE') return resp.status == httplib.OK def list_nodes(self): return self._to_nodes(self.connection.request('/compute').object) def list_images(self, location=None): return self._to_images(self.connection.request('/storage').object) def list_sizes(self, location=None): """ Return list of sizes on a provider. @inherits: :class:`NodeDriver.list_sizes` :return: List of compute node sizes supported by the cloud provider. :rtype: ``list`` of :class:`OpenNebulaNodeSize` """ return [ NodeSize(id=1, name='small', ram=None, disk=None, bandwidth=None, price=None, driver=self), NodeSize(id=2, name='medium', ram=None, disk=None, bandwidth=None, price=None, driver=self), NodeSize(id=3, name='large', ram=None, disk=None, bandwidth=None, price=None, driver=self), ] def list_locations(self): return [NodeLocation(0, '', '', self)] def ex_list_networks(self, location=None): """ List virtual networks on a provider. 
:param location: Location from which to request a list of virtual networks. (optional) :type location: :class:`NodeLocation` :return: List of virtual networks available to be connected to a compute node. :rtype: ``list`` of :class:`OpenNebulaNetwork` """ return self._to_networks(self.connection.request('/network').object) def ex_node_action(self, node, action): """ Build action representation and instruct node to commit action. Build action representation from the compute node ID, and the action which should be carried out on that compute node. Then instruct the node to carry out that action. :param node: Compute node instance. :type node: :class:`Node` :param action: Action to be carried out on the compute node. :type action: ``str`` :return: False if an HTTP Bad Request is received, else, True is returned. :rtype: ``bool`` """ compute_node_id = str(node.id) compute = ET.Element('COMPUTE') compute_id = ET.SubElement(compute, 'ID') compute_id.text = compute_node_id state = ET.SubElement(compute, 'STATE') state.text = action xml = ET.tostring(compute) url = '/compute/%s' % compute_node_id resp = self.connection.request(url, method='PUT', data=xml) if resp.status == httplib.BAD_REQUEST: return False else: return True def _to_images(self, object): """ Request a list of images and convert that list to a list of NodeImage objects. Request a list of images from the OpenNebula web interface, and issue a request to convert each XML object representation of an image to a NodeImage object. :rtype: ``list`` of :class:`NodeImage` :return: List of images. """ images = [] for element in object.findall('DISK'): image_id = element.attrib['href'].partition('/storage/')[2] image = self.connection.request( ('/storage/%s' % (image_id))).object images.append(self._to_image(image)) return images def _to_image(self, image): """ Take XML object containing an image description and convert to NodeImage object. :type image: :class:`ElementTree` :param image: XML representation of an image. 
:rtype: :class:`NodeImage` :return: The newly extracted :class:`NodeImage`. """ return NodeImage(id=image.findtext('ID'), name=image.findtext('NAME'), driver=self.connection.driver, extra={'size': image.findtext('SIZE'), 'url': image.findtext('URL')}) def _to_networks(self, object): """ Request a list of networks and convert that list to a list of OpenNebulaNetwork objects. Request a list of networks from the OpenNebula web interface, and issue a request to convert each XML object representation of a network to an OpenNebulaNetwork object. :rtype: ``list`` of :class:`OpenNebulaNetwork` :return: List of virtual networks. """ networks = [] for element in object.findall('NETWORK'): network_id = element.attrib['href'].partition('/network/')[2] network_element = self.connection.request( ('/network/%s' % (network_id))).object networks.append(self._to_network(network_element)) return networks def _to_network(self, element): """ Take XML object containing a network description and convert to OpenNebulaNetwork object. Take XML representation containing a network description and convert to OpenNebulaNetwork object. :rtype: :class:`OpenNebulaNetwork` :return: The newly extracted :class:`OpenNebulaNetwork`. """ return OpenNebulaNetwork(id=element.findtext('ID'), name=element.findtext('NAME'), address=element.findtext('ADDRESS'), size=element.findtext('SIZE'), driver=self.connection.driver) def _to_nodes(self, object): """ Request a list of compute nodes and convert that list to a list of Node objects. Request a list of compute nodes from the OpenNebula web interface, and issue a request to convert each XML object representation of a node to a Node object. :rtype: ``list`` of :class:`Node` :return: A list of compute nodes. 
""" computes = [] for element in object.findall('COMPUTE'): compute_id = element.attrib['href'].partition('/compute/')[2] compute = self.connection.request( ('/compute/%s' % (compute_id))).object computes.append(self._to_node(compute)) return computes def _to_node(self, compute): """ Take XML object containing a compute node description and convert to Node object. Take XML representation containing a compute node description and convert to Node object. :type compute: :class:`ElementTree` :param compute: XML representation of a compute node. :rtype: :class:`Node` :return: The newly extracted :class:`Node`. """ try: state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()] except KeyError: state = NodeState.UNKNOWN return Node(id=compute.findtext('ID'), name=compute.findtext('NAME'), state=state, public_ips=self._extract_networks(compute), private_ips=[], driver=self.connection.driver, image=self._extract_images(compute)) def _extract_networks(self, compute): """ Extract networks from a compute node XML representation. Extract network descriptions from a compute node XML representation, converting each network to an OpenNebulaNetwork object. :type compute: :class:`ElementTree` :param compute: XML representation of a compute node. :rtype: ``list`` of :class:`OpenNebulaNetwork`s. :return: List of virtual networks attached to the compute node. """ networks = list() network_list = compute.find('NETWORK') for element in network_list.findall('NIC'): networks.append( OpenNebulaNetwork(id=element.attrib.get('network', None), name=None, address=element.attrib.get('ip', None), size=1, driver=self.connection.driver)) return networks def _extract_images(self, compute): """ Extract image disks from a compute node XML representation. Extract image disk descriptions from a compute node XML representation, converting the disks to an NodeImage object. :type compute: :class:`ElementTree` :param compute: XML representation of a compute node. :rtype: :class:`NodeImage`. 
        :return: First disk attached to a compute node.
        """
        disks = list()

        disk_list = compute.find('STORAGE')
        if disk_list is not None:
            for element in disk_list.findall('DISK'):
                disks.append(
                    NodeImage(id=element.attrib.get('image', None),
                              name=None,
                              driver=self.connection.driver,
                              extra={'dev': element.attrib.get('dev', None)}))

        # @TODO: Return all disks when the Node type accepts multiple
        # attached disks per node.
        if len(disks) > 0:
            return disks[0]
        else:
            return None


class OpenNebula_1_4_NodeDriver(OpenNebulaNodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v1.4.
    """

    name = 'OpenNebula (v1.4)'


class OpenNebula_2_0_NodeDriver(OpenNebulaNodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v2.0 through
    OpenNebula.org v2.2.
    """

    name = 'OpenNebula (v2.0 - v2.2)'

    def create_node(self, **kwargs):
        """
        Create a new OpenNebula node.

        @inherits: :class:`NodeDriver.create_node`

        :keyword networks: List of virtual networks to which this node should
                           connect. (optional)
        :type    networks: :class:`OpenNebulaNetwork` or ``list``
                           of :class:`OpenNebulaNetwork`

        :keyword context: Custom (key, value) pairs to be injected into
                          compute node XML description. (optional)
        :type    context: ``dict``

        :return: Instance of a newly created node.
        :rtype: :class:`Node`
        """
        # v2.0 OCCI layout: DISK/STORAGE and per-NIC elements replace the
        # v1.4 STORAGE/DISK and NETWORK/NIC grouping used by the base class.
        compute = ET.Element('COMPUTE')

        name = ET.SubElement(compute, 'NAME')
        name.text = kwargs['name']

        instance_type = ET.SubElement(compute, 'INSTANCE_TYPE')
        instance_type.text = kwargs['size'].name

        disk = ET.SubElement(compute, 'DISK')
        ET.SubElement(disk,
                      'STORAGE',
                      {'href': '/storage/%s' % (str(kwargs['image'].id))})

        if 'networks' in kwargs:
            if not isinstance(kwargs['networks'], list):
                kwargs['networks'] = [kwargs['networks']]

            for network in kwargs['networks']:
                nic = ET.SubElement(compute, 'NIC')
                ET.SubElement(nic, 'NETWORK',
                              {'href': '/network/%s' % (str(network.id))})
                if network.address:
                    ip_line = ET.SubElement(nic, 'IP')
                    ip_line.text = network.address

        if 'context' in kwargs:
            if isinstance(kwargs['context'], dict):
                # Context keys are upper-cased to match OpenNebula's
                # contextualization variable convention.
                contextGroup = ET.SubElement(compute, 'CONTEXT')

                for key, value in list(kwargs['context'].items()):
                    context = ET.SubElement(contextGroup, key.upper())
                    context.text = value

        xml = ET.tostring(compute)
        node = self.connection.request('/compute', method='POST',
                                       data=xml).object

        return self._to_node(node)

    def destroy_node(self, node):
        url = '/compute/%s' % (str(node.id))
        resp = self.connection.request(url, method='DELETE')

        # v2.0+ answers 204 No Content (v1.4 answered 200 OK).
        return resp.status == httplib.NO_CONTENT

    def list_sizes(self, location=None):
        """
        Return list of sizes on a provider.

        @inherits: :class:`NodeDriver.list_sizes`

        :return: List of compute node sizes supported by the cloud provider.
        :rtype: ``list`` of :class:`OpenNebulaNodeSize`
        """
        # Hard-coded catalogue; v3.2+ drivers query /instance_type instead.
        return [
            OpenNebulaNodeSize(id=1,
                               name='small',
                               ram=1024,
                               cpu=1,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
            OpenNebulaNodeSize(id=2,
                               name='medium',
                               ram=4096,
                               cpu=4,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
            OpenNebulaNodeSize(id=3,
                               name='large',
                               ram=8192,
                               cpu=8,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
            OpenNebulaNodeSize(id=4,
                               name='custom',
                               ram=0,
                               cpu=0,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
        ]

    def _to_images(self, object):
        """
        Request a list of images and convert that list to a list of NodeImage
        objects.

        Request a list of images from the OpenNebula web interface, and
        issue a request to convert each XML object representation of an
        image to a NodeImage object.

        :rtype:  ``list`` of :class:`NodeImage`
        :return: List of images.
        """
        images = []
        # v2.0 indexes images under STORAGE elements (v1.4 used DISK).
        for element in object.findall('STORAGE'):
            image_id = element.attrib["href"].partition("/storage/")[2]
            image = self.connection.request(
                ("/storage/%s" % (image_id))).object
            images.append(self._to_image(image))

        return images

    def _to_image(self, image):
        """
        Take XML object containing an image description and convert to
        NodeImage object.

        :type image: :class:`ElementTree`
        :param image: XML representation of an image.

        :rtype:  :class:`NodeImage`
        :return: The newly extracted :class:`NodeImage`.
        """
        return NodeImage(id=image.findtext('ID'),
                         name=image.findtext('NAME'),
                         driver=self.connection.driver,
                         extra={'description': image.findtext('DESCRIPTION'),
                                'type': image.findtext('TYPE'),
                                'size': image.findtext('SIZE'),
                                'fstype': image.findtext('FSTYPE', None)})

    def _to_node(self, compute):
        """
        Take XML object containing a compute node description and convert to
        Node object.

        Take XML representation containing a compute node description and
        convert to Node object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype:  :class:`Node`
        :return: The newly extracted :class:`Node`.
        """
        try:
            state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()]
        except KeyError:
            state = NodeState.UNKNOWN

        return Node(id=compute.findtext('ID'),
                    name=compute.findtext('NAME'),
                    state=state,
                    public_ips=self._extract_networks(compute),
                    private_ips=[],
                    driver=self.connection.driver,
                    image=self._extract_images(compute),
                    size=self._extract_size(compute),
                    extra={'context': self._extract_context(compute)})

    def _extract_networks(self, compute):
        """
        Extract networks from a compute node XML representation.

        Extract network descriptions from a compute node XML representation,
        converting each network to an OpenNebulaNetwork object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: ``list`` of :class:`OpenNebulaNetwork`
        :return: List of virtual networks attached to the compute node.
        """
        networks = []

        # v2.0 places NIC elements directly under COMPUTE.
        for element in compute.findall('NIC'):
            network = element.find('NETWORK')
            network_id = network.attrib['href'].partition('/network/')[2]

            networks.append(
                OpenNebulaNetwork(id=network_id,
                                  name=network.attrib.get('name', None),
                                  address=element.findtext('IP'),
                                  size=1,
                                  driver=self.connection.driver,
                                  extra={'mac': element.findtext('MAC')}))

        return networks

    def _extract_images(self, compute):
        """
        Extract image disks from a compute node XML representation.

        Extract image disk descriptions from a compute node XML
        representation, converting the disks to an NodeImage object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: ``list`` of :class:`NodeImage`
        :return: Disks attached to a compute node.
        """
        disks = list()

        for element in compute.findall('DISK'):
            disk = element.find('STORAGE')
            image_id = disk.attrib['href'].partition('/storage/')[2]

            if 'id' in element.attrib:
                disk_id = element.attrib['id']
            else:
                disk_id = None

            disks.append(
                NodeImage(id=image_id,
                          name=disk.attrib.get('name', None),
                          driver=self.connection.driver,
                          extra={'type': element.findtext('TYPE'),
                                 'disk_id': disk_id,
                                 'target': element.findtext('TARGET')}))

        # Return all disks when the Node type accepts multiple attached disks
        # per node.
        if len(disks) > 1:
            return disks
        elif len(disks) == 1:
            return disks[0]
        else:
            return None

    def _extract_size(self, compute):
        """
        Extract size, or node type, from a compute node XML representation.

        Extract node size, or node type, description from a compute node XML
        representation, converting the node size to a NodeSize object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype:  :class:`OpenNebulaNodeSize`
        :return: Node type of compute node.
        """
        instance_type = compute.find('INSTANCE_TYPE')

        try:
            # Match the INSTANCE_TYPE name against the known size catalogue.
            return next((node_size for node_size in self.list_sizes()
                         if node_size.name == instance_type.text))
        except StopIteration:
            return None

    def _extract_context(self, compute):
        """
        Extract the contextualization section from a compute node XML
        representation.

        Collect the (key, value) pairs found under the CONTEXT element,
        lower-casing each key.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype:  ``dict``
        :return: Dictionary containing (key, value) pairs related to
                 compute node context.
        """
        contexts = dict()
        context = compute.find('CONTEXT')

        if context is not None:
            for context_element in list(context):
                contexts[context_element.tag.lower()] = context_element.text

        return contexts


class OpenNebula_3_0_NodeDriver(OpenNebula_2_0_NodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v3.0.
""" name = 'OpenNebula (v3.0)' def ex_node_set_save_name(self, node, name): """ Build action representation and instruct node to commit action. Build action representation from the compute node ID, the disk image which will be saved, and the name under which the image will be saved upon shutting down the compute node. :param node: Compute node instance. :type node: :class:`Node` :param name: Name under which the image should be saved after shutting down the compute node. :type name: ``str`` :return: False if an HTTP Bad Request is received, else, True is returned. :rtype: ``bool`` """ compute_node_id = str(node.id) compute = ET.Element('COMPUTE') compute_id = ET.SubElement(compute, 'ID') compute_id.text = compute_node_id disk = ET.SubElement(compute, 'DISK', {'id': str(node.image.id)}) ET.SubElement(disk, 'STORAGE', {'href': '/storage/%s' % (str(node.image.id)), 'name': node.image.name}) ET.SubElement(disk, 'SAVE_AS', {'name': str(name)}) xml = ET.tostring(compute) url = '/compute/%s' % compute_node_id resp = self.connection.request(url, method='PUT', data=xml) if resp.status == httplib.BAD_REQUEST: return False else: return True def _to_network(self, element): """ Take XML object containing a network description and convert to OpenNebulaNetwork object. Take XML representation containing a network description and convert to OpenNebulaNetwork object. :return: The newly extracted :class:`OpenNebulaNetwork`. :rtype: :class:`OpenNebulaNetwork` """ return OpenNebulaNetwork(id=element.findtext('ID'), name=element.findtext('NAME'), address=element.findtext('ADDRESS'), size=element.findtext('SIZE'), driver=self.connection.driver, extra={'public': element.findtext('PUBLIC')}) class OpenNebula_3_2_NodeDriver(OpenNebula_3_0_NodeDriver): """ OpenNebula.org node driver for OpenNebula.org v3.2. """ name = 'OpenNebula (v3.2)' def reboot_node(self, node): return self.ex_node_action(node, ACTION.REBOOT) def list_sizes(self, location=None): """ Return list of sizes on a provider. 
@inherits: :class:`NodeDriver.list_sizes` :return: List of compute node sizes supported by the cloud provider. :rtype: ``list`` of :class:`OpenNebulaNodeSize` """ return self._to_sizes(self.connection.request('/instance_type').object) def _to_sizes(self, object): """ Request a list of instance types and convert that list to a list of OpenNebulaNodeSize objects. Request a list of instance types from the OpenNebula web interface, and issue a request to convert each XML object representation of an instance type to an OpenNebulaNodeSize object. :return: List of instance types. :rtype: ``list`` of :class:`OpenNebulaNodeSize` """ sizes = [] size_id = 1 attributes = [('name', str, None), ('ram', int, 'MEMORY'), ('cpu', float, None), ('vcpu', float, None), ('disk', str, None), ('bandwidth', float, None), ('price', float, None)] for element in object.findall('INSTANCE_TYPE'): size_kwargs = {'id': size_id, 'driver': self} values = self._get_attributes_values(attributes=attributes, element=element) size_kwargs.update(values) size = OpenNebulaNodeSize(**size_kwargs) sizes.append(size) size_id += 1 return sizes def _get_attributes_values(self, attributes, element): values = {} for attribute_name, attribute_type, alias in attributes: key = alias if alias else attribute_name.upper() value = element.findtext(key) if value is not None: value = attribute_type(value) values[attribute_name] = value return values class OpenNebula_3_6_NodeDriver(OpenNebula_3_2_NodeDriver): """ OpenNebula.org node driver for OpenNebula.org v3.6. 
""" name = 'OpenNebula (v3.6)' def create_volume(self, size, name, location=None, snapshot=None): storage = ET.Element('STORAGE') vol_name = ET.SubElement(storage, 'NAME') vol_name.text = name vol_type = ET.SubElement(storage, 'TYPE') vol_type.text = 'DATABLOCK' description = ET.SubElement(storage, 'DESCRIPTION') description.text = 'Attached storage' public = ET.SubElement(storage, 'PUBLIC') public.text = 'NO' persistent = ET.SubElement(storage, 'PERSISTENT') persistent.text = 'YES' fstype = ET.SubElement(storage, 'FSTYPE') fstype.text = 'ext3' vol_size = ET.SubElement(storage, 'SIZE') vol_size.text = str(size) xml = ET.tostring(storage) volume = self.connection.request('/storage', {'occixml': xml}, method='POST').object return self._to_volume(volume) def destroy_volume(self, volume): url = '/storage/%s' % (str(volume.id)) resp = self.connection.request(url, method='DELETE') return resp.status == httplib.NO_CONTENT def attach_volume(self, node, volume, device): action = ET.Element('ACTION') perform = ET.SubElement(action, 'PERFORM') perform.text = 'ATTACHDISK' params = ET.SubElement(action, 'PARAMS') ET.SubElement(params, 'STORAGE', {'href': '/storage/%s' % (str(volume.id))}) target = ET.SubElement(params, 'TARGET') target.text = device xml = ET.tostring(action) url = '/compute/%s/action' % node.id resp = self.connection.request(url, method='POST', data=xml) return resp.status == httplib.ACCEPTED def _do_detach_volume(self, node_id, disk_id): action = ET.Element('ACTION') perform = ET.SubElement(action, 'PERFORM') perform.text = 'DETACHDISK' params = ET.SubElement(action, 'PARAMS') ET.SubElement(params, 'DISK', {'id': disk_id}) xml = ET.tostring(action) url = '/compute/%s/action' % node_id resp = self.connection.request(url, method='POST', data=xml) return resp.status == httplib.ACCEPTED def detach_volume(self, volume): # We need to find the node using this volume for node in self.list_nodes(): if type(node.image) is not list: # This node has only one associated 
image. It is not the one we # are after. continue for disk in node.image: if disk.id == volume.id: # Node found. We can now detach the volume disk_id = disk.extra['disk_id'] return self._do_detach_volume(node.id, disk_id) return False def list_volumes(self): return self._to_volumes(self.connection.request('/storage').object) def _to_volume(self, storage): return StorageVolume(id=storage.findtext('ID'), name=storage.findtext('NAME'), size=int(storage.findtext('SIZE')), driver=self.connection.driver) def _to_volumes(self, object): volumes = [] for storage in object.findall('STORAGE'): storage_id = storage.attrib['href'].partition('/storage/')[2] volumes.append(self._to_volume( self.connection.request('/storage/%s' % storage_id).object)) return volumes class OpenNebula_3_8_NodeDriver(OpenNebula_3_6_NodeDriver): """ OpenNebula.org node driver for OpenNebula.org v3.8. """ name = 'OpenNebula (v3.8)' plain_auth = API_PLAIN_AUTH def _to_sizes(self, object): """ Request a list of instance types and convert that list to a list of OpenNebulaNodeSize objects. Request a list of instance types from the OpenNebula web interface, and issue a request to convert each XML object representation of an instance type to an OpenNebulaNodeSize object. :return: List of instance types. 
        :rtype: ``list`` of :class:`OpenNebulaNodeSize`
        """
        sizes = []
        size_id = 1

        attributes = [('name', str, None),
                      ('ram', int, 'MEMORY'),
                      ('cpu', float, None),
                      ('vcpu', float, None),
                      ('disk', str, None),
                      ('bandwidth', float, None),
                      ('price', float, None)]

        for element in object.findall('INSTANCE_TYPE'):
            # Unlike v3.2, the v3.8 index is shallow: each instance type
            # must be fetched individually by name before conversion.
            element = self.connection.request(
                ('/instance_type/%s') % (element.attrib['name'])).object

            size_kwargs = {'id': size_id, 'driver': self}
            values = self._get_attributes_values(attributes=attributes,
                                                 element=element)
            size_kwargs.update(values)

            size = OpenNebulaNodeSize(**size_kwargs)
            sizes.append(size)
            size_id += 1

        return sizes

    def _ex_connection_class_kwargs(self):
        """
        Set plain_auth as an extra :class:`OpenNebulaConnection_3_8` argument

        :return: ``dict`` of :class:`OpenNebulaConnection_3_8` input arguments
        """

        return {'plain_auth': self.plain_auth}
apache-libcloud-2.2.1/libcloud/compute/drivers/azure.py0000664000175000017500000033503413153541406023057 0ustar  kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Driver for Microsoft Azure Virtual Machines service.
http://azure.microsoft.com/en-us/services/virtual-machines/
"""

import re
import time
import collections
import random
import sys
import copy
import base64

from datetime import datetime
from xml.dom import minidom
from xml.sax.saxutils import escape as xml_escape
from libcloud.utils.py3 import ET
from libcloud.common.azure import AzureServiceManagementConnection
from libcloud.common.azure import AzureRedirectException
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume
from libcloud.compute.types import NodeState
from libcloud.common.types import LibcloudError
from libcloud.utils.py3 import _real_unicode
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import ensure_string
from libcloud.utils.py3 import urlquote as url_quote
from libcloud.utils.misc import ReprMixin

HTTPSConnection = httplib.HTTPSConnection

# Python 2/3 compatibility shim: on Python 2, unicode values must be encoded
# to UTF-8 bytes before being embedded in request paths; on Python 3 plain
# str() suffices.
if sys.version_info < (3,):
    _unicode_type = unicode

    def _str(value):
        if isinstance(value, unicode):
            return value.encode('utf-8')

        return str(value)
else:
    _str = str
    _unicode_type = str


AZURE_SERVICE_MANAGEMENT_HOST = 'management.core.windows.net'
X_MS_VERSION = '2013-08-01'

# Heuristic for detecting Windows-based images from their image id.
# NOTE(review): compiled without re.IGNORECASE — callers must supply
# case-insensitivity themselves if they want it.
WINDOWS_SERVER_REGEX = re.compile(
    r'Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk'
)

"""
Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them
From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx

Prices are for Linux instances in East US data center. To see what pricing
will actually be, visit:
http://azure.microsoft.com/en-gb/pricing/details/virtual-machines/
"""
AZURE_COMPUTE_INSTANCE_TYPES = {
    'A0': {
        'id': 'ExtraSmall',
        'name': 'Extra Small Instance',
        'ram': 768,
        'disk': 127,
        'bandwidth': None,
        'price': '0.0211',
        'max_data_disks': 1,
        'cores': 'Shared'
    },
    'A1': {
        'id': 'Small',
        'name': 'Small Instance',
        'ram': 1792,
        'disk': 127,
        'bandwidth': None,
        'price': '0.0633',
        'max_data_disks': 2,
        'cores': 1
    },
    'A2': {
        'id': 'Medium',
        'name': 'Medium Instance',
        'ram': 3584,
        'disk': 127,
        'bandwidth': None,
        'price': '0.1266',
        'max_data_disks': 4,
        'cores': 2
    },
    'A3': {
        'id': 'Large',
        'name': 'Large Instance',
        'ram': 7168,
        'disk': 127,
        'bandwidth': None,
        'price': '0.2531',
        'max_data_disks': 8,
        'cores': 4
    },
    'A4': {
        'id': 'ExtraLarge',
        'name': 'Extra Large Instance',
        'ram': 14336,
        'disk': 127,
        'bandwidth': None,
        'price': '0.5062',
        'max_data_disks': 16,
        'cores': 8
    },
    'A5': {
        'id': 'A5',
        'name': 'Memory Intensive Instance',
        'ram': 14336,
        'disk': 127,
        'bandwidth': None,
        'price': '0.2637',
        'max_data_disks': 4,
        'cores': 2
    },
    'A6': {
        'id': 'A6',
        'name': 'A6 Instance',
        'ram': 28672,
        'disk': 127,
        'bandwidth': None,
        'price': '0.5273',
        'max_data_disks': 8,
        'cores': 4
    },
    'A7': {
        'id': 'A7',
        'name': 'A7 Instance',
        'ram': 57344,
        'disk': 127,
        'bandwidth': None,
        'price': '1.0545',
        'max_data_disks': 16,
        'cores': 8
    },
    'A8': {
        'id': 'A8',
        'name': 'A8 Instance',
        'ram': 57344,
        'disk': 127,
        'bandwidth': None,
        'price': '2.0774',
        'max_data_disks': 16,
        'cores': 8
    },
    'A9': {
        'id': 'A9',
        'name': 'A9 Instance',
        'ram': 114688,
        'disk': 127,
        'bandwidth': None,
        'price': '4.7137',
        'max_data_disks': 16,
        'cores': 16
    },
    'A10': {
        'id': 'A10',
        'name': 'A10 Instance',
        'ram': 57344,
        'disk': 127,
        'bandwidth': None,
        'price': '1.2233',
        'max_data_disks': 16,
        'cores': 8
    },
    'A11': {
        'id': 'A11',
        'name': 'A11 Instance',
        'ram': 114688,
        'disk': 127,
        'bandwidth': None,
        'price': '2.1934',
        'max_data_disks': 16,
        'cores': 16
    },
    'D1': {
        'id': 'Standard_D1',
        'name': 'D1 Faster Compute Instance',
        'ram': 3584,
        'disk': 127,
        'bandwidth': None,
        'price': '0.0992',
        'max_data_disks': 2,
        'cores': 1
    },
    'D2': {
        'id': 'Standard_D2',
        'name': 'D2 Faster Compute Instance',
        'ram': 7168,
        'disk': 127,
        'bandwidth': None,
        'price': '0.1983',
        'max_data_disks': 4,
        'cores': 2
    },
    'D3': {
        'id': 'Standard_D3',
        'name': 'D3 Faster Compute Instance',
        'ram': 14336,
        'disk': 127,
        'bandwidth': None,
        'price': '0.3965',
        'max_data_disks': 8,
        'cores': 4
    },
    'D4': {
        'id': 'Standard_D4',
        'name': 'D4 Faster Compute Instance',
        'ram': 28672,
        'disk': 127,
        'bandwidth': None,
        'price': '0.793',
        'max_data_disks': 16,
        'cores': 8
    },
    'D11': {
        'id': 'Standard_D11',
        'name': 'D11 Faster Compute Instance',
        'ram': 14336,
        'disk': 127,
        'bandwidth': None,
        'price': '0.251',
        'max_data_disks': 4,
        'cores': 2
    },
    'D12': {
        'id': 'Standard_D12',
        'name': 'D12 Faster Compute Instance',
        'ram': 28672,
        'disk': 127,
        'bandwidth': None,
        'price': '0.502',
        'max_data_disks': 8,
        'cores': 4
    },
    'D13': {
        'id': 'Standard_D13',
        'name': 'D13 Faster Compute Instance',
        'ram': 57344,
        'disk': 127,
        'bandwidth': None,
        'price': '0.9038',
        'max_data_disks': 16,
        'cores': 8
    },
    'D14': {
        'id': 'Standard_D14',
        'name': 'D14 Faster Compute Instance',
        'ram': 114688,
        'disk': 127,
        'bandwidth': None,
        'price': '1.6261',
        'max_data_disks': 32,
        'cores': 16
    }
}

# Mapping from python_style attribute names to the Azure XML element names
# that cannot be derived by the generic snake_case -> CamelCase rule used
# during (de)serialization.
_KNOWN_SERIALIZATION_XFORMS = {
    'include_apis': 'IncludeAPIs',
    'message_id': 'MessageId',
    'content_md5': 'Content-MD5',
    'last_modified': 'Last-Modified',
    'cache_control': 'Cache-Control',
    'account_admin_live_email_id': 'AccountAdminLiveEmailId',
    'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
    'subscription_id': 'SubscriptionID',
    'fqdn': 'FQDN',
    'private_id': 'PrivateID',
    'os_virtual_hard_disk': 'OSVirtualHardDisk',
    'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
    'logical_size_in_gb': 'LogicalSizeInGB',
    'os': 'OS',
    'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
    'copy_id': 'CopyId',
    'os_disk_configuration': 'OSDiskConfiguration',
    'is_dns_programmed': 'IsDnsProgrammed'
}
class AzureNodeDriver(NodeDriver):
    connectionCls = AzureServiceManagementConnection
    name = 'Azure Virtual machines'
    website = 'http://azure.microsoft.com/en-us/services/virtual-machines/'
    type = Provider.AZURE
    _instance_types = AZURE_COMPUTE_INSTANCE_TYPES
    _blob_url = ".blob.core.windows.net"
    features = {'create_node': ['password']}
    # (is_affinity_group, service_location) pair returned by
    # _get_cloud_service_location().
    service_location = collections.namedtuple(
        'service_location',
        ['is_affinity_group', 'service_location']
    )

    # Map Azure role instance states onto libcloud NodeState values.
    NODE_STATE_MAP = {
        'RoleStateUnknown': NodeState.UNKNOWN,
        'CreatingVM': NodeState.PENDING,
        'StartingVM': NodeState.PENDING,
        'Provisioning': NodeState.PENDING,
        'CreatingRole': NodeState.PENDING,
        'StartingRole': NodeState.PENDING,
        'ReadyRole': NodeState.RUNNING,
        'BusyRole': NodeState.PENDING,
        'StoppingRole': NodeState.PENDING,
        'StoppingVM': NodeState.PENDING,
        'DeletingVM': NodeState.PENDING,
        'StoppedVM': NodeState.STOPPED,
        'RestartingRole': NodeState.REBOOTING,
        'CyclingRole': NodeState.TERMINATED,
        'FailedStartingRole': NodeState.TERMINATED,
        'FailedStartingVM': NodeState.TERMINATED,
        'UnresponsiveRole': NodeState.TERMINATED,
        'StoppedDeallocated': NodeState.TERMINATED,
    }

    def __init__(self, subscription_id=None, key_file=None, **kwargs):
        """
        subscription_id contains the Azure subscription id in the form of GUID
        key_file contains the Azure X509 certificate in .pem form
        """
        self.subscription_id = subscription_id
        self.key_file = key_file
        self.follow_redirects = kwargs.get('follow_redirects', True)
        super(AzureNodeDriver, self).__init__(
            self.subscription_id,
            self.key_file,
            secure=True,
            **kwargs
        )

    def list_sizes(self):
        """
        Lists all sizes

        :rtype: ``list`` of :class:`NodeSize`
        """
        sizes = []

        for _, values in self._instance_types.items():
            # deepcopy so the shared class-level dict can never be mutated
            # through a returned NodeSize.
            node_size = self._to_node_size(copy.deepcopy(values))
            sizes.append(node_size)

        return sizes

    def list_images(self, location=None):
        """
        Lists all images

        :rtype: ``list`` of :class:`NodeImage`
        """
        # Platform (gallery) images and user-captured VM images are listed
        # through two different endpoints; merge both result sets.
        data = self._perform_get(self._get_image_path(), Images)

        custom_image_data = self._perform_get(
            self._get_vmimage_path(),
            VMImages
        )

        images = [self._to_image(i) for i in data]
        images.extend(self._vm_to_image(j) for j in custom_image_data)

        if location is not None:
            images = [
                image
                for image in images
                if location in image.extra["location"]
            ]

        return images

    def list_locations(self):
        """
        Lists all locations

        :rtype: ``list`` of :class:`NodeLocation`
        """
        data = self._perform_get(
            '/' + self.subscription_id + '/locations',
            Locations
        )

        return [self._to_location(l) for l in data]

    def list_nodes(self, ex_cloud_service_name):
        """
        List all nodes

        ex_cloud_service_name parameter is used to scope the request
        to a specific Cloud Service. This is a required parameter as
        nodes cannot exist outside of a Cloud Service nor be shared
        between a Cloud Service within Azure.

        :param      ex_cloud_service_name: Cloud Service name
        :type       ex_cloud_service_name: ``str``

        :rtype: ``list`` of :class:`Node`
        """
        response = self._perform_get(
            self._get_hosted_service_path(ex_cloud_service_name) +
            '?embed-detail=True',
            None
        )
        self.raise_for_response(response, 200)

        data = self._parse_response(response, HostedService)

        vips = None

        if (len(data.deployments) > 0 and
                data.deployments[0].virtual_ips is not None):
            vips = [vip.address for vip in data.deployments[0].virtual_ips]

        try:
            # Only the first deployment is inspected; a service with no
            # deployments raises IndexError and yields an empty list.
            return [
                self._to_node(n, ex_cloud_service_name, vips)
                for n in data.deployments[0].role_instance_list
            ]
        except IndexError:
            return []

    def reboot_node(self, node, ex_cloud_service_name=None,
                    ex_deployment_slot=None):
        """
        Reboots a node.

        ex_cloud_service_name parameter is used to scope the request
        to a specific Cloud Service. This is a required parameter as
        nodes cannot exist outside of a Cloud Service nor be shared
        between a Cloud Service within Azure.

        :param      ex_cloud_service_name: Cloud Service name
        :type       ex_cloud_service_name: ``str``

        :param      ex_deployment_slot: Options are "production" (default)
                                        or "Staging". (Optional)
        :type       ex_deployment_slot: ``str``

        :rtype: ``bool``
        """
        if ex_cloud_service_name is None:
            if node.extra is not None:
                ex_cloud_service_name = node.extra.get(
                    'ex_cloud_service_name'
                )

        if not ex_cloud_service_name:
            raise ValueError("ex_cloud_service_name is required.")

        if not ex_deployment_slot:
            ex_deployment_slot = "Production"

        _deployment_name = self._get_deployment(
            service_name=ex_cloud_service_name,
            deployment_slot=ex_deployment_slot
        ).name

        try:
            response = self._perform_post(
                self._get_deployment_path_using_name(
                    ex_cloud_service_name,
                    _deployment_name
                ) + '/roleinstances/' + _str(node.id) + '?comp=reboot',
                ''
            )
            self.raise_for_response(response, 202)

            if self._parse_response_for_async_op(response):
                return True
            else:
                return False
        except Exception:
            # NOTE(review): deliberately best-effort — any failure while
            # issuing the reboot is reported as False rather than raised.
            return False
(Optional) :type ex_deployment_slot: ``str`` :rtype: ``bool`` """ if ex_cloud_service_name is None: if node.extra is not None: ex_cloud_service_name = node.extra.get( 'ex_cloud_service_name' ) if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") if not ex_deployment_slot: ex_deployment_slot = "Production" _deployment_name = self._get_deployment( service_name=ex_cloud_service_name, deployment_slot=ex_deployment_slot ).name try: response = self._perform_post( self._get_deployment_path_using_name( ex_cloud_service_name, _deployment_name ) + '/roleinstances/' + _str(node.id) + '?comp=reboot', '' ) self.raise_for_response(response, 202) if self._parse_response_for_async_op(response): return True else: return False except Exception: return False def list_volumes(self, node=None): """ Lists volumes of the disks in the image repository that are associated with the specified subscription. Pass Node object to scope the list of volumes to a single instance. :rtype: ``list`` of :class:`StorageVolume` """ data = self._perform_get(self._get_disk_path(), Disks) volumes = [self._to_volume(volume=v, node=node) for v in data] return volumes def create_node(self, name, size, image, ex_cloud_service_name, ex_storage_service_name=None, ex_new_deployment=False, ex_deployment_slot="Production", ex_deployment_name=None, ex_admin_user_id="azureuser", ex_custom_data=None, ex_virtual_network_name=None, ex_network_config=None, auth=None, **kwargs): """ Create Azure Virtual Machine Reference: http://bit.ly/1fIsCb7 [www.windowsazure.com/en-us/documentation/] We default to: + 3389/TCP - RDP - 1st Microsoft instance. + RANDOM/TCP - RDP - All succeeding Microsoft instances. + 22/TCP - SSH - 1st Linux instance + RANDOM/TCP - SSH - All succeeding Linux instances. The above replicates the standard behavior of the Azure UI. 
You can retrieve the assigned ports to each instance by using the following private function: _get_endpoint_ports(service_name) Returns public,private port key pair. @inherits: :class:`NodeDriver.create_node` :keyword image: The image to use when creating this node :type image: `NodeImage` :keyword size: The size of the instance to create :type size: `NodeSize` :keyword ex_cloud_service_name: Required. Name of the Azure Cloud Service. :type ex_cloud_service_name: ``str`` :keyword ex_storage_service_name: Optional: Name of the Azure Storage Service. :type ex_storage_service_name: ``str`` :keyword ex_new_deployment: Optional. Tells azure to create a new deployment rather than add to an existing one. :type ex_new_deployment: ``boolean`` :keyword ex_deployment_slot: Optional: Valid values: production| staging. Defaults to production. :type ex_deployment_slot: ``str`` :keyword ex_deployment_name: Optional. The name of the deployment. If this is not passed in we default to using the Cloud Service name. :type ex_deployment_name: ``str`` :type ex_custom_data: ``str`` :keyword ex_custom_data: Optional script or other data which is injected into the VM when it's beginning provisioned. :keyword ex_admin_user_id: Optional. Defaults to 'azureuser'. :type ex_admin_user_id: ``str`` :keyword ex_virtual_network_name: Optional. If this is not passed in no virtual network is used. :type ex_virtual_network_name: ``str`` :keyword ex_network_config: Optional. 
The ConfigurationSet to use for network configuration :type ex_network_config: `ConfigurationSet` """ # TODO: Refactor this method to make it more readable, split it into # multiple smaller methods auth = self._get_and_check_auth(auth) password = auth.password if not isinstance(size, NodeSize): raise ValueError('Size must be an instance of NodeSize') if not isinstance(image, NodeImage): raise ValueError( "Image must be an instance of NodeImage, " "produced by list_images()" ) # Retrieve a list of currently available nodes for the provided cloud # service node_list = self.list_nodes( ex_cloud_service_name=ex_cloud_service_name ) if ex_network_config is None: network_config = ConfigurationSet() else: network_config = ex_network_config network_config.configuration_set_type = 'NetworkConfiguration' # Base64 encode custom data if provided if ex_custom_data: ex_custom_data = self._encode_base64(data=ex_custom_data) # We do this because we need to pass a Configuration to the # method. This will be either Linux or Windows. 
if WINDOWS_SERVER_REGEX.search(image.id, re.I): machine_config = WindowsConfigurationSet( computer_name=name, admin_password=password, admin_user_name=ex_admin_user_id ) machine_config.domain_join = None if not node_list or ex_new_deployment: port = "3389" else: port = random.randint(41952, 65535) endpoints = self._get_deployment( service_name=ex_cloud_service_name, deployment_slot=ex_deployment_slot ) for instances in endpoints.role_instance_list: ports = [ep.public_port for ep in instances.instance_endpoints] while port in ports: port = random.randint(41952, 65535) endpoint = ConfigurationSetInputEndpoint( name='Remote Desktop', protocol='tcp', port=port, local_port='3389', load_balanced_endpoint_set_name=None, enable_direct_server_return=False ) else: if not node_list or ex_new_deployment: port = "22" else: port = random.randint(41952, 65535) endpoints = self._get_deployment( service_name=ex_cloud_service_name, deployment_slot=ex_deployment_slot ) for instances in endpoints.role_instance_list: ports = [] if instances.instance_endpoints is not None: for ep in instances.instance_endpoints: ports += [ep.public_port] while port in ports: port = random.randint(41952, 65535) endpoint = ConfigurationSetInputEndpoint( name='SSH', protocol='tcp', port=port, local_port='22', load_balanced_endpoint_set_name=None, enable_direct_server_return=False ) machine_config = LinuxConfigurationSet( name, ex_admin_user_id, password, False, ex_custom_data ) network_config.input_endpoints.items.append(endpoint) _storage_location = self._get_cloud_service_location( service_name=ex_cloud_service_name ) if ex_storage_service_name is None: ex_storage_service_name = ex_cloud_service_name ex_storage_service_name = re.sub( r'[\W_-]+', '', ex_storage_service_name.lower(), flags=re.UNICODE ) if self._is_storage_service_unique( service_name=ex_storage_service_name): self._create_storage_account( service_name=ex_storage_service_name, location=_storage_location.service_location, 
is_affinity_group=_storage_location.is_affinity_group ) # OK, bit annoying here. You must create a deployment before # you can create an instance; however, the deployment function # creates the first instance, but all subsequent instances # must be created using the add_role function. # # So, yeah, annoying. if not node_list or ex_new_deployment: # This is the first node in this cloud service. if not ex_deployment_name: ex_deployment_name = ex_cloud_service_name vm_image_id = None disk_config = None if image.extra.get('vm_image', False): vm_image_id = image.id # network_config = None else: blob_url = "http://%s.blob.core.windows.net" % ( ex_storage_service_name) # Azure's pattern in the UI. disk_name = "%s-%s-%s.vhd" % ( ex_cloud_service_name, name, time.strftime("%Y-%m-%d") ) media_link = "%s/vhds/%s" % (blob_url, disk_name) disk_config = OSVirtualHardDisk(image.id, media_link) response = self._perform_post( self._get_deployment_path_using_name(ex_cloud_service_name), AzureXmlSerializer.virtual_machine_deployment_to_xml( ex_deployment_name, ex_deployment_slot, name, name, machine_config, disk_config, 'PersistentVMRole', network_config, None, None, size.id, ex_virtual_network_name, vm_image_id ) ) self.raise_for_response(response, 202) self._ex_complete_async_azure_operation(response) else: _deployment_name = self._get_deployment( service_name=ex_cloud_service_name, deployment_slot=ex_deployment_slot ).name vm_image_id = None disk_config = None if image.extra.get('vm_image', False): vm_image_id = image.id # network_config = None else: blob_url = "http://%s.blob.core.windows.net" % ( ex_storage_service_name ) disk_name = "%s-%s-%s.vhd" % ( ex_cloud_service_name, name, time.strftime("%Y-%m-%d") ) media_link = "%s/vhds/%s" % (blob_url, disk_name) disk_config = OSVirtualHardDisk(image.id, media_link) path = self._get_role_path(ex_cloud_service_name, _deployment_name) body = AzureXmlSerializer.add_role_to_xml( name, # role_name machine_config, # system_config 
disk_config, # os_virtual_hard_disk 'PersistentVMRole', # role_type network_config, # network_config None, # availability_set_name None, # data_virtual_hard_disks vm_image_id, # vm_image size.id # role_size ) response = self._perform_post(path, body) self.raise_for_response(response, 202) self._ex_complete_async_azure_operation(response) return Node( id=name, name=name, state=NodeState.PENDING, public_ips=[], private_ips=[], driver=self.connection.driver, extra={ 'ex_cloud_service_name': ex_cloud_service_name } ) def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot="Production"): """ Remove Azure Virtual Machine This removes the instance, but does not remove the disk. You will need to use destroy_volume. Azure sometimes has an issue where it will hold onto a blob lease for an extended amount of time. :keyword ex_cloud_service_name: Required. Name of the Azure Cloud Service. :type ex_cloud_service_name: ``str`` :keyword ex_deployment_slot: Optional: The name of the deployment slot. If this is not passed in we default to production. 
    def ex_list_cloud_services(self):
        # List every hosted (cloud) service in the subscription.
        return self._perform_get(
            self._get_hosted_service_path(),
            HostedServices
        )

    def ex_create_cloud_service(self, name, location, description=None,
                                extended_properties=None):
        """
        Create an azure cloud service.

        :param      name: Name of the service to create
        :type       name: ``str``

        :param      location: Standard azure location string
        :type       location: ``str``

        :param      description: Optional description
        :type       description: ``str``

        :param      extended_properties: Optional extended_properties
        :type       extended_properties: ``dict``

        :rtype: ``bool``
        """
        response = self._perform_cloud_service_create(
            self._get_hosted_service_path(),
            AzureXmlSerializer.create_hosted_service_to_xml(
                name,
                self._encode_base64(name),  # label must be base64-encoded
                description,
                location,
                None,
                extended_properties
            )
        )

        self.raise_for_response(response, 201)

        return True

    def ex_destroy_cloud_service(self, name):
        """
        Delete an azure cloud service.

        :param      name: Name of the cloud service to destroy.
        :type       name: ``str``

        :rtype: ``bool``
        """
        response = self._perform_cloud_service_delete(
            self._get_hosted_service_path(name)
        )

        self.raise_for_response(response, 200)

        return True

    def ex_add_instance_endpoints(self, node, endpoints,
                                  ex_deployment_slot="Production"):
        # Append new endpoints to the node's existing ones, then push the
        # combined set in a single update.
        all_endpoints = [
            {
                "name": endpoint.name,
                "protocol": endpoint.protocol,
                "port": endpoint.public_port,
                "local_port": endpoint.local_port,
            }
            for endpoint in node.extra['instance_endpoints']
        ]

        all_endpoints.extend(endpoints)

        result = self.ex_set_instance_endpoints(node, all_endpoints,
                                                ex_deployment_slot)
        return result

    def ex_set_instance_endpoints(self, node, endpoints,
                                  ex_deployment_slot="Production"):
        """
        For example::

            endpoint = ConfigurationSetInputEndpoint(
                name='SSH',
                protocol='tcp',
                port=port,
                local_port='22',
                load_balanced_endpoint_set_name=None,
                enable_direct_server_return=False
            )
            {
                'name': 'SSH',
                'protocol': 'tcp',
                'port': port,
                'local_port': '22'
            }
        """
        ex_cloud_service_name = node.extra['ex_cloud_service_name']
        vm_role_name = node.name

        network_config = ConfigurationSet()
        network_config.configuration_set_type = 'NetworkConfiguration'

        for endpoint in endpoints:
            new_endpoint = ConfigurationSetInputEndpoint(**endpoint)
            network_config.input_endpoints.items.append(new_endpoint)

        _deployment_name = self._get_deployment(
            service_name=ex_cloud_service_name,
            deployment_slot=ex_deployment_slot
        ).name

        # PUT on the role replaces only the network configuration; every
        # other field is passed as None so Azure leaves it untouched.
        response = self._perform_put(
            self._get_role_path(
                ex_cloud_service_name,
                _deployment_name,
                vm_role_name
            ),
            AzureXmlSerializer.add_role_to_xml(
                None,  # role_name
                None,  # system_config
                None,  # os_virtual_hard_disk
                'PersistentVMRole',  # role_type
                network_config,  # network_config
                None,  # availability_set_name
                None,  # data_virtual_hard_disks
                None,  # vm_image
                None  # role_size
            )
        )

        self.raise_for_response(response, 202)

    def ex_create_storage_service(self, name, location,
                                  description=None, affinity_group=None,
                                  extended_properties=None):
        """
        Create an azure storage service.

        :param      name: Name of the service to create
        :type       name: ``str``

        :param      location: Standard azure location string
        :type       location: ``str``

        :param      description: (Optional) Description of storage service.
        :type       description: ``str``

        :param      affinity_group: (Optional) Azure affinity group.
        :type       affinity_group: ``str``

        :param      extended_properties: (Optional) Additional configuration
                                         options support by Azure.
        :type       extended_properties: ``dict``

        :rtype: ``bool``
        """
        response = self._perform_storage_service_create(
            self._get_storage_service_path(),
            AzureXmlSerializer.create_storage_service_to_xml(
                service_name=name,
                label=self._encode_base64(name),
                description=description,
                location=location,
                affinity_group=affinity_group,
                extended_properties=extended_properties
            )
        )

        self.raise_for_response(response, 202)

        return True

    def ex_destroy_storage_service(self, name):
        """
        Destroy storage service. Storage service must not have any active
        blobs. Sometimes Azure likes to hold onto volumes after they are
        deleted for an inordinate amount of time, so sleep before calling
        this method after volume deletion.

        :param name: Name of storage service.
        :type  name: ``str``

        :rtype: ``bool``
        """
        response = self._perform_storage_service_delete(
            self._get_storage_service_path(name)
        )
        self.raise_for_response(response, 200)

        return True

    """
    Functions not implemented
    """

    def create_volume_snapshot(self):
        raise NotImplementedError(
            'You cannot create snapshots of '
            'Azure VMs at this time.'
        )

    def attach_volume(self):
        raise NotImplementedError(
            'attach_volume is not supported '
            'at this time.'
        )

    def create_volume(self):
        raise NotImplementedError(
            'create_volume is not supported '
            'at this time.'
        )

    def detach_volume(self):
        raise NotImplementedError(
            'detach_volume is not supported '
            'at this time.'
        )

    def destroy_volume(self):
        raise NotImplementedError(
            'destroy_volume is not supported '
            'at this time.'
        )
    """
    Private Functions
    """

    def _perform_cloud_service_create(self, path, data):
        # Build and send a raw POST against the management endpoint.
        request = AzureHTTPRequest()
        request.method = 'POST'
        request.host = AZURE_SERVICE_MANAGEMENT_HOST
        request.path = path
        request.body = data
        request.path, request.query = self._update_request_uri_query(request)
        request.headers = self._update_management_header(request)
        response = self._perform_request(request)

        return response

    def _perform_cloud_service_delete(self, path):
        request = AzureHTTPRequest()
        request.method = 'DELETE'
        request.host = AZURE_SERVICE_MANAGEMENT_HOST
        request.path = path
        request.path, request.query = self._update_request_uri_query(request)
        request.headers = self._update_management_header(request)
        response = self._perform_request(request)

        return response

    def _perform_storage_service_create(self, path, data):
        request = AzureHTTPRequest()
        request.method = 'POST'
        request.host = AZURE_SERVICE_MANAGEMENT_HOST
        request.path = path
        request.body = data
        request.path, request.query = self._update_request_uri_query(request)
        request.headers = self._update_management_header(request)
        response = self._perform_request(request)

        return response

    def _perform_storage_service_delete(self, path):
        request = AzureHTTPRequest()
        request.method = 'DELETE'
        request.host = AZURE_SERVICE_MANAGEMENT_HOST
        request.path = path
        request.path, request.query = self._update_request_uri_query(request)
        request.headers = self._update_management_header(request)
        response = self._perform_request(request)

        return response

    def _to_node(self, data, ex_cloud_service_name=None, virtual_ips=None):
        """
        Convert the data from a Azure response object into a Node
        """
        remote_desktop_port = ''
        ssh_port = ''
        public_ips = virtual_ips or []

        if data.instance_endpoints is not None:
            if len(data.instance_endpoints) >= 1:
                # The first endpoint's VIP takes precedence over the
                # deployment-level virtual IPs passed in.
                public_ips = [data.instance_endpoints[0].vip]

            for port in data.instance_endpoints:
                if port.name == 'Remote Desktop':
                    remote_desktop_port = port.public_port

                if port.name == "SSH":
                    ssh_port = port.public_port

        return Node(
            id=data.role_name,
            name=data.role_name,
            state=self.NODE_STATE_MAP.get(
                data.instance_status,
                NodeState.UNKNOWN
            ),
            public_ips=public_ips,
            private_ips=[data.ip_address],
            driver=self.connection.driver,
            extra={
                'instance_endpoints': data.instance_endpoints,
                'remote_desktop_port': remote_desktop_port,
                'ssh_port': ssh_port,
                'power_state': data.power_state,
                'instance_size': data.instance_size,
                'ex_cloud_service_name': ex_cloud_service_name
            }
        )

    def _to_location(self, data):
        """
        Convert the data from a Azure response object into a location
        """
        country = data.display_name

        # Substring matching; checks are sequential so a later match
        # overrides an earlier one (e.g. "US" wins over "Asia").
        if "Asia" in data.display_name:
            country = "Asia"

        if "Europe" in data.display_name:
            country = "Europe"

        if "US" in data.display_name:
            country = "US"

        if "Japan" in data.display_name:
            country = "Japan"

        if "Brazil" in data.display_name:
            country = "Brazil"

        vm_role_sizes = data.compute_capabilities.virtual_machines_role_sizes

        return AzureNodeLocation(
            id=data.name,
            name=data.display_name,
            country=country,
            driver=self.connection.driver,
            available_services=data.available_services,
            virtual_machine_role_sizes=vm_role_sizes
        )

    def _to_node_size(self, data):
        """
        Convert the AZURE_COMPUTE_INSTANCE_TYPES into NodeSize
        """
        return NodeSize(
            id=data["id"],
            name=data["name"],
            ram=data["ram"],
            disk=data["disk"],
            bandwidth=data["bandwidth"],
            price=data["price"],
            driver=self.connection.driver,
            extra={
                'max_data_disks': data["max_data_disks"],
                'cores': data["cores"]
            }
        )

    def _to_image(self, data):
        # Platform (gallery) image -> NodeImage; vm_image=False marks it as
        # a non-captured image for create_node().
        return NodeImage(
            id=data.name,
            name=data.label,
            driver=self.connection.driver,
            extra={
                'os': data.os,
                'category': data.category,
                'description': data.description,
                'location': data.location,
                'affinity_group': data.affinity_group,
                'media_link': data.media_link,
                'vm_image': False
            }
        )

    def _vm_to_image(self, data):
        # User-captured VM image -> NodeImage; vm_image=True changes how
        # create_node() builds the deployment.
        return NodeImage(
            id=data.name,
            name=data.label,
            driver=self.connection.driver,
            extra={
                'os': data.os_disk_configuration.os,
                'category': data.category,
                'location': data.location,
                'media_link': data.os_disk_configuration.media_link,
                'affinity_group': data.affinity_group,
                'deployment_name': data.deployment_name,
                'vm_image': True
            }
        )
'affinity_group': data.affinity_group, 'deployment_name': data.deployment_name, 'vm_image': True } ) def _to_volume(self, volume, node): extra = { 'affinity_group': volume.affinity_group, 'os': volume.os, 'location': volume.location, 'media_link': volume.media_link, 'source_image_name': volume.source_image_name } role_name = getattr(volume.attached_to, 'role_name', None) hosted_service_name = getattr( volume.attached_to, 'hosted_service_name', None ) deployment_name = getattr( volume.attached_to, 'deployment_name', None ) if role_name is not None: extra['role_name'] = role_name if hosted_service_name is not None: extra['hosted_service_name'] = hosted_service_name if deployment_name is not None: extra['deployment_name'] = deployment_name if node: if role_name is not None and role_name == node.id: return StorageVolume( id=volume.name, name=volume.name, size=int(volume.logical_disk_size_in_gb), driver=self.connection.driver, extra=extra ) else: return StorageVolume( id=volume.name, name=volume.name, size=int(volume.logical_disk_size_in_gb), driver=self.connection.driver, extra=extra ) def _get_deployment(self, **kwargs): _service_name = kwargs['service_name'] _deployment_slot = kwargs['deployment_slot'] response = self._perform_get( self._get_deployment_path_using_slot( _service_name, _deployment_slot ), None ) self.raise_for_response(response, 200) return self._parse_response(response, Deployment) def _get_cloud_service_location(self, service_name=None): if not service_name: raise ValueError("service_name is required.") res = self._perform_get( '%s?embed-detail=False' % ( self._get_hosted_service_path(service_name) ), HostedService ) _affinity_group = res.hosted_service_properties.affinity_group _cloud_service_location = res.hosted_service_properties.location if _affinity_group is not None and _affinity_group is not '': return self.service_location(True, _affinity_group) elif _cloud_service_location is not None: return self.service_location(False, 
_cloud_service_location) else: return None def _is_storage_service_unique(self, service_name=None): if not service_name: raise ValueError("service_name is required.") _check_availability = self._perform_get( '%s/operations/isavailable/%s%s' % ( self._get_storage_service_path(), _str(service_name), '' ), AvailabilityResponse ) self.raise_for_response(_check_availability, 200) return _check_availability.result def _create_storage_account(self, **kwargs): if kwargs['is_affinity_group'] is True: response = self._perform_post( self._get_storage_service_path(), AzureXmlSerializer.create_storage_service_input_to_xml( kwargs['service_name'], kwargs['service_name'], self._encode_base64(kwargs['service_name']), kwargs['location'], None, # Location True, # geo_replication_enabled None # extended_properties ) ) self.raise_for_response(response, 202) else: response = self._perform_post( self._get_storage_service_path(), AzureXmlSerializer.create_storage_service_input_to_xml( kwargs['service_name'], kwargs['service_name'], self._encode_base64(kwargs['service_name']), None, # Affinity Group kwargs['location'], # Location True, # geo_replication_enabled None # extended_properties ) ) self.raise_for_response(response, 202) # We need to wait for this to be created before we can # create the storage container and the instance. 
self._ex_complete_async_azure_operation( response, "create_storage_account" ) def _get_operation_status(self, request_id): return self._perform_get( '/' + self.subscription_id + '/operations/' + _str(request_id), Operation ) def _perform_get(self, path, response_type): request = AzureHTTPRequest() request.method = 'GET' request.host = AZURE_SERVICE_MANAGEMENT_HOST request.path = path request.path, request.query = self._update_request_uri_query(request) request.headers = self._update_management_header(request) response = self._perform_request(request) if response_type is not None: return self._parse_response(response, response_type) return response def _perform_post(self, path, body, response_type=None, async=False): request = AzureHTTPRequest() request.method = 'POST' request.host = AZURE_SERVICE_MANAGEMENT_HOST request.path = path request.body = ensure_string(self._get_request_body(body)) request.path, request.query = self._update_request_uri_query(request) request.headers = self._update_management_header(request) response = self._perform_request(request) return response def _perform_put(self, path, body, response_type=None, async=False): request = AzureHTTPRequest() request.method = 'PUT' request.host = AZURE_SERVICE_MANAGEMENT_HOST request.path = path request.body = ensure_string(self._get_request_body(body)) request.path, request.query = self._update_request_uri_query(request) request.headers = self._update_management_header(request) response = self._perform_request(request) return response def _perform_delete(self, path, async=False): request = AzureHTTPRequest() request.method = 'DELETE' request.host = AZURE_SERVICE_MANAGEMENT_HOST request.path = path request.path, request.query = self._update_request_uri_query(request) request.headers = self._update_management_header(request) response = self._perform_request(request) self.raise_for_response(response, 202) if async: return self._parse_response_for_async_op(response) def _perform_request(self, request): try: 
            return self.connection.request(
                action=request.path,
                data=request.body,
                headers=request.headers,
                method=request.method
            )
        except AzureRedirectException:
            # The management endpoint can redirect to another host; follow
            # the Location header and retry the same request there.
            e = sys.exc_info()[1]
            parsed_url = urlparse.urlparse(e.location)
            request.host = parsed_url.netloc
            return self._perform_request(request)
        except Exception as e:
            raise e

    def _update_request_uri_query(self, request):
        """
        pulls the query string out of the URI and moves it into
        the query portion of the request object.  If there are already
        query parameters on the request the parameters in the URI will
        appear after the existing parameters
        """
        if '?' in request.path:
            request.path, _, query_string = request.path.partition('?')
            if query_string:
                query_params = query_string.split('&')
                for query in query_params:
                    if '=' in query:
                        name, _, value = query.partition('=')
                        request.query.append((name, value))

        request.path = url_quote(request.path, '/()$=\',')

        # add encoded queries to request.path.
        if request.query:
            request.path += '?'
            for name, value in request.query:
                if value is not None:
                    request.path += '%s=%s%s' % (
                        name,
                        url_quote(value, '/()$=\','),
                        '&'
                    )
            # Drop the trailing '&' appended by the loop above.
            request.path = request.path[:-1]

        return request.path, request.query

    def _update_management_header(self, request):
        """
        Add additional headers for management.
        """
        if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
            request.headers['Content-Length'] = str(len(request.body))

        # append additional headers base on the service
        # request.headers.append(('x-ms-version', X_MS_VERSION))

        # if it is not GET or HEAD request, must set content-type.
        if request.method not in ['GET', 'HEAD']:
            # for/else: the default Content-Type is only set when no
            # content-type header (any casing) is already present.
            for key in request.headers:
                if 'content-type' == key.lower():
                    break
            else:
                request.headers['Content-Type'] = 'application/xml'

        return request.headers

    def _parse_response(self, response, return_type):
        """
        Parse the HTTPResponse's body and fill all the data into a class of
        return_type.
""" return self._parse_response_body_from_xml_text( response=response, return_type=return_type ) def _parse_response_body_from_xml_text(self, response, return_type): """ parse the xml and fill all the data into a class of return_type """ respbody = response.body doc = minidom.parseString(respbody) return_obj = return_type() for node in self._get_child_nodes(doc, return_type.__name__): self._fill_data_to_return_object(node, return_obj) # Note: We always explicitly assign status code to the custom return # type object return_obj.status = response.status return return_obj def _get_child_nodes(self, node, tag_name): return [childNode for childNode in node.getElementsByTagName(tag_name) if childNode.parentNode == node] def _fill_data_to_return_object(self, node, return_obj): members = dict(vars(return_obj)) for name, value in members.items(): if isinstance(value, _ListOf): setattr( return_obj, name, self._fill_list_of( node, value.list_type, value.xml_element_name ) ) elif isinstance(value, ScalarListOf): setattr( return_obj, name, self._fill_scalar_list_of( node, value.list_type, self._get_serialization_name(name), value.xml_element_name ) ) elif isinstance(value, _DictOf): setattr( return_obj, name, self._fill_dict_of( node, self._get_serialization_name(name), value.pair_xml_element_name, value.key_xml_element_name, value.value_xml_element_name ) ) elif isinstance(value, WindowsAzureData): setattr( return_obj, name, self._fill_instance_child(node, name, value.__class__) ) elif isinstance(value, dict): setattr( return_obj, name, self._fill_dict( node, self._get_serialization_name(name) ) ) elif isinstance(value, _Base64String): value = self._fill_data_minidom(node, name, '') if value is not None: value = self._decode_base64_to_text(value) # always set the attribute, # so we don't end up returning an object # with type _Base64String setattr(return_obj, name, value) else: value = self._fill_data_minidom(node, name, value) if value is not None: setattr(return_obj, name, 
value) def _fill_list_of(self, xmldoc, element_type, xml_element_name): xmlelements = self._get_child_nodes(xmldoc, xml_element_name) return [ self._parse_response_body_from_xml_node(xmlelement, element_type) for xmlelement in xmlelements ] def _parse_response_body_from_xml_node(self, node, return_type): """ parse the xml and fill all the data into a class of return_type """ return_obj = return_type() self._fill_data_to_return_object(node, return_obj) return return_obj def _fill_scalar_list_of(self, xmldoc, element_type, parent_xml_element_name, xml_element_name): xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) if xmlelements: xmlelements = self._get_child_nodes( xmlelements[0], xml_element_name ) return [ self._get_node_value(xmlelement, element_type) for xmlelement in xmlelements ] def _get_node_value(self, xmlelement, data_type): value = xmlelement.firstChild.nodeValue if data_type is datetime: return self._to_datetime(value) elif data_type is bool: return value.lower() != 'false' else: return data_type(value) def _get_serialization_name(self, element_name): """ Converts a Python name into a serializable name. 
""" known = _KNOWN_SERIALIZATION_XFORMS.get(element_name) if known is not None: return known if element_name.startswith('x_ms_'): return element_name.replace('_', '-') if element_name.endswith('_id'): element_name = element_name.replace('_id', 'ID') for name in ['content_', 'last_modified', 'if_', 'cache_control']: if element_name.startswith(name): element_name = element_name.replace('_', '-_') return ''.join(name.capitalize() for name in element_name.split('_')) def _fill_dict_of(self, xmldoc, parent_xml_element_name, pair_xml_element_name, key_xml_element_name, value_xml_element_name): return_obj = {} xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) if xmlelements: xmlelements = self._get_child_nodes( xmlelements[0], pair_xml_element_name ) for pair in xmlelements: keys = self._get_child_nodes(pair, key_xml_element_name) values = self._get_child_nodes(pair, value_xml_element_name) if keys and values: key = keys[0].firstChild.nodeValue value = values[0].firstChild.nodeValue return_obj[key] = value return return_obj def _fill_instance_child(self, xmldoc, element_name, return_type): """ Converts a child of the current dom element to the specified type. 
""" xmlelements = self._get_child_nodes( xmldoc, self._get_serialization_name(element_name) ) if not xmlelements: return None return_obj = return_type() self._fill_data_to_return_object(xmlelements[0], return_obj) return return_obj def _fill_dict(self, xmldoc, element_name): xmlelements = self._get_child_nodes(xmldoc, element_name) if xmlelements: return_obj = {} for child in xmlelements[0].childNodes: if child.firstChild: return_obj[child.nodeName] = child.firstChild.nodeValue return return_obj def _encode_base64(self, data): if isinstance(data, _unicode_type): data = data.encode('utf-8') encoded = base64.b64encode(data) return encoded.decode('utf-8') def _decode_base64_to_bytes(self, data): if isinstance(data, _unicode_type): data = data.encode('utf-8') return base64.b64decode(data) def _decode_base64_to_text(self, data): decoded_bytes = self._decode_base64_to_bytes(data) return decoded_bytes.decode('utf-8') def _fill_data_minidom(self, xmldoc, element_name, data_member): xmlelements = self._get_child_nodes( xmldoc, self._get_serialization_name(element_name) ) if not xmlelements or not xmlelements[0].childNodes: return None value = xmlelements[0].firstChild.nodeValue if data_member is None: return value elif isinstance(data_member, datetime): return self._to_datetime(value) elif type(data_member) is bool: return value.lower() != 'false' elif type(data_member) is str: return _real_unicode(value) else: return type(data_member)(value) def _to_datetime(self, strtime): return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f") def _get_request_body(self, request_body): if request_body is None: return b'' if isinstance(request_body, WindowsAzureData): request_body = self._convert_class_to_xml(request_body) if isinstance(request_body, bytes): return request_body if isinstance(request_body, _unicode_type): return request_body.encode('utf-8') request_body = str(request_body) if isinstance(request_body, _unicode_type): return request_body.encode('utf-8') return request_body 
    def _convert_class_to_xml(self, source, xml_prefix=True):
        # Serialize a WindowsAzureData object tree to an XML string.
        # NOTE(review): ``ET.Element()`` requires a tag argument, so this
        # raises TypeError whenever it is actually called — this path looks
        # dead (requests are serialized via AzureXmlSerializer instead);
        # confirm before relying on it.
        root = ET.Element()
        doc = self._construct_element_tree(source, root)

        result = ensure_string(ET.tostring(doc, encoding='utf-8',
                                           method='xml'))
        return result

    def _construct_element_tree(self, source, etree):
        # Recursively append elements for *source* onto *etree*.
        # NOTE(review): this also calls ``ET.Element()`` without a tag, and
        # it appends the class-name element and each child to *etree* itself
        # (not nested under the class element), then appends the class-name
        # element a second time — the produced tree cannot match the
        # management API schema.  Treat as broken/dead code.
        if source is None:
            return ET.Element()

        if isinstance(source, list):
            for value in source:
                etree.append(self._construct_element_tree(value, etree))
        elif isinstance(source, WindowsAzureData):
            class_name = source.__class__.__name__
            etree.append(ET.Element(class_name))

            for name, value in vars(source).items():
                if value is not None:
                    if (isinstance(value, list) or
                            isinstance(value, WindowsAzureData)):
                        etree.append(
                            self._construct_element_tree(value, etree)
                        )
                    else:
                        ele = ET.Element(self._get_serialization_name(name))
                        ele.text = xml_escape(str(value))
                        etree.append(ele)

            etree.append(ET.Element(class_name))
        return etree

    def _parse_response_for_async_op(self, response):
        # Extract the x-ms-request-id header into an
        # AsynchronousOperationResult used for status polling.
        if response is None:
            return None

        result = AsynchronousOperationResult()
        if response.headers:
            for name, value in response.headers.items():
                if name.lower() == 'x-ms-request-id':
                    result.request_id = value

        return result

    def _get_deployment_path_using_name(self, service_name,
                                        deployment_name=None):
        components = [
            'services/hostedservices/',
            _str(service_name),
            '/deployments'
        ]
        resource = ''.join(components)
        return self._get_path(resource, deployment_name)

    def _get_path(self, resource, name):
        # Build '/<subscription>/<resource>[/<name>]'.
        path = '/' + self.subscription_id + '/' + resource
        if name is not None:
            path += '/' + _str(name)
        return path

    def _get_image_path(self, image_name=None):
        return self._get_path('services/images', image_name)

    def _get_vmimage_path(self, image_name=None):
        return self._get_path('services/vmimages', image_name)

    def _get_hosted_service_path(self, service_name=None):
        return self._get_path('services/hostedservices', service_name)

    def _get_deployment_path_using_slot(self, service_name, slot=None):
        return self._get_path(
            'services/hostedservices/%s/deploymentslots' % (
_str(service_name) ), slot ) def _get_disk_path(self, disk_name=None): return self._get_path('services/disks', disk_name) def _get_role_path(self, service_name, deployment_name, role_name=None): components = [ 'services/hostedservices/', _str(service_name), '/deployments/', deployment_name, '/roles' ] resource = ''.join(components) return self._get_path(resource, role_name) def _get_storage_service_path(self, service_name=None): return self._get_path('services/storageservices', service_name) def _ex_complete_async_azure_operation(self, response=None, operation_type='create_node'): request_id = self._parse_response_for_async_op(response) operation_status = self._get_operation_status(request_id.request_id) timeout = 60 * 5 waittime = 0 interval = 5 while operation_status.status == "InProgress" and waittime < timeout: operation_status = self._get_operation_status(request_id) if operation_status.status == "Succeeded": break waittime += interval time.sleep(interval) if operation_status.status == 'Failed': raise LibcloudError( 'Message: Async request for operation %s has failed' % operation_type, driver=self.connection.driver ) def raise_for_response(self, response, valid_response): if response.status != valid_response: values = (response.error, response.body, response.status) message = 'Message: %s, Body: %s, Status code: %s' % (values) raise LibcloudError(message, driver=self) """ XML Serializer Borrowed from the Azure SDK for Python which is licensed under Apache 2.0. 
https://github.com/Azure/azure-sdk-for-python
"""


def _lower(text):
    # Converter used by the serializers below to render booleans as
    # "true"/"false" in the XML payloads.
    return text.lower()


class AzureXmlSerializer(object):
    # Builds the XML request documents for the Azure Service Management API.
    #
    # NOTE(review): several methods below pass
    # ``AzureNodeDriver._encode_base64`` — an *instance* method accessed
    # unbound — as a one-argument converter; data_to_xml calls
    # ``converter(value)``, which raises TypeError unless _encode_base64 is
    # made a staticmethod.  Verify before exercising those paths.

    @staticmethod
    def create_storage_service_input_to_xml(service_name, description, label,
                                            affinity_group, location,
                                            geo_replication_enabled,
                                            extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'CreateStorageServiceInput',
            [
                ('ServiceName', service_name),
                ('Description', description),
                ('Label', label),
                ('AffinityGroup', affinity_group),
                ('Location', location),
                ('GeoReplicationEnabled', geo_replication_enabled, _lower)
            ],
            extended_properties
        )

    @staticmethod
    def update_storage_service_input_to_xml(description, label,
                                            geo_replication_enabled,
                                            extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'UpdateStorageServiceInput',
            [
                ('Description', description),
                ('Label', label, AzureNodeDriver._encode_base64),
                ('GeoReplicationEnabled', geo_replication_enabled, _lower)
            ],
            extended_properties
        )

    @staticmethod
    def regenerate_keys_to_xml(key_type):
        return AzureXmlSerializer.doc_from_data(
            'RegenerateKeys',
            [('KeyType', key_type)]
        )

    @staticmethod
    def update_hosted_service_to_xml(label, description, extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'UpdateHostedService',
            [
                ('Label', label, AzureNodeDriver._encode_base64),
                ('Description', description)
            ],
            extended_properties
        )

    @staticmethod
    def create_hosted_service_to_xml(service_name, label, description,
                                     location, affinity_group=None,
                                     extended_properties=None):
        # An affinity group and an explicit location are mutually exclusive.
        if affinity_group:
            return AzureXmlSerializer.doc_from_data(
                'CreateHostedService',
                [
                    ('ServiceName', service_name),
                    ('Label', label),
                    ('Description', description),
                    ('AffinityGroup', affinity_group),
                ],
                extended_properties
            )

        return AzureXmlSerializer.doc_from_data(
            'CreateHostedService',
            [
                ('ServiceName', service_name),
                ('Label', label),
                ('Description', description),
                ('Location', location),
            ],
            extended_properties
        )

    @staticmethod
    def create_storage_service_to_xml(service_name, label, description,
                                      location, affinity_group,
                                      extended_properties=None):
        return AzureXmlSerializer.doc_from_data(
            'CreateStorageServiceInput',
            [
                ('ServiceName', service_name),
                ('Label', label),
                ('Description', description),
                ('Location', location),
                ('AffinityGroup', affinity_group)
            ],
            extended_properties
        )

    @staticmethod
    def create_deployment_to_xml(name, package_url, label, configuration,
                                 start_deployment, treat_warnings_as_error,
                                 extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'CreateDeployment',
            [
                ('Name', name),
                ('PackageUrl', package_url),
                ('Label', label, AzureNodeDriver._encode_base64),
                ('Configuration', configuration),
                ('StartDeployment', start_deployment, _lower),
                ('TreatWarningsAsError', treat_warnings_as_error, _lower)
            ],
            extended_properties
        )

    @staticmethod
    def swap_deployment_to_xml(production, source_deployment):
        return AzureXmlSerializer.doc_from_data(
            'Swap',
            [
                ('Production', production),
                ('SourceDeployment', source_deployment)
            ]
        )

    @staticmethod
    def update_deployment_status_to_xml(status):
        return AzureXmlSerializer.doc_from_data(
            'UpdateDeploymentStatus',
            [('Status', status)]
        )

    @staticmethod
    def change_deployment_to_xml(configuration, treat_warnings_as_error, mode,
                                 extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'ChangeConfiguration',
            [
                ('Configuration', configuration),
                ('TreatWarningsAsError', treat_warnings_as_error, _lower),
                ('Mode', mode)
            ],
            extended_properties
        )

    @staticmethod
    def upgrade_deployment_to_xml(mode, package_url, configuration, label,
                                  role_to_upgrade, force,
                                  extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'UpgradeDeployment',
            [
                ('Mode', mode),
                ('PackageUrl', package_url),
                ('Configuration', configuration),
                ('Label', label, AzureNodeDriver._encode_base64),
                ('RoleToUpgrade', role_to_upgrade),
                ('Force', force, _lower)
            ],
            extended_properties
        )

    @staticmethod
    def rollback_upgrade_to_xml(mode, force):
        return AzureXmlSerializer.doc_from_data(
            'RollbackUpdateOrUpgrade',
            [
                ('Mode', mode),
                ('Force', force, _lower)
            ]
        )

    @staticmethod
    def walk_upgrade_domain_to_xml(upgrade_domain):
        return AzureXmlSerializer.doc_from_data(
            'WalkUpgradeDomain',
            [('UpgradeDomain', upgrade_domain)]
        )

    @staticmethod
    def certificate_file_to_xml(data, certificate_format, password):
        return AzureXmlSerializer.doc_from_data(
            'CertificateFile',
            [
                ('Data', data),
                ('CertificateFormat', certificate_format),
                ('Password', password)
            ]
        )

    @staticmethod
    def create_affinity_group_to_xml(name, label, description, location):
        return AzureXmlSerializer.doc_from_data(
            'CreateAffinityGroup',
            [
                ('Name', name),
                ('Label', label, AzureNodeDriver._encode_base64),
                ('Description', description),
                ('Location', location)
            ]
        )

    @staticmethod
    def update_affinity_group_to_xml(label, description):
        return AzureXmlSerializer.doc_from_data(
            'UpdateAffinityGroup',
            [
                ('Label', label, AzureNodeDriver._encode_base64),
                ('Description', description)
            ]
        )

    @staticmethod
    def subscription_certificate_to_xml(public_key, thumbprint, data):
        return AzureXmlSerializer.doc_from_data(
            'SubscriptionCertificate',
            [
                ('SubscriptionCertificatePublicKey', public_key),
                ('SubscriptionCertificateThumbprint', thumbprint),
                ('SubscriptionCertificateData', data)
            ]
        )

    @staticmethod
    def os_image_to_xml(label, media_link, name, os):
        return AzureXmlSerializer.doc_from_data(
            'OSImage',
            [
                ('Label', label),
                ('MediaLink', media_link),
                ('Name', name),
                ('OS', os)
            ]
        )

    @staticmethod
    def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name,
                                      lun, logical_disk_size_in_gb,
                                      media_link, source_media_link):
        return AzureXmlSerializer.doc_from_data(
            'DataVirtualHardDisk',
            [
                ('HostCaching', host_caching),
                ('DiskLabel', disk_label),
                ('DiskName', disk_name),
                ('Lun', lun),
                ('LogicalDiskSizeInGB', logical_disk_size_in_gb),
                ('MediaLink', media_link),
                ('SourceMediaLink', source_media_link)
            ]
        )

    @staticmethod
    def disk_to_xml(has_operating_system, label, media_link, name, os):
        return AzureXmlSerializer.doc_from_data(
            'Disk',
            [
                ('HasOperatingSystem', has_operating_system, _lower),
                ('Label', label),
                ('MediaLink',
media_link), ('Name', name), ('OS', os) ] ) @staticmethod def restart_role_operation_to_xml(): xml = ET.Element("OperationType") xml.text = "RestartRoleOperation" doc = AzureXmlSerializer.doc_from_xml( 'RestartRoleOperation', xml ) result = ensure_string(ET.tostring(doc, encoding='utf-8')) return result @staticmethod def shutdown_role_operation_to_xml(): xml = ET.Element("OperationType") xml.text = "ShutdownRoleOperation" doc = AzureXmlSerializer.doc_from_xml( 'ShutdownRoleOperation', xml ) result = ensure_string(ET.tostring(doc, encoding='utf-8')) return result @staticmethod def start_role_operation_to_xml(): xml = ET.Element("OperationType") xml.text = "StartRoleOperation" doc = AzureXmlSerializer.doc_from_xml( 'StartRoleOperation', xml ) result = ensure_string(ET.tostring(doc, encoding='utf-8')) return result @staticmethod def windows_configuration_to_xml(configuration, xml): AzureXmlSerializer.data_to_xml( [('ConfigurationSetType', configuration.configuration_set_type)], xml ) AzureXmlSerializer.data_to_xml( [('ComputerName', configuration.computer_name)], xml ) AzureXmlSerializer.data_to_xml( [('AdminPassword', configuration.admin_password)], xml ) AzureXmlSerializer.data_to_xml( [ ( 'ResetPasswordOnFirstLogon', configuration.reset_password_on_first_logon, _lower ) ], xml ) AzureXmlSerializer.data_to_xml( [ ( 'EnableAutomaticUpdates', configuration.enable_automatic_updates, _lower ) ], xml ) AzureXmlSerializer.data_to_xml( [('TimeZone', configuration.time_zone)], xml ) if configuration.domain_join is not None: domain = ET.xml("DomainJoin") creds = ET.xml("Credentials") domain.appemnd(creds) xml.append(domain) AzureXmlSerializer.data_to_xml( [('Domain', configuration.domain_join.credentials.domain)], creds ) AzureXmlSerializer.data_to_xml( [ ( 'Username', configuration.domain_join.credentials.username ) ], creds ) AzureXmlSerializer.data_to_xml( [ ( 'Password', configuration.domain_join.credentials.password ) ], creds ) AzureXmlSerializer.data_to_xml( 
[('JoinDomain', configuration.domain_join.join_domain)], domain ) AzureXmlSerializer.data_to_xml( [ ( 'MachineObjectOU', configuration.domain_join.machine_object_ou ) ], domain ) if configuration.stored_certificate_settings is not None: cert_settings = ET.Element("StoredCertificateSettings") xml.append(cert_settings) for cert in configuration.stored_certificate_settings: cert_setting = ET.Element("CertificateSetting") cert_settings.append(cert_setting) cert_setting.append(AzureXmlSerializer.data_to_xml( [('StoreLocation', cert.store_location)]) ) AzureXmlSerializer.data_to_xml( [('StoreName', cert.store_name)], cert_setting ) AzureXmlSerializer.data_to_xml( [('Thumbprint', cert.thumbprint)], cert_setting ) AzureXmlSerializer.data_to_xml( [('AdminUsername', configuration.admin_user_name)], xml ) return xml @staticmethod def linux_configuration_to_xml(configuration, xml): AzureXmlSerializer.data_to_xml( [('ConfigurationSetType', configuration.configuration_set_type)], xml ) AzureXmlSerializer.data_to_xml( [('HostName', configuration.host_name)], xml ) AzureXmlSerializer.data_to_xml( [('UserName', configuration.user_name)], xml ) AzureXmlSerializer.data_to_xml( [('UserPassword', configuration.user_password)], xml ) AzureXmlSerializer.data_to_xml( [ ( 'DisableSshPasswordAuthentication', configuration.disable_ssh_password_authentication, _lower ) ], xml ) if configuration.ssh is not None: ssh = ET.Element("SSH") pkeys = ET.Element("PublicKeys") kpairs = ET.Element("KeyPairs") ssh.append(pkeys) ssh.append(kpairs) xml.append(ssh) for key in configuration.ssh.public_keys: pkey = ET.Element("PublicKey") pkeys.append(pkey) AzureXmlSerializer.data_to_xml( [('Fingerprint', key.fingerprint)], pkey ) AzureXmlSerializer.data_to_xml([('Path', key.path)], pkey) for key in configuration.ssh.key_pairs: kpair = ET.Element("KeyPair") kpairs.append(kpair) AzureXmlSerializer.data_to_xml( [('Fingerprint', key.fingerprint)], kpair ) AzureXmlSerializer.data_to_xml([('Path', key.path)], 
                                               kpair)

        if configuration.custom_data is not None:
            AzureXmlSerializer.data_to_xml(
                [('CustomData', configuration.custom_data)],
                xml
            )

        return xml

    @staticmethod
    def network_configuration_to_xml(configuration, xml):
        # Serialize a network ConfigurationSet (input endpoints, optional
        # load balancer probes and subnet names) into *xml*.
        AzureXmlSerializer.data_to_xml(
            [('ConfigurationSetType', configuration.configuration_set_type)],
            xml
        )

        input_endpoints = ET.Element("InputEndpoints")
        xml.append(input_endpoints)

        for endpoint in configuration.input_endpoints:
            input_endpoint = ET.Element("InputEndpoint")
            input_endpoints.append(input_endpoint)

            AzureXmlSerializer.data_to_xml(
                [
                    (
                        'LoadBalancedEndpointSetName',
                        endpoint.load_balanced_endpoint_set_name
                    )
                ],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [('LocalPort', endpoint.local_port)],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [('Name', endpoint.name)],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [('Port', endpoint.port)],
                input_endpoint
            )

            # The probe element is only emitted when at least one of its
            # fields is populated.
            if (endpoint.load_balancer_probe.path or
                    endpoint.load_balancer_probe.port or
                    endpoint.load_balancer_probe.protocol):
                load_balancer_probe = ET.Element("LoadBalancerProbe")
                input_endpoint.append(load_balancer_probe)
                AzureXmlSerializer.data_to_xml(
                    [('Path', endpoint.load_balancer_probe.path)],
                    load_balancer_probe
                )
                AzureXmlSerializer.data_to_xml(
                    [('Port', endpoint.load_balancer_probe.port)],
                    load_balancer_probe
                )
                AzureXmlSerializer.data_to_xml(
                    [('Protocol', endpoint.load_balancer_probe.protocol)],
                    load_balancer_probe
                )

            AzureXmlSerializer.data_to_xml(
                [('Protocol', endpoint.protocol)],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [
                    (
                        'EnableDirectServerReturn',
                        endpoint.enable_direct_server_return,
                        _lower
                    )
                ],
                input_endpoint
            )

        subnet_names = ET.Element("SubnetNames")
        xml.append(subnet_names)
        for name in configuration.subnet_names:
            AzureXmlSerializer.data_to_xml(
                [('SubnetName', name)],
                subnet_names
            )

        return xml

    @staticmethod
    def role_to_xml(availability_set_name, data_virtual_hard_disks,
                    network_configuration_set, os_virtual_hard_disk,
                    vm_image_name, role_name, role_size, role_type,
                    system_configuration_set, xml):
        # Serialize one deployment role (VM definition) into *xml*:
        # configuration sets, data disks, OS disk and sizing information.
        AzureXmlSerializer.data_to_xml([('RoleName', role_name)], xml)
        AzureXmlSerializer.data_to_xml([('RoleType', role_type)], xml)

        config_sets = ET.Element("ConfigurationSets")
        xml.append(config_sets)

        if system_configuration_set is not None:
            config_set = ET.Element("ConfigurationSet")
            config_sets.append(config_set)

            if isinstance(system_configuration_set, WindowsConfigurationSet):
                AzureXmlSerializer.windows_configuration_to_xml(
                    system_configuration_set,
                    config_set
                )
            elif isinstance(system_configuration_set, LinuxConfigurationSet):
                AzureXmlSerializer.linux_configuration_to_xml(
                    system_configuration_set,
                    config_set
                )

        if network_configuration_set is not None:
            config_set = ET.Element("ConfigurationSet")
            config_sets.append(config_set)

            AzureXmlSerializer.network_configuration_to_xml(
                network_configuration_set,
                config_set
            )

        if availability_set_name is not None:
            AzureXmlSerializer.data_to_xml(
                [('AvailabilitySetName', availability_set_name)],
                xml
            )

        if data_virtual_hard_disks is not None:
            vhds = ET.Element("DataVirtualHardDisks")
            xml.append(vhds)

            for hd in data_virtual_hard_disks:
                vhd = ET.Element("DataVirtualHardDisk")
                vhds.append(vhd)
                AzureXmlSerializer.data_to_xml(
                    [('HostCaching', hd.host_caching)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('DiskLabel', hd.disk_label)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('DiskName', hd.disk_name)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('Lun', hd.lun)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('MediaLink', hd.media_link)],
                    vhd
                )

        if os_virtual_hard_disk is not None:
            hd = ET.Element("OSVirtualHardDisk")
            xml.append(hd)
            AzureXmlSerializer.data_to_xml(
                [('HostCaching', os_virtual_hard_disk.host_caching)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('DiskLabel', os_virtual_hard_disk.disk_label)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('DiskName', os_virtual_hard_disk.disk_name)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('MediaLink', os_virtual_hard_disk.media_link)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('SourceImageName', os_virtual_hard_disk.source_image_name)],
                hd
            )

        if vm_image_name is not None:
            AzureXmlSerializer.data_to_xml(
                [('VMImageName', vm_image_name)],
                xml
            )

        if role_size is not None:
            AzureXmlSerializer.data_to_xml([('RoleSize', role_size)], xml)

        return xml

    @staticmethod
    def add_role_to_xml(role_name, system_configuration_set,
                        os_virtual_hard_disk, role_type,
                        network_configuration_set, availability_set_name,
                        data_virtual_hard_disks, vm_image_name, role_size):
        # Wrap role_to_xml output in a <PersistentVMRole> document.
        doc = AzureXmlSerializer.doc_from_xml('PersistentVMRole')
        xml = AzureXmlSerializer.role_to_xml(
            availability_set_name,
            data_virtual_hard_disks,
            network_configuration_set,
            os_virtual_hard_disk,
            vm_image_name,
            role_name,
            role_size,
            role_type,
            system_configuration_set,
            doc
        )
        result = ensure_string(ET.tostring(xml, encoding='utf-8'))
        return result

    @staticmethod
    def update_role_to_xml(role_name, os_virtual_hard_disk, role_type,
                           network_configuration_set, availability_set_name,
                           data_virtual_hard_disks, vm_image_name, role_size):
        # Same as add_role_to_xml but without a system configuration set.
        doc = AzureXmlSerializer.doc_from_xml('PersistentVMRole')
        AzureXmlSerializer.role_to_xml(
            availability_set_name,
            data_virtual_hard_disks,
            network_configuration_set,
            os_virtual_hard_disk,
            vm_image_name,
            role_name,
            role_size,
            role_type,
            None,
            doc
        )
        result = ensure_string(ET.tostring(doc, encoding='utf-8'))
        return result

    @staticmethod
    def capture_role_to_xml(post_capture_action, target_image_name,
                            target_image_label, provisioning_configuration):
        # NOTE(review): ``xml`` here is the <OperationType> element itself,
        # so the subsequent elements are nested *inside* it rather than as
        # siblings — verify this matches what the API accepts before reuse.
        xml = AzureXmlSerializer.data_to_xml(
            [('OperationType', 'CaptureRoleOperation')]
        )
        AzureXmlSerializer.data_to_xml(
            [('PostCaptureAction', post_capture_action)], xml
        )

        if provisioning_configuration is not None:
            provisioning_config = ET.Element("ProvisioningConfiguration")
            xml.append(provisioning_config)

            if isinstance(provisioning_configuration, WindowsConfigurationSet):
                AzureXmlSerializer.windows_configuration_to_xml(
                    provisioning_configuration,
provisioning_config ) elif isinstance(provisioning_configuration, LinuxConfigurationSet): AzureXmlSerializer.linux_configuration_to_xml( provisioning_configuration, provisioning_config ) AzureXmlSerializer.data_to_xml( [('TargetImageLabel', target_image_label)], xml ) AzureXmlSerializer.data_to_xml( [('TargetImageName', target_image_name)], xml ) doc = AzureXmlSerializer.doc_from_xml('CaptureRoleOperation', xml) result = ensure_string(ET.tostring(doc, encoding='utf-8')) return result @staticmethod def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, label, role_name, system_configuration_set, os_virtual_hard_disk, role_type, network_configuration_set, availability_set_name, data_virtual_hard_disks, role_size, virtual_network_name, vm_image_name): doc = AzureXmlSerializer.doc_from_xml('Deployment') AzureXmlSerializer.data_to_xml([('Name', deployment_name)], doc) AzureXmlSerializer.data_to_xml( [('DeploymentSlot', deployment_slot)], doc ) AzureXmlSerializer.data_to_xml([('Label', label)], doc) role_list = ET.Element("RoleList") role = ET.Element("Role") role_list.append(role) doc.append(role_list) AzureXmlSerializer.role_to_xml( availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, vm_image_name, role_name, role_size, role_type, system_configuration_set, role ) if virtual_network_name is not None: doc.append( AzureXmlSerializer.data_to_xml( [('VirtualNetworkName', virtual_network_name)] ) ) result = ensure_string(ET.tostring(doc, encoding='utf-8')) return result @staticmethod def data_to_xml(data, xml=None): """ Creates an xml fragment from the specified data. 
data: Array of tuples, where first: xml element name second: xml element text third: conversion function """ for element in data: name = element[0] val = element[1] if len(element) > 2: converter = element[2] else: converter = None if val is not None: if converter is not None: text = _str(converter(_str(val))) else: text = _str(val) entry = ET.Element(name) entry.text = text if xml is not None: xml.append(entry) else: return entry return xml @staticmethod def doc_from_xml(document_element_name, inner_xml=None): """ Wraps the specified xml in an xml root element with default azure namespaces """ # Note: Namespaces don't work consistency in Python 2 and 3. """ nsmap = { None: "http://www.w3.org/2001/XMLSchema-instance", "i": "http://www.w3.org/2001/XMLSchema-instance" } xml.attrib["xmlns:i"] = "http://www.w3.org/2001/XMLSchema-instance" xml.attrib["xmlns"] = "http://schemas.microsoft.com/windowsazure" """ xml = ET.Element(document_element_name) xml.set("xmlns", "http://schemas.microsoft.com/windowsazure") if inner_xml is not None: xml.append(inner_xml) return xml @staticmethod def doc_from_data(document_element_name, data, extended_properties=None): doc = AzureXmlSerializer.doc_from_xml(document_element_name) AzureXmlSerializer.data_to_xml(data, doc) if extended_properties is not None: doc.append( AzureXmlSerializer.extended_properties_dict_to_xml_fragment( extended_properties ) ) result = ensure_string(ET.tostring(doc, encoding='utf-8')) return result @staticmethod def extended_properties_dict_to_xml_fragment(extended_properties): if extended_properties is not None and len(extended_properties) > 0: xml = ET.Element("ExtendedProperties") for key, val in extended_properties.items(): extended_property = ET.Element("ExtendedProperty") name = ET.Element("Name") name.text = _str(key) value = ET.Element("Value") value.text = _str(val) extended_property.append(name) extended_property.append(value) xml.append(extended_property) return xml """ Data Classes Borrowed from the 
Azure SDK for Python.
"""


class WindowsAzureData(object):
    """
    This is the base of data class.
    It is only used to check whether it is instance or not.
    """
    pass


class WindowsAzureDataTypedList(WindowsAzureData):
    # Base for list-like response containers; subclasses set ``list_type``
    # (item class) and optionally ``xml_element_name`` (item tag).
    list_type = None
    xml_element_name = None

    def __init__(self):
        # The _ListOf placeholder is replaced with a real list during
        # deserialization (see _fill_data_to_return_object).
        self.items = _ListOf(self.list_type, self.xml_element_name)

    def __iter__(self):
        return iter(self.items)

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        return self.items[index]


class OSVirtualHardDisk(WindowsAzureData):
    # Describes the OS disk attached to a role.

    def __init__(self, source_image_name=None, media_link=None,
                 host_caching=None, disk_label=None, disk_name=None):
        self.source_image_name = source_image_name
        self.media_link = media_link
        self.host_caching = host_caching
        self.disk_label = disk_label
        self.disk_name = disk_name
        self.os = ''  # undocumented, not used when adding a role


class LinuxConfigurationSet(WindowsAzureData):
    # Provisioning configuration for Linux roles.

    def __init__(self, host_name=None, user_name=None, user_password=None,
                 disable_ssh_password_authentication=None, custom_data=None):
        self.configuration_set_type = 'LinuxProvisioningConfiguration'
        self.host_name = host_name
        self.user_name = user_name
        self.user_password = user_password
        self.disable_ssh_password_authentication = \
            disable_ssh_password_authentication
        self.ssh = SSH()
        self.custom_data = custom_data


class WindowsConfigurationSet(WindowsAzureData):
    # Provisioning configuration for Windows roles.

    def __init__(self, computer_name=None, admin_password=None,
                 reset_password_on_first_logon=None,
                 enable_automatic_updates=None, time_zone=None,
                 admin_user_name=None):
        self.configuration_set_type = 'WindowsProvisioningConfiguration'
        self.computer_name = computer_name
        self.admin_password = admin_password
        self.reset_password_on_first_logon = reset_password_on_first_logon
        self.enable_automatic_updates = enable_automatic_updates
        self.time_zone = time_zone
        self.admin_user_name = admin_user_name
        self.domain_join = DomainJoin()
        self.stored_certificate_settings = StoredCertificateSettings()


class DomainJoin(WindowsAzureData):

    def __init__(self):
        self.credentials = Credentials()
        self.join_domain = ''
        self.machine_object_ou = ''


class Credentials(WindowsAzureData):

    def __init__(self):
        self.domain = ''
        self.username = ''
        self.password = ''


class CertificateSetting(WindowsAzureData):
    """
    Initializes a certificate setting.

    thumbprint:
        Specifies the thumbprint of the certificate to be provisioned. The
        thumbprint must specify an existing service certificate.
    store_name:
        Specifies the name of the certificate store from which retrieve
        certificate.
    store_location:
        Specifies the target certificate store location on the virtual
        machine The only supported value is LocalMachine.
    """

    def __init__(self, thumbprint='', store_name='', store_location=''):
        self.thumbprint = thumbprint
        self.store_name = store_name
        self.store_location = store_location


class StoredCertificateSettings(WindowsAzureDataTypedList):
    list_type = CertificateSetting

    _repr_attributes = [
        'items'
    ]


class SSH(WindowsAzureData):

    def __init__(self):
        self.public_keys = PublicKeys()
        self.key_pairs = KeyPairs()


class PublicKey(WindowsAzureData):

    def __init__(self, fingerprint='', path=''):
        self.fingerprint = fingerprint
        self.path = path


class PublicKeys(WindowsAzureDataTypedList):
    list_type = PublicKey

    _repr_attributes = [
        'items'
    ]


class AzureKeyPair(WindowsAzureData):

    def __init__(self, fingerprint='', path=''):
        self.fingerprint = fingerprint
        self.path = path


class KeyPairs(WindowsAzureDataTypedList):
    list_type = AzureKeyPair

    _repr_attributes = [
        'items'
    ]


class LoadBalancerProbe(WindowsAzureData):

    def __init__(self):
        self.path = ''
        self.port = ''
        self.protocol = ''


class ConfigurationSet(WindowsAzureData):

    def __init__(self):
        self.configuration_set_type = ''
        self.role_type = ''
        self.input_endpoints = ConfigurationSetInputEndpoints()
        self.subnet_names = ScalarListOf(str, 'SubnetName')


class ConfigurationSets(WindowsAzureDataTypedList):
    list_type = ConfigurationSet

    _repr_attributes = [
        'items'
    ]


class ConfigurationSetInputEndpoint(WindowsAzureData):

    def __init__(self, name='', protocol='', port='', local_port='',
                 load_balanced_endpoint_set_name='',
                 enable_direct_server_return=False):
        self.enable_direct_server_return = enable_direct_server_return
        self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name
        self.local_port = local_port
        self.name = name
        self.port = port
        self.load_balancer_probe = LoadBalancerProbe()
        self.protocol = protocol


class ConfigurationSetInputEndpoints(WindowsAzureDataTypedList):
    list_type = ConfigurationSetInputEndpoint
    xml_element_name = 'InputEndpoint'

    _repr_attributes = [
        'items'
    ]


class Location(WindowsAzureData):

    def __init__(self):
        self.name = ''
        self.display_name = ''
        self.available_services = ScalarListOf(str, 'AvailableService')
        self.compute_capabilities = ComputeCapability()


class Locations(WindowsAzureDataTypedList):
    list_type = Location

    _repr_attributes = [
        'items'
    ]


class ComputeCapability(WindowsAzureData):

    def __init__(self):
        self.virtual_machines_role_sizes = ScalarListOf(str, 'RoleSize')


class VirtualMachinesRoleSizes(WindowsAzureData):

    def __init__(self):
        self.role_size = ScalarListOf(str, 'RoleSize')


class OSImage(WindowsAzureData):

    def __init__(self):
        self.affinity_group = ''
        self.category = ''
        self.location = ''
        self.logical_size_in_gb = 0
        self.label = ''
        self.media_link = ''
        self.name = ''
        self.os = ''
        self.eula = ''
        self.description = ''


class Images(WindowsAzureDataTypedList):
    list_type = OSImage

    _repr_attributes = [
        'items'
    ]


class VMImage(WindowsAzureData):

    def __init__(self):
        self.name = ''
        self.label = ''
        self.category = ''
        self.os_disk_configuration = OSDiskConfiguration()
        self.service_name = ''
        self.deployment_name = ''
        self.role_name = ''
        self.location = ''
        self.affinity_group = ''


class VMImages(WindowsAzureDataTypedList):
    list_type = VMImage

    _repr_attributes = [
        'items'
    ]


class VirtualIP(WindowsAzureData):

    def __init__(self):
        self.address = ''
        self.is_dns_programmed = ''
        self.name = ''


class VirtualIPs(WindowsAzureDataTypedList):
    list_type =
VirtualIP _repr_attributes = [ 'items' ] class HostedService(WindowsAzureData, ReprMixin): _repr_attributes = [ 'service_name', 'url' ] def __init__(self): self.url = '' self.service_name = '' self.hosted_service_properties = HostedServiceProperties() self.deployments = Deployments() class HostedServices(WindowsAzureDataTypedList, ReprMixin): list_type = HostedService _repr_attributes = [ 'items' ] class HostedServiceProperties(WindowsAzureData): def __init__(self): self.description = '' self.location = '' self.affinity_group = '' self.label = _Base64String() self.status = '' self.date_created = '' self.date_last_modified = '' self.extended_properties = _DictOf( 'ExtendedProperty', 'Name', 'Value' ) class Deployment(WindowsAzureData): def __init__(self): self.name = '' self.deployment_slot = '' self.private_id = '' self.status = '' self.label = _Base64String() self.url = '' self.configuration = _Base64String() self.role_instance_list = RoleInstanceList() self.upgrade_status = UpgradeStatus() self.upgrade_domain_count = '' self.role_list = RoleList() self.sdk_version = '' self.input_endpoint_list = InputEndpoints() self.locked = False self.rollback_allowed = False self.persistent_vm_downtime_info = PersistentVMDowntimeInfo() self.created_time = '' self.last_modified_time = '' self.extended_properties = _DictOf( 'ExtendedProperty', 'Name', 'Value' ) self.virtual_ips = VirtualIPs() class Deployments(WindowsAzureDataTypedList): list_type = Deployment _repr_attributes = [ 'items' ] class UpgradeStatus(WindowsAzureData): def __init__(self): self.upgrade_type = '' self.current_upgrade_domain_state = '' self.current_upgrade_domain = '' class RoleInstance(WindowsAzureData): def __init__(self): self.role_name = '' self.instance_name = '' self.instance_status = '' self.instance_upgrade_domain = 0 self.instance_fault_domain = 0 self.instance_size = '' self.instance_state_details = '' self.instance_error_code = '' self.ip_address = '' self.instance_endpoints = 
InstanceEndpoints() self.power_state = '' self.fqdn = '' self.host_name = '' class RoleInstanceList(WindowsAzureDataTypedList): list_type = RoleInstance _repr_attributes = [ 'items' ] class InstanceEndpoint(WindowsAzureData): def __init__(self): self.name = '' self.vip = '' self.public_port = '' self.local_port = '' self.protocol = '' class InstanceEndpoints(WindowsAzureDataTypedList): list_type = InstanceEndpoint _repr_attributes = [ 'items' ] class InputEndpoint(WindowsAzureData): def __init__(self): self.role_name = '' self.vip = '' self.port = '' class InputEndpoints(WindowsAzureDataTypedList): list_type = InputEndpoint _repr_attributes = [ 'items' ] class Role(WindowsAzureData): def __init__(self): self.role_name = '' self.os_version = '' class RoleList(WindowsAzureDataTypedList): list_type = Role _repr_attributes = [ 'items' ] class PersistentVMDowntimeInfo(WindowsAzureData): def __init__(self): self.start_time = '' self.end_time = '' self.status = '' class AsynchronousOperationResult(WindowsAzureData): def __init__(self, request_id=None): self.request_id = request_id class Disk(WindowsAzureData): def __init__(self): self.affinity_group = '' self.attached_to = AttachedTo() self.has_operating_system = '' self.is_corrupted = '' self.location = '' self.logical_disk_size_in_gb = 0 self.label = '' self.media_link = '' self.name = '' self.os = '' self.source_image_name = '' class Disks(WindowsAzureDataTypedList): list_type = Disk _repr_attributes = [ 'items' ] class AttachedTo(WindowsAzureData): def __init__(self): self.hosted_service_name = '' self.deployment_name = '' self.role_name = '' class OperationError(WindowsAzureData): def __init__(self): self.code = '' self.message = '' class Operation(WindowsAzureData): def __init__(self): self.id = '' self.status = '' self.http_status_code = '' self.error = OperationError() class OperatingSystem(WindowsAzureData): def __init__(self): self.version = '' self.label = _Base64String() self.is_default = True self.is_active = 
True self.family = 0 self.family_label = _Base64String() class OSDiskConfiguration(WindowsAzureData): def __init__(self): self.name = '' self.host_caching = '' self.os_state = '' self.os = '' self.media_link = '' self.logical_disk_size_in_gb = 0 class OperatingSystems(WindowsAzureDataTypedList): list_type = OperatingSystem _repr_attributes = [ 'items' ] class OperatingSystemFamily(WindowsAzureData): def __init__(self): self.name = '' self.label = _Base64String() self.operating_systems = OperatingSystems() class OperatingSystemFamilies(WindowsAzureDataTypedList): list_type = OperatingSystemFamily _repr_attributes = [ 'items' ] class Subscription(WindowsAzureData): def __init__(self): self.subscription_id = '' self.subscription_name = '' self.subscription_status = '' self.account_admin_live_email_id = '' self.service_admin_live_email_id = '' self.max_core_count = 0 self.max_storage_accounts = 0 self.max_hosted_services = 0 self.current_core_count = 0 self.current_hosted_services = 0 self.current_storage_accounts = 0 self.max_virtual_network_sites = 0 self.max_local_network_sites = 0 self.max_dns_servers = 0 class AvailabilityResponse(WindowsAzureData): def __init__(self): self.result = False class SubscriptionCertificate(WindowsAzureData): def __init__(self): self.subscription_certificate_public_key = '' self.subscription_certificate_thumbprint = '' self.subscription_certificate_data = '' self.created = '' class SubscriptionCertificates(WindowsAzureDataTypedList): list_type = SubscriptionCertificate _repr_attributes = [ 'items' ] class AzureHTTPRequest(object): def __init__(self): self.host = '' self.method = '' self.path = '' self.query = [] # list of (name, value) self.headers = {} # list of (header name, header value) self.body = '' self.protocol_override = None class AzureHTTPResponse(object): def __init__(self, status, message, headers, body): self.status = status self.message = message self.headers = headers self.body = body """ Helper classes and functions. 
""" class _Base64String(str): pass class _ListOf(list): """ A list which carries with it the type that's expected to go in it. Used for deserializaion and construction of the lists """ def __init__(self, list_type, xml_element_name=None): self.list_type = list_type if xml_element_name is None: self.xml_element_name = list_type.__name__ else: self.xml_element_name = xml_element_name super(_ListOf, self).__init__() class ScalarListOf(list): """ A list of scalar types which carries with it the type that's expected to go in it along with its xml element name. Used for deserializaion and construction of the lists """ def __init__(self, list_type, xml_element_name): self.list_type = list_type self.xml_element_name = xml_element_name super(ScalarListOf, self).__init__() class _DictOf(dict): """ A dict which carries with it the xml element names for key,val. Used for deserializaion and construction of the lists """ def __init__(self, pair_xml_element_name, key_xml_element_name, value_xml_element_name): self.pair_xml_element_name = pair_xml_element_name self.key_xml_element_name = key_xml_element_name self.value_xml_element_name = value_xml_element_name super(_DictOf, self).__init__() class AzureNodeLocation(NodeLocation): # we can also have something in here for available services which is an # extra to the API with Azure def __init__(self, id, name, country, driver, available_services, virtual_machine_role_sizes): super(AzureNodeLocation, self).__init__(id, name, country, driver) self.available_services = available_services self.virtual_machine_role_sizes = virtual_machine_role_sizes def __repr__(self): return ( ( '' ) % ( self.id, self.name, self.country, self.driver.name, ','.join(self.available_services), ','.join(self.virtual_machine_role_sizes) ) ) apache-libcloud-2.2.1/libcloud/compute/drivers/onapp.py0000664000175000017500000003655113153541406023050 0ustar kamikami00000000000000import json from libcloud.utils.py3 import httplib from libcloud.utils.networking import 
is_private_subnet from libcloud.common.onapp import OnAppConnection from libcloud.compute.base import Node, NodeDriver, NodeImage, KeyPair from libcloud.compute.providers import Provider __all__ = [ "OnAppNodeDriver" ] """ Define the extra dictionary for specific resources """ RESOURCE_EXTRA_ATTRIBUTES_MAP = { "node": { "add_to_marketplace": { "key_name": "add_to_marketplace", "transform_func": bool }, "admin_note": { "key_name": "admin_note", "transform_func": str }, "allow_resize_without_reboot": { "key_name": "allow_resize_without_reboot", "transform_func": bool }, "allowed_hot_migrate": { "key_name": "allowed_hot_migrate", "transform_func": bool }, "allowed_swap": { "key_name": "allowed_swap", "transform_func": bool }, "booted": { "key_name": "booted", "transform_func": bool }, "built": { "key_name": "built", "transform_func": bool }, "cpu_priority": { "key_name": "cpu_priority", "transform_func": int }, "cpu_shares": { "key_name": "cpu_shares", "transform_func": int }, "cpu_sockets": { "key_name": "cpu_sockets", "transform_func": int }, "cpu_threads": { "key_name": "cpu_threads", "transform_func": int }, "cpu_units": { "key_name": "cpu_units", "transform_func": int }, "cpus": { "key_name": "cpus", "transform_func": int }, "created_at": { "key_name": "created_at", "transform_func": str }, "customer_network_id": { "key_name": "customer_network_id", "transform_func": str }, "deleted_at": { "key_name": "deleted_at", "transform_func": str }, "edge_server_type": { "key_name": "edge_server_type", "transform_func": str }, "enable_autoscale": { "key_name": "enable_autoscale", "transform_func": bool }, "enable_monitis": { "key_name": "enable_monitis", "transform_func": bool }, "firewall_notrack": { "key_name": "firewall_notrack", "transform_func": bool }, "hostname": { "key_name": "hostname", "transform_func": str }, "hypervisor_id": { "key_name": "hypervisor_id", "transform_func": int }, "id": { "key_name": "id", "transform_func": int }, "initial_root_password": { 
"key_name": "initial_root_password", "transform_func": str }, "initial_root_password_encrypted": { "key_name": "initial_root_password_encrypted", "transform_func": bool }, "local_remote_access_ip_address": { "key_name": "local_remote_access_ip_address", "transform_func": str }, "local_remote_access_port": { "key_name": "local_remote_access_port", "transform_func": int }, "locked": { "key_name": "locked", "transform_func": bool }, "memory": { "key_name": "memory", "transform_func": int }, "min_disk_size": { "key_name": "min_disk_size", "transform_func": int }, "monthly_bandwidth_used": { "key_name": "monthly_bandwidth_used", "transform_func": int }, "note": { "key_name": "note", "transform_func": str }, "operating_system": { "key_name": "operating_system", "transform_func": str }, "operating_system_distro": { "key_name": "operating_system_distro", "transform_func": str }, "preferred_hvs": { "key_name": "preferred_hvs", "transform_func": list }, "price_per_hour": { "key_name": "price_per_hour", "transform_func": float }, "price_per_hour_powered_off": { "key_name": "price_per_hour_powered_off", "transform_func": float }, "recovery_mode": { "key_name": "recovery_mode", "transform_func": bool }, "remote_access_password": { "key_name": "remote_access_password", "transform_func": str }, "service_password": { "key_name": "service_password", "transform_func": str }, "state": { "key_name": "state", "transform_func": str }, "storage_server_type": { "key_name": "storage_server_type", "transform_func": str }, "strict_virtual_machine_id": { "key_name": "strict_virtual_machine_id", "transform_func": str }, "support_incremental_backups": { "key_name": "support_incremental_backups", "transform_func": bool }, "suspended": { "key_name": "suspended", "transform_func": bool }, "template_id": { "key_name": "template_id", "transform_func": int }, "template_label": { "key_name": "template_label", "transform_func": str }, "total_disk_size": { "key_name": "total_disk_size", 
"transform_func": int }, "updated_at": { "key_name": "updated_at", "transform_func": str }, "user_id": { "key_name": "user_id", "transform_func": int }, "vip": { "key_name": "vip", "transform_func": bool }, "xen_id": { "key_name": "xen_id", "transform_func": int } } } class OnAppNodeDriver(NodeDriver): """ Base OnApp node driver. """ connectionCls = OnAppConnection type = Provider.ONAPP name = 'OnApp' website = 'http://onapp.com/' def create_node(self, name, ex_memory, ex_cpus, ex_cpu_shares, ex_hostname, ex_template_id, ex_primary_disk_size, ex_swap_disk_size, ex_required_virtual_machine_build=1, ex_required_ip_address_assignment=1, **kwargs): """ Add a VS :param kwargs: All keyword arguments to create a VS :type kwargs: ``dict`` :rtype: :class:`OnAppNode` """ server_params = dict( label=name, memory=ex_memory, cpus=ex_cpus, cpu_shares=ex_cpu_shares, hostname=ex_hostname, template_id=ex_template_id, primary_disk_size=ex_primary_disk_size, swap_disk_size=ex_swap_disk_size, required_virtual_machine_build=ex_required_virtual_machine_build, required_ip_address_assignment=ex_required_ip_address_assignment, rate_limit=kwargs.get("rate_limit") ) server_params.update(OnAppNodeDriver._create_args_to_params(**kwargs)) data = json.dumps({"virtual_machine": server_params}) response = self.connection.request( "/virtual_machines.json", data=data, headers={ "Content-type": "application/json"}, method="POST") return self._to_node(response.object["virtual_machine"]) def destroy_node(self, node, ex_convert_last_backup=0, ex_destroy_all_backups=0): """ Delete a VS :param node: OnApp node :type node: :class: `OnAppNode` :param convert_last_backup: set 1 to convert the last VS's backup to template, otherwise set 0 :type convert_last_backup: ``int`` :param destroy_all_backups: set 1 to destroy all existing backups of this VS, otherwise set 0 :type destroy_all_backups: ``int`` """ server_params = { "convert_last_backup": ex_convert_last_backup, "destroy_all_backups": 
ex_destroy_all_backups } action = "/virtual_machines/{identifier}.json".format( identifier=node.id) self.connection.request(action, params=server_params, method="DELETE") return True def list_nodes(self): """ List all VS :rtype: ``list`` of :class:`OnAppNode` """ response = self.connection.request("/virtual_machines.json") nodes = [] for vm in response.object: nodes.append(self._to_node(vm["virtual_machine"])) return nodes def list_images(self): """ List all images :rtype: ``list`` of :class:`NodeImage` """ response = self.connection.request("/templates.json") templates = [] for template in response.object: templates.append(self._to_image(template["image_template"])) return templates def list_key_pairs(self): """ List all the available key pair objects. :rtype: ``list`` of :class:`.KeyPair` objects """ user_id = self.connection.request('/profile.json').object['user']['id'] response = self.connection.request('/users/%s/ssh_keys.json' % user_id) ssh_keys = [] for ssh_key in response.object: ssh_keys.append(self._to_key_pair(ssh_key['ssh_key'])) return ssh_keys def get_key_pair(self, name): """ Retrieve a single key pair. :param name: ID of the key pair to retrieve. :type name: ``str`` :rtype: :class:`.KeyPair` object """ user_id = self.connection.request('/profile.json').object['user']['id'] response = self.connection.request( '/users/%s/ssh_keys/%s.json' % (user_id, name)) return self._to_key_pair(response.object['ssh_key']) def import_key_pair_from_string(self, name, key_material): """ Import a new public key from string. :param name: Key pair name (unused). :type name: ``str`` :param key_material: Public key material. 
:type key_material: ``str`` :rtype: :class:`.KeyPair` object """ data = json.dumps({'key': key_material}) user_id = self.connection.request('/profile.json').object['user']['id'] response = self.connection.request( '/users/%s/ssh_keys.json' % user_id, data=data, headers={ "Content-type": "application/json"}, method="POST") return self._to_key_pair(response.object['ssh_key']) def delete_key_pair(self, key): """ Delete an existing key pair. :param key_pair: Key pair object. :type key_pair: :class:`.KeyPair` :return: True on success :rtype: ``bool`` """ key_id = key.name response = self.connection.request( '/settings/ssh_keys/%s.json' % key_id, method='DELETE') return response.status == httplib.NO_CONTENT # # Helper methods # def _to_key_pair(self, data): extra = {'created_at': data['created_at'], 'updated_at': data['updated_at']} return KeyPair(name=data['id'], fingerprint=None, public_key=data['key'], private_key=None, driver=self, extra=extra) def _to_image(self, template): extra = {'distribution': template['operating_system_distro'], 'operating_system': template['operating_system'], 'operating_system_arch': template['operating_system_arch'], 'allow_resize_without_reboot': template['allow_resize_without_reboot'], 'allowed_hot_migrate': template['allowed_hot_migrate'], 'allowed_swap': template['allowed_swap'], 'min_disk_size': template['min_disk_size'], 'min_memory_size': template['min_memory_size'], 'created_at': template['created_at']} return NodeImage(id=template['id'], name=template['label'], driver=self, extra=extra) def _to_node(self, data): identifier = data["identifier"] name = data["label"] private_ips = [] public_ips = [] for ip in data["ip_addresses"]: address = ip["ip_address"]['address'] if is_private_subnet(address): private_ips.append(address) else: public_ips.append(address) extra = OnAppNodeDriver._get_extra_dict( data, RESOURCE_EXTRA_ATTRIBUTES_MAP["node"] ) return Node(identifier, name, extra['state'], public_ips, private_ips, self, extra=extra) 
@staticmethod def _get_extra_dict(response, mapping): """ Extract attributes from the element based on rules provided in the mapping dictionary. :param response: The JSON response to parse the values from. :type response: ``dict`` :param mapping: Dictionary with the extra layout :type mapping: ``dict`` :rtype: ``dict`` """ extra = {} for attribute, values in mapping.items(): transform_func = values["transform_func"] value = response.get(values["key_name"]) extra[attribute] = transform_func(value) if value else None return extra @staticmethod def _create_args_to_params(**kwargs): """ Extract server params from keyword args to create a VS :param kwargs: keyword args :return: ``dict`` """ params = [ "ex_cpu_sockets", "ex_cpu_threads", "ex_enable_autoscale", "ex_data_store_group_primary_id", "ex_data_store_group_swap_id", "ex_hypervisor_group_id", "ex_hypervisor_id", "ex_initial_root_password", "ex_note", "ex_primary_disk_min_iops", "ex_primary_network_id", "ex_primary_network_group_id", "ex_recipe_ids", "ex_required_automatic_backup", "ex_required_virtual_machine_startup", "ex_required_virtual_machine_startup", "ex_selected_ip_address_id", "ex_swap_disk_min_iops", "ex_type_of_format", "ex_custom_recipe_variables", "ex_licensing_key", "ex_licensing_server_id", "ex_licensing_type", ] server_params = {} for p in params: value = kwargs.get(p) if value: server_params[p[3:]] = value return server_params apache-libcloud-2.2.1/libcloud/compute/drivers/vultr.py0000664000175000017500000003170713153541406023105 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Vultr Driver """ import time from functools import update_wrapper from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlencode from libcloud.common.base import ConnectionKey, JsonResponse from libcloud.compute.types import Provider, NodeState from libcloud.common.types import InvalidCredsError from libcloud.common.types import LibcloudError from libcloud.common.types import ServiceUnavailableError from libcloud.compute.base import NodeDriver from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation class rate_limited: """ Decorator for retrying Vultr calls that are rate-limited. :param int sleep: Seconds to sleep after being rate-limited. :param int retries: Number of retries. """ def __init__(self, sleep=1, retries=1): self.sleep = sleep self.retries = retries def __call__(self, call): """ Run ``call`` method until it's not rate-limited. The method is invoked while it returns 503 Service Unavailable or the allowed number of retries is reached. :param callable call: Method to be decorated. 
""" def wrapper(*args, **kwargs): last_exception = None for i in range(self.retries + 1): try: return call(*args, **kwargs) except ServiceUnavailableError as e: last_exception = e time.sleep(self.sleep) # hit by rate limit, let's sleep raise last_exception update_wrapper(wrapper, call) return wrapper class VultrResponse(JsonResponse): def parse_error(self): if self.status == httplib.OK: body = self.parse_body() return body elif self.status == httplib.FORBIDDEN: raise InvalidCredsError(self.body) elif self.status == httplib.SERVICE_UNAVAILABLE: raise ServiceUnavailableError(self.body) else: raise LibcloudError(self.body) class SSHKey(object): def __init__(self, id, name, pub_key): self.id = id self.name = name self.pub_key = pub_key def __repr__(self): return (('') % (self.id, self.name, self.pub_key)) class VultrConnection(ConnectionKey): """ Connection class for the Vultr driver. """ host = 'api.vultr.com' responseCls = VultrResponse unauthenticated_endpoints = { # {action: methods} '/v1/app/list': ['GET'], '/v1/os/list': ['GET'], '/v1/plans/list': ['GET'], '/v1/plans/list_vc2': ['GET'], '/v1/plans/list_vdc2': ['GET'], '/v1/regions/availability': ['GET'], '/v1/regions/list': ['GET'] } def add_default_headers(self, headers): """ Adds ``API-Key`` default header. :return: Updated headers. :rtype: dict """ if self.require_api_key(): headers.update({'API-Key': self.key}) return headers def encode_data(self, data): return urlencode(data) @rate_limited() def get(self, url): return self.request(url) @rate_limited() def post(self, url, data): headers = {'Content-Type': 'application/x-www-form-urlencoded'} return self.request(url, data=data, headers=headers, method='POST') def require_api_key(self): """ Check whether this call (method + action) must be authenticated. :return: True if ``API-Key`` header required, False otherwise. 
:rtype: bool """ try: return self.method \ not in self.unauthenticated_endpoints[self.action] except KeyError: return True class VultrNodeDriver(NodeDriver): """ VultrNode node driver. """ connectionCls = VultrConnection type = Provider.VULTR name = 'Vultr' website = 'https://www.vultr.com' NODE_STATE_MAP = {'pending': NodeState.PENDING, 'active': NodeState.RUNNING} EX_CREATE_YES_NO_ATTRIBUTES = ['enable_ipv6', 'enable_private_network', 'auto_backups', 'notify_activate', 'ddos_protection'] EX_CREATE_ID_ATTRIBUTES = {'iso_id': 'ISOID', 'script_id': 'SCRIPTID', 'snapshot_id': 'SNAPSHOTID', 'app_id': 'APPID'} EX_CREATE_ATTRIBUTES = ['ipxe_chain_url', 'label', 'userdata', 'reserved_ip_v4', 'hostname', 'tag'] EX_CREATE_ATTRIBUTES.extend(EX_CREATE_YES_NO_ATTRIBUTES) EX_CREATE_ATTRIBUTES.extend(EX_CREATE_ID_ATTRIBUTES.keys()) def list_nodes(self): return self._list_resources('/v1/server/list', self._to_node) def list_key_pairs(self): """ List all the available SSH keys. :return: Available SSH keys. :rtype: ``list`` of :class:`SSHKey` """ return self._list_resources('/v1/sshkey/list', self._to_ssh_key) def create_key_pair(self, name, public_key=''): """ Create a new SSH key. :param name: Name of the new SSH key :type name: ``str`` :key public_key: Public part of the new SSH key :type name: ``str`` :return: True on success :rtype: ``bool`` """ params = {'name': name, 'ssh_key': public_key} res = self.connection.post('/v1/sshkey/create', params) return res.status == httplib.OK def delete_key_pair(self, key_pair): """ Delete an SSH key. 
:param key_pair: The SSH key to delete :type key_pair: :class:`SSHKey` :return: True on success :rtype: ``bool`` """ params = {'SSHKEYID': key_pair.id} res = self.connection.post('/v1/sshkey/destroy', params) return res.status == httplib.OK def list_locations(self): return self._list_resources('/v1/regions/list', self._to_location) def list_sizes(self): return self._list_resources('/v1/plans/list', self._to_size) def list_images(self): return self._list_resources('/v1/os/list', self._to_image) def create_node(self, name, size, image, location, ex_ssh_key_ids=None, ex_create_attr=None): """ Create a node :param name: Name for the new node :type name: ``str`` :param size: Size of the new node :type size: :class:`NodeSize` :param image: Image for the new node :type image: :class:`NodeImage` :param location: Location of the new node :type location: :class:`NodeLocation` :param ex_ssh_key_ids: IDs of the SSH keys to initialize :type ex_sshkeyid: ``list`` of ``str`` :param ex_create_attr: Extra attributes for node creation :type ex_create_attr: ``dict`` The `ex_create_attr` parameter can include the following dictionary key and value pairs: * `ipxe_chain_url`: ``str`` for specifying URL to boot via IPXE * `iso_id`: ``str`` the ID of a specific ISO to mount, only meaningful with the `Custom` `NodeImage` * `script_id`: ``int`` ID of a startup script to execute on boot, only meaningful when the `NodeImage` is not `Custom` * 'snapshot_id`: ``str`` Snapshot ID to restore for the initial installation, only meaningful with the `Snapshot` `NodeImage` * `enable_ipv6`: ``bool`` Whether an IPv6 subnet should be assigned * `enable_private_network`: ``bool`` Whether private networking support should be added * `label`: ``str`` Text label to be shown in the control panel * `auto_backups`: ``bool`` Whether automatic backups should be enabled * `app_id`: ``int`` App ID to launch if launching an application, only meaningful when the `NodeImage` is `Application` * `userdata`: ``str`` 
Base64 encoded cloud-init user-data * `notify_activate`: ``bool`` Whether an activation email should be sent when the server is ready * `ddos_protection`: ``bool`` Whether DDOS protection should be enabled * `reserved_ip_v4`: ``str`` IP address of the floating IP to use as the main IP of this server * `hostname`: ``str`` The hostname to assign to this server * `tag`: ``str`` The tag to assign to this server :return: The newly created node. :rtype: :class:`Node` """ params = {'DCID': location.id, 'VPSPLANID': size.id, 'OSID': image.id, 'label': name} if ex_ssh_key_ids is not None: params['SSHKEYID'] = ','.join(ex_ssh_key_ids) ex_create_attr = ex_create_attr or {} for key, value in ex_create_attr.items(): if key in self.EX_CREATE_ATTRIBUTES: if key in self.EX_CREATE_YES_NO_ATTRIBUTES: params[key] = 'yes' if value else 'no' else: if key in self.EX_CREATE_ID_ATTRIBUTES: key = self.EX_CREATE_ID_ATTRIBUTES[key] params[key] = value result = self.connection.post('/v1/server/create', params) if result.status != httplib.OK: return False subid = result.object['SUBID'] retry_count = 3 created_node = None for i in range(retry_count): try: nodes = self.list_nodes() created_node = [n for n in nodes if n.id == subid][0] except IndexError: time.sleep(1) else: break return created_node def reboot_node(self, node): params = {'SUBID': node.id} res = self.connection.post('/v1/server/reboot', params) return res.status == httplib.OK def destroy_node(self, node): params = {'SUBID': node.id} res = self.connection.post('/v1/server/destroy', params) return res.status == httplib.OK def _list_resources(self, url, tranform_func): data = self.connection.get(url).object sorted_key = sorted(data) return [tranform_func(data[key]) for key in sorted_key] def _to_node(self, data): if 'status' in data: state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN) if state == NodeState.RUNNING and \ data['power_status'] != 'running': state = NodeState.STOPPED else: state = NodeState.UNKNOWN if 
'main_ip' in data and data['main_ip'] is not None: public_ips = [data['main_ip']] else: public_ips = [] extra_keys = [] extra = {} for key in extra_keys: if key in data: extra[key] = data[key] node = Node(id=data['SUBID'], name=data['label'], state=state, public_ips=public_ips, private_ips=None, extra=extra, driver=self) return node def _to_location(self, data): return NodeLocation(id=data['DCID'], name=data['name'], country=data['country'], driver=self) def _to_size(self, data): extra = { 'vcpu_count': int(data['vcpu_count']), 'plan_type': data['plan_type'], 'available_locations': data['available_locations'] } ram = int(data['ram']) disk = int(data['disk']) bandwidth = float(data['bandwidth']) price = float(data['price_per_month']) return NodeSize(id=data['VPSPLANID'], name=data['name'], ram=ram, disk=disk, bandwidth=bandwidth, price=price, extra=extra, driver=self) def _to_image(self, data): extra = {'arch': data['arch'], 'family': data['family']} return NodeImage(id=data['OSID'], name=data['name'], extra=extra, driver=self) def _to_ssh_key(self, data): return SSHKey(id=data['SSHKEYID'], name=data['name'], pub_key=data['ssh_key']) apache-libcloud-2.2.1/libcloud/compute/drivers/profitbricks.py0000664000175000017500000034562713153541406024443 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ProfitBricks Compute driver """ import base64 import json import copy import time import urllib from libcloud.utils.py3 import b from libcloud.compute.providers import Provider from libcloud.common.base import ConnectionUserAndKey, JsonResponse from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot from libcloud.compute.base import UuidMixin from libcloud.compute.types import NodeState from libcloud.common.types import LibcloudError, MalformedResponseError from libcloud.common.exceptions import BaseHTTPError from collections import defaultdict __all__ = [ 'API_VERSION', 'API_HOST', 'ProfitBricksNodeDriver', 'Datacenter', 'ProfitBricksNetworkInterface', 'ProfitBricksFirewallRule', 'ProfitBricksLan', 'ProfitBricksLoadBalancer', 'ProfitBricksAvailabilityZone', 'ProfitBricksIPBlock' ] API_HOST = 'api.profitbricks.com' API_VERSION = '/cloudapi/v3/' class ProfitBricksResponse(JsonResponse): """ ProfitBricks response parsing. """ def parse_error(self): http_code = None fault_code = None message = None try: body = json.loads(self.body) if 'httpStatus' in body: http_code = body['httpStatus'] else: http_code = 'unknown' if 'messages' in body: message = ', '.join(list(map( lambda item: item['message'], body['messages']))) fault_code = ', '.join(list(map( lambda item: item['errorCode'], body['messages']))) else: message = 'No messages returned.' fault_code = 'unknown' except Exception: raise MalformedResponseError('Failed to parse Json', body=self.body, driver=ProfitBricksNodeDriver) return LibcloudError( ''' HTTP Code: %s, Fault Code(s): %s, Message(s): %s ''' % (http_code, fault_code, message), driver=self) class ProfitBricksConnection(ConnectionUserAndKey): """ Represents a single connection to the ProfitBricks endpoint. 
""" host = API_HOST api_prefix = API_VERSION responseCls = ProfitBricksResponse def add_default_headers(self, headers): headers['Authorization'] = 'Basic %s' % (base64.b64encode( b('%s:%s' % (self.user_id, self.key))).decode('utf-8')) return headers def encode_data(self, data): ''' If a string is passed in, just return it or else if a dict is passed in, encode it as a json string. ''' if type(data) is str: return data elif type(data) is dict: return json.dumps(data) else: return '' def request(self, action, params=None, data=None, headers=None, method='GET', raw=False, with_full_url=False): ''' Some requests will use the href attribute directly. If this is not the case, then we should formulate the url based on the action specified. If we are using a full url, we need to remove the host and protocol components. ''' if not with_full_url or with_full_url is False: action = self.api_prefix + action.lstrip('/') else: action = action.replace( 'https://{host}'.format(host=self.host), '' ) return super(ProfitBricksConnection, self).request( action=action, params=params, data=data, headers=headers, method=method, raw=raw ) class Datacenter(UuidMixin): """ Class which stores information about ProfitBricks datacenter instances. :param id: The datacenter ID. :type id: ``str`` :param href: The datacenter href. :type href: ``str`` :param name: The datacenter name. :type name: ``str`` :param version: Datacenter version. :type version: ``str`` :param driver: ProfitBricks Node Driver. :type driver: :class:`ProfitBricksNodeDriver` :param extra: Extra properties for the Datacenter. :type extra: ``dict`` Note: This class is ProfitBricks specific. 
""" def __init__(self, id, href, name, version, driver, extra=None): self.id = str(id) self.href = href self.name = name self.version = version self.driver = driver self.extra = extra or {} UuidMixin.__init__(self) def __repr__(self): return (( ' ...>') % (self.id, self.href, self.name, self.version, self.driver.name)) class ProfitBricksNetworkInterface(object): """ Class which stores information about ProfitBricks network interfaces. :param id: The network interface ID. :type id: ``str`` :param name: The network interface name. :type name: ``str`` :param href: The network interface href. :type href: ``str`` :param state: The network interface name. :type state: ``int`` :param extra: Extra properties for the network interface. :type extra: ``dict`` Note: This class is ProfitBricks specific. """ def __init__(self, id, name, href, state, extra=None): self.id = id self.name = name self.href = href self.state = state self.extra = extra or {} def __repr__(self): return (('') % (self.id, self.name, self.href)) class ProfitBricksFirewallRule(object): """ Extension class which stores information about a ProfitBricks firewall rule. :param id: The firewall rule ID. :type id: ``str`` :param name: The firewall rule name. :type name: ``str`` :param href: The firewall rule href. :type href: ``str`` :param state: The current state of the firewall rule. :type state: ``int`` :param extra: Extra properties for the firewall rule. :type extra: ``dict`` Note: This class is ProfitBricks specific. """ def __init__(self, id, name, href, state, extra=None): self.id = id self.name = name self.href = href self.state = state self.extra = extra or {} def __repr__(self): return (('') % (self.id, self.name, self.href)) class ProfitBricksLan(object): """ Extension class which stores information about a ProfitBricks LAN :param id: The ID of the lan. :param id: ``str`` :param name: The name of the lan. :type name: ``str`` :param href: The lan href. 
:type href: ``str`` :param is_public: If public, the lan faces the public internet. :type is_public: ``bool`` :param state: The current state of the lan. :type state: ``int`` :param extra: Extra properties for the lan. :type extra: ``dict`` Note: This class is ProfitBricks specific. """ def __init__(self, id, name, href, is_public, state, driver, extra=None): self.id = id self.name = name self.href = href self.is_public = is_public self.state = state self.driver = driver self.extra = extra or {} def __repr__(self): return (('') % (self.id, self.name, self.href)) class ProfitBricksLoadBalancer(object): """ Extention class which stores information about a ProfitBricks load balancer :param id: The ID of the load balancer. :param id: ``str`` :param name: The name of the load balancer. :type name: ``str`` :param href: The load balancer href. :type href: ``str`` :param state: The current state of the load balancer. :type state: ``int`` :param extra: Extra properties for the load balancer. :type extra: ``dict`` Note: This class is ProfitBricks specific """ def __init__(self, id, name, href, state, driver, extra=None): self.id = id self.name = name self.href = href self.state = state self.driver = driver self.extra = extra or {} def __repr__(self): return (('ProfitBricksLoadbalancer: id=%s, name=%s, href=%s>') % (self.id, self.name, self.href)) class ProfitBricksAvailabilityZone(object): """ Extension class which stores information about a ProfitBricks availability zone. :param name: The availability zone name. :type name: ``str`` Note: This class is ProfitBricks specific. """ def __init__(self, name): self.name = name def __repr__(self): return (('') % (self.name)) class ProfitBricksIPBlock(object): """ Extension class which stores information about a ProfitBricks IP block. :param id: The ID of the IP block. :param id: ``str`` :param name: The name of the IP block. :type name: ``str`` :param href: The IP block href. 
:type href: ``str`` :param location: The location of the IP block. :type location: ``str`` :param size: Number of IP addresses in the block. :type size: ``int`` :param ips: A collection of IPs associated with the block. :type ips: ``list`` :param state: The current state of the IP block. :type state: ``int`` :param extra: Extra properties for the IP block. :type extra: ``dict`` Note: This class is ProfitBricks specific """ def __init__( self, id, name, href, location, size, ips, state, driver, extra=None ): self.id = id self.name = name self.href = href self.location = location self.size = size self.ips = ips self.state = state self.driver = driver self.extra = extra or {} def __repr__(self): return ( ( '' ) % (self.id, self.name, self.href, self.location, self.size) ) class ProfitBricksNodeDriver(NodeDriver): """ Base ProfitBricks node driver. """ connectionCls = ProfitBricksConnection name = 'ProfitBricks' website = 'http://www.profitbricks.com' type = Provider.PROFIT_BRICKS PROVISIONING_STATE = { 'AVAILABLE': NodeState.RUNNING, 'BUSY': NodeState.PENDING, 'INACTIVE': NodeState.PENDING } NODE_STATE_MAP = { 'NOSTATE': NodeState.UNKNOWN, 'RUNNING': NodeState.RUNNING, 'BLOCKED': NodeState.STOPPED, 'PAUSE': NodeState.PAUSED, 'SHUTDOWN': NodeState.STOPPING, 'SHUTOFF': NodeState.STOPPED, 'CRASHED': NodeState.ERROR, 'AVAILABLE': NodeState.RUNNING, 'BUSY': NodeState.PENDING } AVAILABILITY_ZONE = { '1': {'name': 'AUTO'}, '2': {'name': 'ZONE_1'}, '3': {'name': 'ZONE_2'}, } """ ProfitBricks is unique in that they allow the user to define all aspects of the instance size, i.e. disk size, core size, and memory size. These are instance types that match up with what other providers support. You can configure disk size, core size, and memory size using the ``ex_`` parameters on the create_node method. 
""" PROFIT_BRICKS_GENERIC_SIZES = { '1': { 'id': '1', 'name': 'Micro', 'ram': 1024, 'disk': 50, 'cores': 1 }, '2': { 'id': '2', 'name': 'Small Instance', 'ram': 2048, 'disk': 50, 'cores': 1 }, '3': { 'id': '3', 'name': 'Medium Instance', 'ram': 4096, 'disk': 50, 'cores': 2 }, '4': { 'id': '4', 'name': 'Large Instance', 'ram': 7168, 'disk': 50, 'cores': 4 }, '5': { 'id': '5', 'name': 'ExtraLarge Instance', 'ram': 14336, 'disk': 50, 'cores': 8 }, '6': { 'id': '6', 'name': 'Memory Intensive Instance Medium', 'ram': 28672, 'disk': 50, 'cores': 4 }, '7': { 'id': '7', 'name': 'Memory Intensive Instance Large', 'ram': 57344, 'disk': 50, 'cores': 8 } } """ Core Functions """ def list_sizes(self): """ Lists all sizes :return: A list of all configurable node sizes. :rtype: ``list`` of :class:`NodeSize` """ sizes = [] for key, values in self.PROFIT_BRICKS_GENERIC_SIZES.items(): node_size = self._to_node_size(values) sizes.append(node_size) return sizes def list_images(self, image_type=None, is_public=True): """ List all images with an optional filter. :param image_type: The image type (HDD, CDROM) :type image_type: ``str`` :param is_public: Image is public :type is_public: ``bool`` :return: ``list`` of :class:`NodeImage` :rtype: ``list`` """ response = self.connection.request( action='images', params={'depth': 1}, method='GET' ) return self._to_images(response.object, image_type, is_public) def list_locations(self): """ List all locations. :return: ``list`` of :class:`NodeLocation` :rtype: ``list`` """ return self._to_locations(self.connection.request( action='locations', params={'depth': 1}, method='GET').object ) """ Node functions """ def list_nodes(self): """ List all nodes. 
:return: ``list`` of :class:`Node` :rtype: ``list`` """ datacenters = self.ex_list_datacenters() nodes = list() for datacenter in datacenters: servers_href = datacenter.extra['entities']['servers']['href'] response = self.connection.request( action=servers_href, params={'depth': 3}, method='GET', with_full_url=True ) mapped_nodes = self._to_nodes(response.object) nodes += mapped_nodes return nodes def reboot_node(self, node): """ Reboots the node. :rtype: ``bool`` """ action = node.extra['href'] + '/reboot' self.connection.request( action=action, method='POST', with_full_url=True ) return True def create_node( self, name, image=None, size=None, location=None, ex_cpu_family=None, volume=None, ex_datacenter=None, ex_network_interface=True, ex_internet_access=True, ex_exposed_public_ports=[], ex_exposed_private_ports=[22], ex_availability_zone=None, ex_ram=None, ex_cores=None, ex_disk=None, ex_password=None, ex_ssh_keys=None, ex_bus_type=None, ex_disk_type=None, **kwargs ): """ Creates a node. image is optional as long as you pass ram, cores, and disk to the method. ProfitBricks allows you to adjust compute resources at a much more granular level. :param name: The name for the new node. :param type: ``str`` :param image: The image to create the node with. :type image: :class:`NodeImage` :param size: Standard configured size offered by ProfitBricks - containing configuration for the number of cpu cores, amount of ram and disk size. :param size: :class:`NodeSize` :param location: The location of the new data center if one is not supplied. :type location: :class:`NodeLocation` :param ex_cpu_family: The CPU family to use (AMD_OPTERON, INTEL_XEON) :type ex_cpu_family: ``str`` :param volume: If the volume already exists then pass this in. :type volume: :class:`StorageVolume` :param ex_datacenter: If you've already created the DC then pass it in. :type ex_datacenter: :class:`Datacenter` :param ex_network_interface: Create with a network interface. 
:type ex_network_interface: : ``bool`` :param ex_internet_access: Configure public Internet access. :type ex_internet_access: : ``bool`` :param ex_exposed_public_ports: Ports to be opened for the public nic. :param ex_exposed_public_ports: ``list`` of ``int`` :param ex_exposed_private_ports: Ports to be opened for the private nic. :param ex_exposed_private_ports: ``list`` of ``int`` :param ex_availability_zone: The availability zone. :type ex_availability_zone: class: `ProfitBricksAvailabilityZone` :param ex_ram: The amount of ram required. :type ex_ram: : ``int`` :param ex_cores: The number of cores required. :type ex_cores: ``int`` :param ex_disk: The amount of disk required. :type ex_disk: ``int`` :param ex_password: The password for the volume. :type ex_password: ``str`` :param ex_ssh_keys: Optional SSH keys for the volume. :type ex_ssh_keys: ``list`` of ``str`` :param ex_bus_type: Volume bus type (VIRTIO, IDE). :type ex_bus_type: ``str`` :param ex_disk_type: Volume disk type (SSD, HDD). :type ex_disk_type: ``str`` :return: Instance of class ``Node`` :rtype: :class: `Node` """ """ If we have a volume we can determine the DC that it belongs to and set accordingly. """ if volume is not None: dc_url_pruned = volume.extra['href'].split('/')[:-2] dc_url = '/'.join(item for item in dc_url_pruned) ex_datacenter = self.ex_describe_datacenter( ex_href=dc_url ) if not ex_datacenter: ''' Determine location for new DC by getting the location of the image. ''' if not location: if image is not None: location = self.ex_describe_location( ex_location_id=image.extra['location'] ) ''' Creating a Datacenter for the node since one was not provided. ''' new_datacenter = self._create_new_datacenter_for_node( name=name, location=location ) ''' Then wait for the operation to finish, assigning the full data center on completion. 
''' ex_datacenter = self._wait_for_datacenter_state( datacenter=new_datacenter ) if not size: if not ex_ram: raise ValueError('You need to either pass a ' 'NodeSize or specify ex_ram as ' 'an extra parameter.') if not ex_cores: raise ValueError('You need to either pass a ' 'NodeSize or specify ex_cores as ' 'an extra parameter.') ''' If passing in an image we need to enfore a password or ssh keys. ''' if not volume and image is not None: if ex_password is None and ex_ssh_keys is None: raise ValueError( ( 'When creating a server without a ' 'volume, you need to specify either an ' 'array of SSH keys or a volume password.' ) ) if not size: if not ex_disk: raise ValueError('You need to either pass a ' 'StorageVolume, a NodeSize, or specify ' 'ex_disk as an extra parameter.') ''' You can override the suggested sizes by passing in unique values for ram, cores, and disk allowing you to size it for your specific use. ''' if image is not None: if not ex_disk: ex_disk = size.disk if not ex_disk_type: ex_disk_type = 'HDD' if not ex_bus_type: ex_bus_type = 'VIRTIO' if not ex_ram: ex_ram = size.ram if not ex_cores: ex_cores = size.extra['cores'] action = ex_datacenter.href + '/servers' body = { 'properties': { 'name': name, 'ram': ex_ram, 'cores': ex_cores }, 'entities': { 'volumes': { 'items': [] } } } ''' If we are using a pre-existing storage volume. 
''' if volume is not None: body['entities']['volumes']['items'].append({'id': volume.id}) elif image is not None: new_volume = { 'properties': { 'size': ex_disk, 'name': name + ' - volume', 'image': image.id, 'type': ex_disk_type, 'bus': ex_bus_type } } if ex_password is not None: new_volume['properties']['imagePassword'] = ex_password if ex_ssh_keys is not None: new_volume['properties']['sshKeys'] = ex_ssh_keys body['entities']['volumes']['items'].append(new_volume) if ex_network_interface is True: body['entities']['nics'] = {} body['entities']['nics']['items'] = list() ''' Get the LANs for the data center this node will be provisioned at. ''' dc_lans = self.ex_list_lans( datacenter=ex_datacenter ) private_lans = [lan for lan in dc_lans if lan.is_public is False] private_lan = None if private_lans: private_lan = private_lans[0] if private_lan is not None: private_nic = { 'properties': { 'name': name + ' - private nic', 'lan': private_lan.id, }, 'entities': { 'firewallrules': { 'items': [] } } } for port in ex_exposed_private_ports: private_nic['entities']['firewallrules']['items'].append( { 'properties': { 'name': ( '{name} - firewall rule:{port}'.format( name=name, port=port ) ), 'protocol': 'TCP', 'portRangeStart': port, 'portRangeEnd': port } } ) body['entities']['nics']['items'].append(private_nic) if ex_internet_access is not None and ex_internet_access is True: public_lans = [lan for lan in dc_lans if lan.is_public] public_lan = None if public_lans: public_lan = public_lans[0] if public_lan is not None: pub_nic = { 'properties': { 'name': name + ' - public nic', 'lan': public_lan.id, }, 'entities': { 'firewallrules': { 'items': [] } } } for port in ex_exposed_public_ports: pub_nic['entities']['firewallrules']['items'].append( { 'properties': { 'name': ( '{name} - firewall rule:{port}'.format( name=name, port=port ) ), 'protocol': 'TCP', 'portRangeStart': port, 'portRangeEnd': port } } ) body['entities']['nics']['items'].append(pub_nic) if ex_cpu_family is 
not None: body['properties']['cpuFamily'] = ex_cpu_family if ex_availability_zone is not None: body['properties']['availabilityZone'] = ex_availability_zone.name response = self.connection.request( action=action, headers={ 'Content-Type': 'application/json' }, data=body, method='POST', with_full_url=True ) return self._to_node(response.object, response.headers) def destroy_node(self, node, ex_remove_attached_disks=False): """ Destroys a node. :param node: The node you wish to destroy. :type volume: :class:`Node` :param ex_remove_attached_disks: True to destroy all attached volumes. :type ex_remove_attached_disks: : ``bool`` :rtype: : ``bool`` """ if ex_remove_attached_disks is True: for volume in self.ex_list_attached_volumes(node): self.destroy_volume(volume) action = node.extra['href'] self.connection.request( action=action, method='DELETE', with_full_url=True ) return True """ Volume Functions """ def list_volumes(self): """ List all volumes attached to a data center. :return: ``list`` of :class:`StorageVolume` :rtype: ``list`` """ datacenters = self.ex_list_datacenters() volumes = list() for datacenter in datacenters: volumes_href = datacenter.extra['entities']['volumes']['href'] response = self.connection.request( action=volumes_href, params={'depth': 3}, method='GET', with_full_url=True ) mapped_volumes = self._to_volumes(response.object) volumes += mapped_volumes return volumes def attach_volume(self, node, volume): """ Attaches a volume. :param node: The node to which you're attaching the volume. :type node: :class:`Node` :param volume: The volume you're attaching. 
:type volume: :class:`StorageVolume` :return: Instance of class ``StorageVolume`` :rtype: :class:`StorageVolume` """ action = node.extra['href'] + '/volumes' body = { 'id': volume.id } data = self.connection.request( action=action, headers={ 'Content-Type': 'application/json' }, data=body, method='POST', with_full_url=True ) return self._to_volume(data.object, data.headers) def create_volume( self, size, image, ex_datacenter, name=None, ex_type=None, ex_bus_type=None, ex_ssh_keys=None, ex_password=None, ex_availability_zone=None ): """ Creates a volume. :param size: The size of the volume in GB. :type size: ``int`` :param image: The OS image for the volume. :type image: :class:`NodeImage` :param ex_datacenter: The datacenter you're placing the storage in. (req) :type ex_datacenter: :class:`Datacenter` :param name: The name to be given to the volume. :param name: ``str`` :param ex_type: The type to be given to the volume (SSD or HDD). :param ex_type: ``str`` :param ex_bus_type: Bus type. Either IDE or VIRTIO (default). :type ex_bus_type: ``str`` :param ex_ssh_keys: Optional SSH keys. :type ex_ssh_keys: ``dict`` :param ex_password: Optional password for root. :type ex_password: ``str`` :param ex_availability_zone: Volume Availability Zone. 
:type ex_availability_zone: ``str`` :return: Instance of class ``StorageVolume`` :rtype: :class:`StorageVolume` """ if not ex_datacenter: raise ValueError('You need to specify a data center' ' to attach this volume to.') if not image: raise ValueError('You need to specify an image' ' to create this volume from.') if image.extra['image_type'] != 'HDD': raise ValueError('Invalid type of {image_type} specified for ' '{image_name}, which needs to be of type HDD' .format(image_type=image.extra['image_type'], image_name=image.name)) if ex_datacenter.extra['location'] != image.extra['location']: raise ValueError( 'The image {image_name} ' '(location: {image_location}) you specified ' 'is not available at the data center ' '{datacenter_name} ' '(location: {datacenter_location}).' .format( image_name=image.extra['name'], datacenter_name=ex_datacenter.extra['name'], image_location=image.extra['location'], datacenter_location=ex_datacenter.extra['location'] ) ) action = ex_datacenter.href + '/volumes' body = { 'properties': { 'size': size, 'image': image.id } } if name is not None: body['properties']['name'] = name if ex_type is not None: body['properties']['type'] = ex_type if ex_bus_type is not None: body['properties']['bus'] = ex_bus_type if ex_ssh_keys is not None: body['properties']['sshKeys'] = ex_ssh_keys if ex_password is not None: body['properties']['imagePassword'] = ex_password if ex_availability_zone is not None: body['properties']['availabilityZone'] = ex_availability_zone response = self.connection.request( action=action, headers={ 'Content-Type': 'application/json' }, data=body, method='POST', with_full_url=True ) return self._to_volume(response.object, response.headers) def detach_volume(self, node, volume): """ Detaches a volume. :param node: The node to which you're detaching the volume. :type node: :class:`Node` :param volume: The volume you're detaching. 
:type volume: :class:`StorageVolume` :rtype: :``bool`` """ action = node.extra['href'] + '/volumes/{volume_id}'.format( volume_id=volume.id ) self.connection.request( action=action, method='DELETE', with_full_url=True ) return True def destroy_volume(self, volume): """ Destroys a volume. :param volume: The volume you're destroying. :type volume: :class:`StorageVolume` :rtype: : ``bool`` """ action = volume.extra['href'] self.connection.request( action=action, method='DELETE', with_full_url=True ) return True """ Volume snapshot functions """ def list_snapshots(self): """ Fetches as a list of all snapshots :return: ``list`` of class ``VolumeSnapshot`` :rtype: `list` """ response = self.connection.request( action='/snapshots', params={'depth': 3}, method='GET' ) return self._to_snapshots(response.object) def create_volume_snapshot(self, volume): """ Creates a snapshot for a volume :param volume: The volume you're creating a snapshot for. :type volume: :class:`StorageVolume` :return: Instance of class ``VolumeSnapshot`` :rtype: :class:`VolumeSnapshot` """ action = volume.extra['href'] + '/create-snapshot' response = self.connection.request( action=action, headers={ 'Content-Type': 'application/x-www-form-urlencoded' }, method='POST', with_full_url=True ) return self._to_snapshot(response.object, response.headers) def destroy_volume_snapshot(self, snapshot): """ Delete a snapshot :param snapshot: The snapshot you wish to delete. :type: snapshot: :class:`VolumeSnapshot` :rtype ``bool`` """ action = snapshot.extra['href'] self.connection.request( action=action, method='DELETE', with_full_url=True ) return True """ Extension Functions """ """ Server Extension Functions """ def ex_stop_node(self, node): """ Stops a node. This also deallocates the public IP space. :param node: The node you wish to halt. 
:type node: :class:`Node` :rtype: : ``bool`` """ action = node.extra['href'] + '/stop' self.connection.request( action=action, method='POST', with_full_url=True ) return True def ex_start_node(self, node): """ Starts a node. :param node: The node you wish to start. :type node: :class:`Node` :rtype: ``bool`` """ action = node.extra['href'] + '/start' self.connection.request( action=action, method='POST', with_full_url=True ) return True def ex_list_availability_zones(self): """ Returns a list of availability zones. :return: ``list`` of :class:`ProfitBricksAvailabilityZone` :rtype: ``list`` """ availability_zones = [] for key, values in self.AVAILABILITY_ZONE.items(): name = copy.deepcopy(values)["name"] availability_zone = ProfitBricksAvailabilityZone( name=name ) availability_zones.append(availability_zone) return availability_zones def ex_list_attached_volumes(self, node): """ Returns a list of attached volumes for a server :param node: The node with the attached volumes. :type node: :class:`Node` :return: ``list`` of :class:`StorageVolume` :rtype: ``list`` """ action = node.extra['entities']['volumes']['href'] response = self.connection.request( action=action, params={'depth': 3}, method='GET', with_full_url=True ) return self._to_volumes(response.object) def ex_describe_node( self, ex_href=None, ex_datacenter_id=None, ex_node_id=None ): """ Fetches a node directly by href or by a combination of the datacenter ID and the server ID. :param ex_href: The href (url) of the node you wish to describe. :type ex_href: ``str`` :param ex_datacenter_id: The ID for the data center. :type ex_datacenter_id: ``str`` :param ex_node_id: The ID for the node (server). :type ex_node_id: ``str`` :return: Instance of class ``Node`` :rtype: :class:`Node` """ use_full_url = True if ex_href is None: if ex_datacenter_id is None or ex_node_id is None: raise ValueError( 'IDs for the data center and node are required.' 
) else: use_full_url = False ex_href = ( 'datacenters/{datacenter_id}/' 'servers/{server_id}' ).format( datacenter_id=ex_datacenter_id, server_id=ex_node_id ) response = self.connection.request( action=ex_href, method='GET', params={'depth': 3}, with_full_url=use_full_url ) return self._to_node(response.object) def ex_update_node(self, node, name=None, cores=None, ram=None, availability_zone=None, ex_licence_type=None, ex_boot_volume=None, ex_boot_cdrom=None, ex_cpu_family=None): """ Updates a node. :param node: The node you wish to update. :type node: :class:`Node` :param name: The new name for the node. :type name: ``str`` :param cores: The number of CPUs the node should have. :type cores: : ``int`` :param ram: The amount of ram the node should have. :type ram: : ``int`` :param availability_zone: Update the availability zone. :type availability_zone: :class:`ProfitBricksAvailabilityZone` :param ex_licence_type: Licence type (WINDOWS, LINUX, OTHER). :type ex_licence_type: ``str`` :param ex_boot_volume: Setting the new boot (HDD) volume. :type ex_boot_volume: :class:`StorageVolume` :param ex_boot_cdrom: Setting the new boot (CDROM) volume. :type ex_boot_cdrom: :class:`StorageVolume` :param ex_cpu_family: CPU family (INTEL_XEON, AMD_OPTERON). 
:type ex_cpu_family: ``str`` :return: Instance of class ``Node`` :rtype: :class: `Node` """ action = node.extra['href'] body = {} if name is not None: body['name'] = name if cores is not None: body['cores'] = cores if ram is not None: body['ram'] = ram if availability_zone is not None: body['availabilityZone'] = availability_zone.name if ex_licence_type is not None: body['licencetype'] = ex_licence_type if ex_boot_volume is not None: body['bootVolume'] = ex_boot_volume.id if ex_boot_cdrom is not None: body['bootCdrom'] = ex_boot_cdrom.id if ex_cpu_family is not None: body['allowReboot'] = True body['cpuFamily'] = ex_cpu_family response = self.connection.request( action=action, data=body, headers={ 'Content-Type': 'application/json' }, method='PATCH', with_full_url=True ) return self._to_node(response.object, response.headers) """ Data center Extension Functions """ def ex_create_datacenter( self, name, location, description=None ): """ Creates a datacenter. ProfitBricks has a concept of datacenters. These represent buckets into which you can place various compute resources. :param name: The datacenter name. :type name: : ``str`` :param location: instance of class ``NodeLocation``. :type location: : ``NodeLocation`` :param description: The datacenter description. 
:type description: : ``str`` :return: Instance of class ``Datacenter`` :rtype: :class:`Datacenter` """ body = { 'properties': { 'name': name, 'location': location.id } } if description is not None: body['properties']['description'] = description body['entities'] = defaultdict(dict) body['entities']['lans']['items'] = [ { 'properties': { 'name': name + ' - public lan', 'public': True } }, { 'properties': { 'name': name + ' - private lan', 'public': False } } ] response = self.connection.request( action='datacenters', headers={ 'Content-Type': 'application/json' }, data=body, method='POST' ) return self._to_datacenter(response.object, response.headers) def ex_destroy_datacenter(self, datacenter): """ Destroys a datacenter. :param datacenter: The DC you're destroying. :type datacenter: :class:`Datacenter` :rtype: : ``bool`` """ action = datacenter.href self.connection.request( action=action, method='DELETE', with_full_url=True ) return True def ex_describe_datacenter(self, ex_href=None, ex_datacenter_id=None): """ Fetches the details for a data center. :param ex_href: The href for the data center you are describing. :type ex_href: ``str`` :param ex_datacenter_id: The ID for the data cente you are describing. :type ex_datacenter_id: ``str`` :return: Instance of class ``Datacenter`` :rtype: :class:`Datacenter` """ use_full_url = True if ex_href is None: if ex_datacenter_id is None: raise ValueError( 'The data center ID is required.' ) else: use_full_url = False ex_href = ( 'datacenters/{datacenter_id}' ).format( datacenter_id=ex_datacenter_id ) response = self.connection.request( action=ex_href, method='GET', params={'depth': 3}, with_full_url=use_full_url ) return self._to_datacenter(response.object) def ex_list_datacenters(self): """ Lists all datacenters. 
:return: ``list`` of :class:`DataCenter` :rtype: ``list`` """ response = self.connection.request( action='datacenters', params={'depth': 2}, method='GET' ) return self._to_datacenters(response.object) def ex_rename_datacenter(self, datacenter, name): """ Update a datacenter. :param datacenter: The DC you are renaming. :type datacenter: :class:`Datacenter` :param name: The DC name. :type name: : ``str`` :return: Instance of class ``Datacenter`` :rtype: :class:`Datacenter` """ action = datacenter.href body = { 'name': name } response = self.connection.request( action=action, headers={ 'Content-Type': 'application/json' }, data=body, method='PATCH', with_full_url=True ) return self._to_datacenter(response.object, response.headers) """ Image Extension Functions """ def ex_describe_image(self, ex_href=None, ex_image_id=None): """ Describe a ProfitBricks image :param ex_href: The href for the image you are describing :type ex_href: ``str`` :param ex_image_id: The ID for the image you are describing :type ex_image_id: ``str`` :return: Instance of class ``Image`` :rtype: :class:`Image` """ use_full_url = True if ex_href is None: if ex_image_id is None: raise ValueError( 'The image ID is required.' ) else: use_full_url = False ex_href = ( 'images/{image_id}' ).format( image_id=ex_image_id ) response = self.connection.request( action=ex_href, method='GET', with_full_url=use_full_url ) return self._to_image(response.object) def ex_delete_image(self, image): """ Delete a private image :param image: The private image you are deleting. 
:type image: :class:`NodeImage` :rtype: : ``bool`` """ self.connection.request( action=image.extra['href'], method='DELETE', with_full_url=True ) return True def ex_update_image( self, image, name=None, description=None, licence_type=None, cpu_hot_plug=None, cpu_hot_unplug=None, ram_hot_plug=None, ram_hot_unplug=None, nic_hot_plug=None, nic_hot_unplug=None, disc_virtio_hot_plug=None, disc_virtio_hot_unplug=None, disc_scsi_hot_plug=None, disc_scsi_hot_unplug=None ): """ Update a private image :param image: The private image you are deleting. :type image: :class:`NodeImage` :return: Instance of class ``Image`` :rtype: :class:`Image` """ action = image.extra['href'] body = {} if name is not None: body['name'] = name if description is not None: body['description'] = description if licence_type is not None: body['licence_type'] = licence_type if cpu_hot_plug is not None: body['cpu_hot_plug'] = cpu_hot_plug if cpu_hot_unplug is not None: body['cpu_hot_unplug'] = cpu_hot_unplug if ram_hot_plug is not None: body['ram_hot_plug'] = ram_hot_plug if ram_hot_unplug is not None: body['ram_hot_unplug'] = ram_hot_unplug if nic_hot_plug is not None: body['nic_hot_plug'] = nic_hot_plug if nic_hot_unplug is not None: body['nic_hot_unplug'] = nic_hot_unplug if disc_virtio_hot_plug is not None: body['disc_virtio_hot_plug'] = disc_virtio_hot_plug if disc_virtio_hot_unplug is not None: body['disc_virtio_hot_unplug'] = disc_virtio_hot_unplug if disc_scsi_hot_plug is not None: body['disc_scsi_hot_plug'] = disc_scsi_hot_plug if disc_scsi_hot_unplug is not None: body['disc_scsi_hot_unplug'] = disc_scsi_hot_unplug response = self.connection.request( action=action, headers={ 'Content-type': 'application/json' }, data=body, method='PATCH', with_full_url=True ) return self._to_image(response.object, response.headers) """ Location Extension Functions """ def ex_describe_location(self, ex_href=None, ex_location_id=None): """ Fetch details for a ProfitBricks location. 
:param ex_href: The href for the location you are describing. :type ex_href: ``str`` :param ex_location_id: The id for the location you are describing ('de/fra', 'de/fkb', 'us/las') :type ex_location_id: ``str`` :return: Instance of class ``NodeLocation`` :rtype: :class:`NodeLocation` """ use_full_url = True if ex_href is None: if ex_location_id is None: raise ValueError( 'The loctation ID is required.' ) else: use_full_url = False ex_href = ( 'locations/{location_id}' ).format( location_id=ex_location_id ) response = self.connection.request( action=ex_href, method='GET', with_full_url=use_full_url ) return self._to_location(response.object) """ Network Interface Extension Functions """ def ex_list_network_interfaces(self): """ Fetch a list of all network interfaces from all data centers. :return: ``list`` of class ``ProfitBricksNetworkInterface`` :rtype: `list` """ nodes = self.list_nodes() nics = list() for node in nodes: action = node.extra['entities']['nics']['href'] nics += self._to_interfaces( self.connection.request( action=action, params={'depth': 1}, method='GET', with_full_url=True ).object) return nics def ex_describe_network_interface( self, ex_href=None, ex_datacenter_id=None, ex_server_id=None, ex_nic_id=None ): """ Fetch information on a network interface. :param ex_href: The href of the NIC you wish to describe. :type ex_href: ``str`` :param ex_datacenter_id: The ID of parent data center of the NIC you wish to describe. :type ex_datacenter_id: ``str`` :param ex_server_id: The server the NIC is connected to. :type ex_server_id: ``str`` :param ex_nic_id: The ID of the NIC :type ex_nic_id: ``str`` :return: Instance of class ``ProfitBricksNetworkInterface`` :rtype: :class:`ProfitBricksNetworkInterface` """ use_full_url = True if ex_href is None: if ( ex_datacenter_id is None or ex_server_id is None or ex_nic_id is None ): raise ValueError( ( 'IDs are required for the data center', 'server and network interface.' 
) ) else: use_full_url = False ex_href = ( 'datacenters/{datacenter_id}' '/servers/{server_id}' '/nics/{nic_id}' ).format( datacenter_id=ex_datacenter_id, server_id=ex_server_id, nic_id=ex_nic_id ) response = self.connection.request( action=ex_href, method='GET', with_full_url=use_full_url ) return self._to_interface(response.object) def ex_create_network_interface(self, node, lan_id=None, ips=None, nic_name=None, dhcp_active=True): """ Creates a network interface. :param lan_id: The ID for the LAN. :type lan_id: : ``int`` :param ips: The IP addresses for the NIC. :type ips: ``list`` :param nic_name: The name of the NIC, e.g. PUBLIC. :type nic_name: ``str`` :param dhcp_active: Set to false to disable. :type dhcp_active: ``bool`` :return: Instance of class ``ProfitBricksNetworkInterface`` :rtype: :class:`ProfitBricksNetworkInterface` """ if lan_id is not None: lan_id = str(lan_id) else: lan_id = str(1) action = node.extra['href'] + '/nics' body = { 'properties': { 'lan': lan_id, 'dhcp': dhcp_active } } if ips is not None: body['properties']['ips'] = ips if nic_name is not None: body['properties']['name'] = nic_name response = self.connection.request( action=action, headers={ 'Content-Type': 'application/json' }, data=body, method='POST', with_full_url=True ) return self._to_interface(response.object, response.headers) def ex_update_network_interface(self, network_interface, name=None, lan_id=None, ips=None, dhcp_active=None): """ Updates a network interface. :param network_interface: The network interface being updated. :type network_interface: :class:`ProfitBricksNetworkInterface` :param name: The name of the NIC, e.g. PUBLIC. :type name: ``str`` :param lan_id: The ID for the LAN. :type lan_id: : ``int`` :param ips: The IP addresses for the NIC as a list. :type ips: ``list`` :param dhcp_active: Set to false to disable. 
        :type dhcp_active: ``bool``

        :return: Instance of class ``ProfitBricksNetworkInterface``
        :rtype: :class:`ProfitBricksNetworkInterface`
        """
        if lan_id:
            lan_id = str(lan_id)

        action = network_interface.href
        body = {}

        if name is not None:
            body['name'] = name

        if lan_id is not None:
            # NOTE(review): lan_id was already stringified above, so this
            # second str() is redundant but harmless.
            body['lan'] = str(lan_id)

        if ips is not None:
            body['ips'] = ips

        if dhcp_active is not None:
            body['dhcp'] = dhcp_active

        response = self.connection.request(
            action=action,
            headers={
                'Content-Type': 'application/json'
            },
            data=body,
            method='PATCH',
            with_full_url=True
        )

        return self._to_interface(response.object, response.headers)

    def ex_destroy_network_interface(self, network_interface):
        """
        Destroy a network interface.

        :param network_interface: The NIC you wish to destroy.
        :type network_interface: :class:`ProfitBricksNetworkInterface`

        :rtype: : ``bool``
        """
        action = network_interface.href
        self.connection.request(
            action=action,
            method='DELETE',
            with_full_url=True
        )

        return True

    def ex_set_inet_access(self, network_interface, internet_access=True):
        """
        Add/remove public internet access to an interface.

        :param network_interface: The NIC you wish to update.
        :type network_interface: :class:`ProfitBricksNetworkInterface`

        :return: Instance of class ``ProfitBricksNetworkInterface``
        :rtype: :class:`ProfitBricksNetworkInterface`
        """
        action = network_interface.href
        # Internet access is toggled through the NIC's 'nat' property.
        body = {
            'nat': internet_access
        }

        response = self.connection.request(
            action=action,
            headers={
                'Content-Type': 'application/json'
            },
            data=body,
            method='PATCH',
            with_full_url=True
        )

        return self._to_interface(response.object, response.headers)

    """
    Firewall Rule Extension Functions
    """

    def ex_list_firewall_rules(self, network_interface):
        """
        Fetch firewall rules for a network interface.

        :param network_interface: The network interface.
:type network_interface: :class:`ProfitBricksNetworkInterface` :return: ``list`` of class ``ProfitBricksFirewallRule`` :rtype: `list` """ action = network_interface.href + '/firewallrules' response = self.connection.request( action=action, method='GET', params={'depth': 3}, with_full_url=True ) return self._to_firewall_rules(response.object) def ex_describe_firewall_rule( self, ex_href=None, ex_datacenter_id=None, ex_server_id=None, ex_nic_id=None, ex_firewall_rule_id=None ): """ Fetch data for a firewall rule. :param href: The href of the firewall rule you wish to describe. :type href: ``str`` :param ex_datacenter_id: The ID of parent data center of the NIC you wish to describe. :type ex_datacenter_id: ``str`` :param ex_server_id: The server the NIC is connected to. :type ex_server_id: ``str`` :param ex_nic_id: The ID of the NIC. :type ex_nic_id: ``str`` :param ex_firewall_rule_id: The ID of the firewall rule. :type ex_firewall_rule_id: ``str`` :return: Instance class ``ProfitBricksFirewallRule`` :rtype: :class:`ProfitBricksFirewallRule` """ use_full_url = True if ex_href is None: if ( ex_datacenter_id is None or ex_server_id is None or ex_nic_id is None or ex_firewall_rule_id is None ): raise ValueError( ( 'IDs are required for the data ' 'center, server, network interface', 'and firewall rule.' ) ) else: use_full_url = False ex_href = ( 'datacenters/{datacenter_id}' '/servers/{server_id}' '/nics/{nic_id}' '/firewallrules/{firewall_rule_id}' ).format( datacenter_id=ex_datacenter_id, server_id=ex_server_id, nic_id=ex_nic_id, firewall_rule_id=ex_firewall_rule_id ) response = self.connection.request( action=ex_href, method='GET', with_full_url=use_full_url ) return self._to_firewall_rule(response.object) def ex_create_firewall_rule(self, network_interface, protocol, name=None, source_mac=None, source_ip=None, target_ip=None, port_range_start=None, port_range_end=None, icmp_type=None, icmp_code=None): """ Create a firewall rule for a network interface. 
:param network_interface: The network interface to attach the firewall rule to. :type: network_interface: :class:`ProfitBricksNetworkInterface` :param protocol: The protocol for the rule (TCP, UDP, ICMP, ANY) :type protocol: ``str`` :param name: The name for the firewall rule :type name: ``str`` :param source_mac: Only traffic originating from the respective MAC address is allowed. Valid format: aa:bb:cc:dd:ee:ff. Value null allows all source MAC address. :type source_mac: ``str`` :param source_ip: Only traffic originating from the respective IPv4 address is allowed. Value null allows all source IPs. :type source_ip: ``str`` :param target_ip: In case the target NIC has multiple IP addresses, only traffic directed to the respective IP address of the NIC is allowed. Value null allows all target IPs. :type target_ip: ``str`` :param port_range_start: Defines the start range of the allowed port (from 1 to 65534) if protocol TCP or UDP is chosen. Leave portRangeStart and portRangeEnd value null to allow all ports. type: port_range_start: ``int`` :param port_range_end: Defines the end range of the allowed port (from 1 to 65534) if protocol TCP or UDP is chosen. Leave portRangeStart and portRangeEnd value null to allow all ports. type: port_range_end: ``int`` :param icmp_type: Defines the allowed type (from 0 to 254) if the protocol ICMP is chosen. Value null allows all types. :type icmp_type: ``int`` :param icmp_code: Defines the allowed code (from 0 to 254) if protocol ICMP is chosen. Value null allows all codes. 
:type icmp_code: ``int`` :return: Instance class ``ProfitBricksFirewallRule`` :rtype: :class:`ProfitBricksFirewallRule` """ action = network_interface.href + '/firewallrules' body = { 'properties': { 'protocol': protocol } } if name is not None: body['properties']['name'] = name if source_mac is not None: body['properties']['sourceMac'] = source_mac if source_ip is not None: body['properties']['sourceIp'] = source_ip if target_ip is not None: body['properties']['targetIp'] = target_ip if port_range_start is not None: body['properties']['portRangeStart'] = str(port_range_start) if port_range_end is not None: body['properties']['portRangeEnd'] = str(port_range_end) if icmp_type is not None: body['properties']['icmpType'] = str(icmp_type) if icmp_code is not None: body['properties']['icmpType'] = str(icmp_code) response = self.connection.request( action=action, headers={ 'Content-Type': 'application/json' }, data=body, method='POST', with_full_url=True ) return self._to_firewall_rule(response.object, response.headers) def ex_update_firewall_rule(self, firewall_rule, name=None, source_mac=None, source_ip=None, target_ip=None, port_range_start=None, port_range_end=None, icmp_type=None, icmp_code=None): """ Update a firewall rule :param firewall_rule: The firewall rule to update :type: firewall_rule: :class:`ProfitBricksFirewallRule` :param name: The name for the firewall rule :type name: ``str`` :param source_mac: Only traffic originating from the respective MAC address is allowed. Valid format: aa:bb:cc:dd:ee:ff. Value null allows all source MAC address. :type source_mac: ``str`` :param source_ip: Only traffic originating from the respective IPv4 address is allowed. Value null allows all source IPs. :type source_ip: ``str`` :param target_ip: In case the target NIC has multiple IP addresses, only traffic directed to the respective IP address of the NIC is allowed. Value null allows all target IPs. 
:type target_ip: ``str`` :param port_range_start: Defines the start range of the allowed port (from 1 to 65534) if protocol TCP or UDP is chosen. Leave portRangeStart and portRangeEnd value null to allow all ports. type: port_range_start: ``int`` :param port_range_end: Defines the end range of the allowed port (from 1 to 65534) if protocol TCP or UDP is chosen. Leave portRangeStart and portRangeEnd value null to allow all ports. type: port_range_end: ``int`` :param icmp_type: Defines the allowed type (from 0 to 254) if the protocol ICMP is chosen. Value null allows all types. :type icmp_type: ``int`` :param icmp_code: Defines the allowed code (from 0 to 254) if protocol ICMP is chosen. Value null allows all codes. :type icmp_code: ``int`` :return: Instance class ``ProfitBricksFirewallRule`` :rtype: :class:`ProfitBricksFirewallRule` """ action = firewall_rule.href body = {} if name is not None: body['name'] = name if source_mac is not None: body['sourceMac'] = source_mac if source_ip is not None: body['sourceIp'] = source_ip if target_ip is not None: body['targetIp'] = target_ip if port_range_start is not None: body['portRangeStart'] = str(port_range_start) if port_range_end is not None: body['portRangeEnd'] = str(port_range_end) if icmp_type is not None: body['icmpType'] = str(icmp_type) if icmp_code is not None: body['icmpType'] = str(icmp_code) response = self.connection.request( action=action, headers={ 'Content-Type': 'application/json' }, data=body, method='PATCH', with_full_url=True ) return self._to_firewall_rule(response.object, response.headers) def ex_delete_firewall_rule(self, firewall_rule): """ Delete a firewall rule :param firewall_rule: The firewall rule to delete. 
        :type: firewall_rule: :class:`ProfitBricksFirewallRule`

        :rtype ``bool``
        """
        action = firewall_rule.href
        self.connection.request(
            action=action,
            method='DELETE',
            with_full_url=True
        )

        return True

    """
    LAN extension functions
    """

    def ex_list_lans(self, datacenter=None):
        """
        List local area network on:
        - a datacenter if one is specified
        - all datacenters if none specified

        :param datacenter: The data center to list LANs for (optional).
        :type datacenter: :class:`Datacenter`

        :return: ``list`` of class ``ProfitBricksLan``
        :rtype: `list`
        """
        if datacenter is not None:
            # Only the LANs of the given data center.
            action = datacenter.extra['entities']['lans']['href']
            request = self.connection.request(
                action=action,
                params={'depth': 3},
                method='GET',
                with_full_url=True
            )
            lans = self._to_lans(request.object)
        else:
            # Aggregate the LANs of every data center.
            datacenters = self.ex_list_datacenters()
            lans = []
            for datacenter in datacenters:
                action = datacenter.extra['entities']['lans']['href']
                request = self.connection.request(
                    action=action,
                    params={'depth': 3},
                    method='GET',
                    with_full_url=True
                )
                lans += self._to_lans(request.object)

        return lans

    def ex_create_lan(self, datacenter, is_public=False, nics=None):
        """
        Create and attach a Lan to a data center.

        :param datacenter: The data center to attach the LAN to.
        :type datacenter: :class:`Datacenter`

        :param is_public: True if the Lan is to have internet access.
        :type is_public: ``bool``

        :param nics: Optional network interfaces to attach to the lan.
        :param nics: ``list`` of class ``ProfitBricksNetworkInterface``

        :return: Instance class ``ProfitBricksLan``
        :rtype: :class:`ProfitBricksLan`
        """
        action = datacenter.extra['entities']['lans']['href']
        body = {
            'properties': {
                'name': 'LAN - {datacenter_name}'.format(
                    datacenter_name=datacenter.name
                ),
                'public': is_public
            }
        }

        if nics is not None:
            body['entities'] = defaultdict(dict)
            body['entities']['nics']['items'] = [
                {'id': nic.id} for nic in nics
            ]

        request = self.connection.request(
            action=action,
            headers={
                'Content-Type': 'application/json'
            },
            data=body,
            method='POST',
            with_full_url=True
        )

        return self._to_lan(request.object, request.headers)

    def ex_describe_lan(
        self, ex_href=None, ex_datacenter_id=None,
        ex_lan_id=None
    ):
        """
        Fetch data on a local area network

        :param ex_href: The href of the lan you wish to describe.
        :type ex_href: ``str``

        :param ex_datacenter_id: The ID of the parent
            datacenter for the LAN.
        :type ex_datacenter_id: ``str``

        :param ex_lan_id: The ID of LAN.
        :type ex_lan_id: ``str``

        :return: Instance class ``ProfitBricksLan``
        :rtype: :class:`ProfitBricksLan`
        """
        use_full_url = True

        if ex_href is None:
            if ex_datacenter_id is None or ex_lan_id is None:
                raise ValueError(
                    'IDs for the data center and LAN are required.'
                )
            else:
                use_full_url = False
                ex_href = (
                    'datacenters/{datacenter_id}/'
                    'lans/{lan_id}'
                ).format(
                    datacenter_id=ex_datacenter_id,
                    lan_id=ex_lan_id
                )

        response = self.connection.request(
            action=ex_href,
            method='GET',
            params={'depth': 1},
            with_full_url=use_full_url
        )

        return self._to_lan(response.object)

    def ex_update_lan(self, lan, is_public, name=None):
        """
        Update a local area network

        :param lan: The lan you wish to update.
        :type: lan: :class:`ProfitBricksLan`

        :param is_public: Boolean indicating if
            the lan faces the public internet.
        :type is_public: ``bool``

        :param name: The name of the lan.
        :type name: ``str``

        :return: Instance class ``ProfitBricksLan``
        :rtype: :class:`ProfitBricksLan`
        """
        action = lan.href
        body = {
            'public': is_public
        }

        if name is not None:
            body['name'] = name

        request = self.connection.request(
            action=action,
            headers={
                'Content-Type': 'application/json'
            },
            data=body,
            method='PATCH',
            with_full_url=True
        )

        return self._to_lan(request.object, request.headers)

    def ex_delete_lan(self, lan):
        """
        Delete a local area network

        :param lan: The lan you wish to delete.
        :type: lan: :class:`ProfitBrickLan`

        :rtype ``bool``
        """
        action = lan.href
        self.connection.request(
            action=action,
            method='DELETE',
            with_full_url=True
        )

        return True

    """
    Volume extension functions
    """

    def ex_update_volume(
        self, volume,
        ex_storage_name=None,
        size=None,
        ex_bus_type=None
    ):
        """
        Updates a volume.

        :param volume: The volume you're updating.
        :type volume: :class:`StorageVolume`

        :param ex_storage_name: The name of the volume.
        :type ex_storage_name: ``str``

        :param size: The desired size.
        :type size: ``int``

        :param ex_bus_type: Volume bus type (VIRTIO, IDE).
        :type ex_bus_type: ``str``

        :return: Instance of class ``StorageVolume``
        :rtype: :class:`StorageVolume`
        """
        # Name and size always have to be sent; fall back to the volume's
        # current values when the caller did not supply them.
        if not ex_storage_name:
            ex_storage_name = volume.name

        if not size:
            size = str(volume.size)

        action = volume.extra['href']
        body = {
            'name': ex_storage_name,
            'size': size
        }

        if ex_bus_type is not None:
            body['bus'] = ex_bus_type

        response = self.connection.request(
            action=action,
            headers={
                'Content-Type': 'application/json'
            },
            data=body,
            method='PATCH',
            with_full_url=True
        )

        return self._to_volume(response.object, response.headers)

    def ex_describe_volume(
        self, ex_href=None,
        ex_datacenter_id=None,
        ex_volume_id=None
    ):
        """
        Fetches and returns a volume

        :param ex_href: The full href (url) of the volume.
        :type ex_href: ``str``

        :param ex_datacenter_id: The ID of the parent
            datacenter for the volume.
        :type ex_datacenter_id: ``str``

        :param ex_volume_id: The ID of the volume.
        :type ex_volume_id: ``str``

        :return: Instance of class ``StorageVolume``
        :rtype: :class:`StorageVolume`
        """
        use_full_url = True

        if ex_href is None:
            if ex_datacenter_id is None or ex_volume_id is None:
                raise ValueError(
                    'IDs for the data center and volume are required.'
                )
            else:
                use_full_url = False
                ex_href = (
                    'datacenters/{datacenter_id}/'
                    'volumes/{volume_id}'
                ).format(
                    datacenter_id=ex_datacenter_id,
                    volume_id=ex_volume_id
                )

        response = self.connection.request(
            action=ex_href,
            method='GET',
            params={'depth': 3},
            with_full_url=use_full_url
        )

        return self._to_volume(response.object)

    def ex_restore_volume_snapshot(self, volume, snapshot):
        """
        Restores a snapshot for a volume

        :param volume: The volume you're restoring the snapshot to.
        :type volume: :class:`StorageVolume`

        :param snapshot: The snapshot you're restoring to the volume.
        :type snapshot: :class:`ProfitBricksSnapshot`

        :rtype ``bool``
        """
        action = volume.extra['href'] + '/restore-snapshot'
        data = {'snapshotId': snapshot.id}
        # This endpoint expects a form-encoded body, not JSON.
        # NOTE(review): urllib.urlencode is Python 2 only; confirm how
        # 'urllib' is bound at the top of this module (Python 3 moved it
        # to urllib.parse.urlencode).
        body = urllib.urlencode(data)

        self.connection.request(
            action=action,
            headers={
                'Content-Type': 'application/x-www-form-urlencoded'
            },
            data=body,
            method='POST',
            with_full_url=True
        )

        return True

    """
    Volume snapshot extension functions
    """

    def ex_describe_snapshot(self, ex_href=None, ex_snapshot_id=None):
        """
        Fetches and returns a volume snapshot

        :param ex_href: The full href (url) of the snapshot.
        :type ex_href: ``str``

        :param ex_snapshot_id: The ID of the snapshot.
        :type ex_snapshot_id: ``str``

        :return: Instance of class ``ProfitBricksSnapshot``
        :rtype: :class:`ProfitBricksSnapshot`
        """
        use_full_url = True

        if ex_href is None:
            if ex_snapshot_id is None:
                raise ValueError(
                    'The snapshot ID is required.'
) else: use_full_url = False ex_href = ( 'snapshots/{snapshot_id}' ).format( snapshot_id=ex_snapshot_id ) response = self.connection.request( action=ex_href, params={'depth': 3}, method='GET', with_full_url=use_full_url ) return self._to_snapshot(response.object) def ex_update_snapshot( self, snapshot, name=None, description=None, cpu_hot_plug=None, cpu_hot_unplug=None, ram_hot_plug=None, ram_hot_unplug=None, nic_hot_plug=None, nic_hot_unplug=None, disc_virtio_hot_plug=None, disc_virtio_hot_unplug=None, disc_scsi_hot_plug=None, disc_scsi_hot_unplug=None, licence_type=None ): """ Updates a snapshot :param snapshot: The snapshot you're restoring to the volume. :type snapshot: :class:`VolumeSnapshot` :param name: The snapshot name :type name: `str` :param description: The snapshot description :type description: `str` :param cpu_hot_plug: Snapshot CPU is hot pluggalbe :type cpu_hot_plug: `str` :param cpu_hot_unplug: Snapshot CPU is hot unpluggalbe :type cpu_hot_unplug: `str` :param ram_hot_plug: Snapshot RAM is hot pluggalbe :type ram_hot_plug: `str` :param ram_hot_unplug: Snapshot RAM is hot unpluggalbe :type ram_hot_unplug: `str` :param nic_hot_plug: Snapshot Network Interface is hot pluggalbe :type nic_hot_plug: `str` :param nic_hot_unplug: Snapshot Network Interface is hot unpluggalbe :type nic_hot_unplug: `str` :param disc_virtio_hot_plug: Snapshot VIRTIO disk is hot pluggalbe :type disc_virtio_hot_plug: `str` :param disc_virtio_hot_unplug: Snapshot VIRTIO disk is hot unpluggalbe :type disc_virtio_hot_unplug: `str` :param disc_scsi_hot_plug: Snapshot SCSI disk is hot pluggalbe :type disc_scsi_hot_plug: `str` :param disc_scsi_hot_unplug: Snapshot SCSI disk is hot unpluggalbe :type disc_scsi_hot_unplug: `str` :param licence_type: The snapshot licence_type :type licence_type: `str` :return: Instance of class ``VolumeSnapshot`` :rtype: :class:`VolumeSnapshot` """ action = snapshot.extra['href'] body = {} if name is not None: body['name'] = name if description is not 
None: body['description'] = description if cpu_hot_plug is not None: body['cpuHotPlug'] = cpu_hot_plug if cpu_hot_unplug is not None: body['cpuHotUnplug'] = cpu_hot_unplug if ram_hot_plug is not None: body['ramHotPlug'] = ram_hot_plug if ram_hot_unplug is not None: body['ramHotUnplug'] = ram_hot_unplug if nic_hot_plug is not None: body['nicHotPlug'] = nic_hot_plug if nic_hot_unplug is not None: body['nicHotUnplug'] = nic_hot_unplug if disc_virtio_hot_plug is not None: body['discVirtioHotPlug'] = disc_virtio_hot_plug if disc_virtio_hot_unplug is not None: body['discVirtioHotUnplug'] = disc_virtio_hot_unplug if disc_scsi_hot_plug is not None: body['discScsiHotPlug'] = disc_scsi_hot_plug if disc_scsi_hot_unplug is not None: body['discScsiHotUnplug'] = disc_scsi_hot_unplug if licence_type is not None: body['licenceType'] = licence_type response = self.connection.request( action=action, params={ 'Content-Type': 'application/json' }, data=body, method='PATCH', with_full_url=True ) return self._to_snapshot(response.object, response.headers) """ Load balancer extension functions """ def ex_list_load_balancers(self): """ Fetches as a list of load balancers :return: ``list`` of class ``ProfitBricksLoadBalancer`` :rtype: `list` """ datacenters = self.ex_list_datacenters() load_balancers = list() for datacenter in datacenters: extra = datacenter.extra load_balancers_href = extra['entities']['loadbalancers']['href'] response = self.connection.request( action=load_balancers_href, params={'depth': 3}, method='GET', with_full_url=True ) mapped_load_balancers = self._to_load_balancers(response.object) load_balancers += mapped_load_balancers return load_balancers def ex_describe_load_balancer( self, ex_href=None, ex_datacenter_id=None, ex_load_balancer_id=None ): """ Fetches and returns a load balancer :param href: The full href (url) of the load balancer. :type href: ``str`` :param ex_datacenter_id: The ID of the parent data center for the load balancer. 
        :type ex_datacenter_id: ``str``

        :param ex_load_balancer_id: The load balancer ID.
        :type ex_load_balancer_id: ``str``

        :return: Instance of class ``ProfitBricksLoadBalancer``
        :rtype: :class:`ProfitBricksLoadBalancer`
        """
        use_full_url = True

        if ex_href is None:
            if (
                ex_datacenter_id is None or
                ex_load_balancer_id is None
            ):
                raise ValueError(
                    (
                        'IDs for the data center and '
                        'load balancer are required.'
                    )
                )
            else:
                use_full_url = False
                ex_href = (
                    'datacenters/{datacenter_id}/'
                    'loadbalancers/{load_balancer_id}'
                ).format(
                    datacenter_id=ex_datacenter_id,
                    load_balancer_id=ex_load_balancer_id
                )

        response = self.connection.request(
            action=ex_href,
            params={'depth': 3},
            method='GET',
            with_full_url=use_full_url
        )

        return self._to_load_balancer(response.object)

    def ex_create_load_balancer(
        self, datacenter, name=None,
        ip=None, dhcp=None, nics=None
    ):
        """
        Create and attach a load balancer to a data center.

        :param datacenter: The data center to attach the load balancer to.
        :type datacenter: :class:`Datacenter`

        :param name: Load balancer name.
        :type name: ``str``

        :param ip: Load balancer IPV4 address.
        :type ip: ``str``

        :param dhcp: If true, the load balancer
            will reserve an IP address using DHCP.
        :type dhcp: ``bool``

        :param nics: Optional network interfaces
            taking part in load balancing.
        :param nics: ``list`` of class ``ProfitBricksNetworkInterface``

        :return: Instance class ``ProfitBricksLoadBalancer``
        :rtype: :class:`ProfitBricksLoadBalancer`
        """
        action = datacenter.extra['entities']['loadbalancers']['href']
        # A default name based on the data center is used when the caller
        # does not supply one.
        body = {
            'properties': {
                'name': name or 'Load Balancer - {datacenter_name}'
                .format(datacenter_name=datacenter.name)
            }
        }

        if ip is not None:
            body['properties']['ip'] = ip

        if dhcp is not None:
            body['properties']['dhcp'] = dhcp

        if nics is not None:
            body['entities'] = defaultdict(dict)
            body['entities']['balancednics']['items'] = [
                {'id': nic.id} for nic in nics
            ]

        response = self.connection.request(
            action=action,
            headers={
                'Content-Type': 'application/json'
            },
            data=body,
            method='POST',
            with_full_url=True
        )

        return self._to_load_balancer(response.object, response.headers)

    def ex_update_load_balancer(
        self, load_balancer, name=None,
        ip=None, dhcp=None
    ):
        """
        Update a load balancer

        :param load_balancer: The load balancer you wish to update.
        :type: load_balancer: :class:`ProfitBricksLoadBalancer`

        :param name: The name of the load balancer.
        :type name: ``str``

        :param ip: The IPV4 address of the load balancer.
        :type ip: ``str``

        :param dhcp: If true, the load balancer
            will reserve an IP address using DHCP.
        :type dhcp: ``bool``

        :return: Instance class ``ProfitBricksLoadBalancer``
        :rtype: :class:`ProfitBricksLoadBalancer`
        """
        action = load_balancer.href
        body = {}

        if name is not None:
            body['name'] = name

        if ip is not None:
            body['ip'] = ip

        if dhcp is not None:
            body['dhcp'] = dhcp

        response = self.connection.request(
            action=action,
            headers={
                'Content-Type': 'application/json'
            },
            data=body,
            method='PATCH',
            with_full_url=True
        )

        return self._to_load_balancer(response.object, response.headers)

    def ex_list_load_balanced_nics(self, load_balancer):
        """
        List balanced network interfaces for a load balancer.

        :param load_balancer: The load balancer to list NICs for.
        :type: load_balancer: :class:`ProfitBricksLoadBalancer`

        :return: ``list`` of class ``ProfitBricksNetorkInterface``
        :rtype: `list`
        """
        action = load_balancer.extra['entities']['balancednics']['href']
        response = self.connection.request(
            action=action,
            params={'depth': 3},
            method='GET',
            with_full_url=True
        )

        return self._to_interfaces(response.object)

    def ex_describe_load_balanced_nic(
        self, ex_href=None,
        ex_datacenter_id=None,
        ex_server_id=None,
        ex_nic_id=None
    ):
        """
        Fetch information on a load balanced network interface.

        :param ex_href: The href of the load balanced NIC
            you wish to describe.
        :type ex_href: ``str``

        :param ex_datacenter_id: The ID of parent data center
            of the NIC you wish to describe.
        :type ex_datacenter_id: ``str``

        :param ex_server_id: The server the NIC is connected to.
        :type ex_server_id: ``str``

        :param ex_nic_id: The ID of the NIC
        :type ex_nic_id: ``str``

        :return: Instance of class ``ProfitBricksNetworkInterface``
        :rtype: :class:`ProfitBricksNetworkInterface`
        """
        # A balanced NIC is addressed exactly like any other NIC, so
        # delegate to the generic describe method.
        return self.ex_describe_network_interface(
            ex_href=ex_href,
            ex_datacenter_id=ex_datacenter_id,
            ex_server_id=ex_server_id,
            ex_nic_id=ex_nic_id
        )

    def ex_attach_nic_to_load_balancer(
        self, load_balancer,
        network_interface
    ):
        """
        Attaches a network interface to a load balancer

        :param load_balancer: The load balancer you
            wish to attach the network interface to.
        :type: load_balancer: :class:`ProfitBricksLoadBalancer`

        :param network_interface: The network interface being attached.
        :type: network_interface: :class:`ProfitBricksNetworkInterface`

        :rtype ``bool``
        """
        action = load_balancer.extra['entities']['balancednics']['href']
        body = {
            'id': network_interface.id
        }

        self.connection.request(
            action=action,
            headers={
                'Content-Type': 'application/json'
            },
            data=body,
            method='POST',
            with_full_url=True
        )

        return True

    def ex_remove_nic_from_load_balancer(
        self, load_balancer,
        network_interface
    ):
        """
        Removed a network interface from a load balancer

        :param load_balancer: The load balancer you
            wish to remove the network interface from.
        :type: load_balancer: :class:`ProfitBricksLoadBalancer`

        :param network_interface: The network interface being removed.
        :type: network_interface: :class:`ProfitBricksNetworkInterface`

        :rtype ``bool``
        """
        action = load_balancer.href + '/balancednics/' + network_interface.id
        self.connection.request(
            action=action,
            method='DELETE',
            with_full_url=True
        )

        return True

    def ex_delete_load_balancer(self, load_balancer):
        """
        Delete a load balancer

        :param load_balancer: The load balancer you wish to delete.
        :type: load_balancer: :class:`ProfitBricksLoadBalancer`

        :rtype ``bool``
        """
        action = load_balancer.href
        self.connection.request(
            action=action,
            method='DELETE',
            with_full_url=True
        )

        return True

    """
    IP Block extension functions
    """

    def ex_list_ip_blocks(self):
        """
        List all IP blocks

        :return: ``list`` of class ``ProfitBricksIPBlock``
        :rtype: `list`
        """
        response = self.connection.request(
            action='ipblocks',
            params={'depth': 3},
            method='GET'
        )

        return self._to_ip_blocks(response.object)

    def ex_create_ip_block(self, location, size, name=None):
        """
        Create an IP block

        :param location: The location of the IP block.
        :type location: :class:`NodeLocation`

        :param size: The size of the IP block.
        :type size: ``int``

        :param name: The name of the IP block.
        :type name: ``str``

        :return: Instance class ``ProfitBricksIPBlock``
        :rtype: :class:`ProfitBricksIPBlock`
        """
        body = {
            'properties': {
                'location': location.id,
                'size': size
            }
        }

        if name is not None:
            body['properties']['name'] = name

        response = self.connection.request(
            action='ipblocks',
            headers={
                'Content-Type': 'application/json'
            },
            data=body,
            method='POST'
        )

        return self._to_ip_block(response.object, response.headers)

    def ex_describe_ip_block(self, ex_href=None, ex_ip_block_id=None):
        """
        Fetch an IP block

        :param ex_href: The href of the IP block.
        :type ex_href: ``str``

        :param ex_ip_block_id: The ID of the IP block.
        :type ex_ip_block_id: ``str``

        :return: Instance class ``ProfitBricksIPBlock``
        :rtype: :class:`ProfitBricksIPBlock`
        """
        use_full_url = True

        if ex_href is None:
            if ex_ip_block_id is None:
                raise ValueError(
                    'The IP block ID is required.'
                )
            else:
                use_full_url = False
                ex_href = (
                    'ipblocks/{ip_block_id}'
                ).format(
                    ip_block_id=ex_ip_block_id
                )

        response = self.connection.request(
            action=ex_href,
            params={'depth': 3},
            method='GET',
            with_full_url=use_full_url
        )

        return self._to_ip_block(response.object)

    def ex_delete_ip_block(self, ip_block):
        """
        Delete an IP block

        :param ip_block: The IP block you wish to delete.
:type: ip_block: :class:`ProfitBricksIPBlock` :rtype ``bool`` """ self.connection.request( action=ip_block.href, method='DELETE', with_full_url=True ) return True """ Private Functions """ def _to_ip_blocks(self, object): return [self._to_ip_block( ip_block) for ip_block in object['items']] def _to_ip_block(self, ip_block, headers=None): nested = { 'metadata': ip_block['metadata'] } extra = {} MAPPED_ATTRS = { 'metadata': { 'createdDate': 'created_date', 'createdBy': 'created_by', 'etag': 'etag', 'lastModifiedDate': 'last_modified_date', 'lastModifiedBy': 'last_modified_by', 'state': 'state' } } for k, v in MAPPED_ATTRS.items(): for original_name, altered_name in v.items(): extra[altered_name] = nested[k][original_name] if headers is not None: if 'location' in headers: extra['status_url'] = headers['location'] state = self.NODE_STATE_MAP.get( ip_block['metadata']['state'], NodeState.UNKNOWN ) # self, id, name, href, location, size, ips, state, driver, extra=None return ProfitBricksIPBlock( id=ip_block['id'], name=ip_block['properties']['name'], href=ip_block['href'], location=ip_block['properties']['location'], size=ip_block['properties']['size'], ips=ip_block['properties']['ips'] or [], state=state, driver=self.connection.driver, extra=extra ) def _to_load_balancers(self, object): return [self._to_load_balancer( load_balancer) for load_balancer in object['items']] def _to_load_balancer(self, load_balancer, headers=None): nested = { 'props': load_balancer['properties'], 'metadata': load_balancer['metadata'] } extra = {} MAPPED_ATTRS = { 'metadata': { 'createdDate': 'created_date', 'createdBy': 'created_by', 'etag': 'etag', 'lastModifiedDate': 'last_modified_date', 'lastModifiedBy': 'last_modified_by', 'state': 'state' }, 'props': { 'name': 'name', 'ip': 'ip', 'dhcp': 'dhcp' } } for k, v in MAPPED_ATTRS.items(): for original_name, altered_name in v.items(): extra[altered_name] = nested[k][original_name] if headers is not None: if 'location' in headers: 
extra['status_url'] = headers['location'] if 'entities' in load_balancer: extra['entities'] = load_balancer['entities'] state = self.NODE_STATE_MAP.get( load_balancer['metadata']['state'], NodeState.UNKNOWN ) return ProfitBricksLoadBalancer( id=load_balancer['id'], name=load_balancer['properties']['name'], href=load_balancer['href'], state=state, driver=self.connection.driver, extra=extra ) def _to_snapshots(self, object): return [self._to_snapshot( snapshot) for snapshot in object['items']] def _to_snapshot(self, snapshot, headers=None): nested = { 'props': snapshot['properties'], 'metadata': snapshot['metadata'] } extra = {} MAPPED_ATTRS = { 'metadata': { 'createdDate': 'created_date', 'createdBy': 'created_by', 'etag': 'etag', 'lastModifiedDate': 'last_modified_date', 'lastModifiedBy': 'last_modified_by', 'state': 'state' }, 'props': { 'name': 'name', 'description': 'description', 'location': 'location', 'size': 'size', 'cpuHotPlug': 'cpu_hot_plug', 'cpuHotUnplug': 'cpu_hot_unplug', 'ramHotPlug': 'ram_hot_plug', 'ramHotUnplug': 'ram_hot_unplug', 'nicHotPlug': 'nic_hot_plug', 'nicHotUnplug': 'nic_hot_unplug', 'discVirtioHotPlug': 'disc_virtio_hot_plug', 'discVirtioHotUnplug': 'disc_virtio_hot_unplug', 'discScsiHotPlug': 'disc_scsi_hot_plug', 'discScsiHotUnplug': 'disc_scsi_hot_unplug', 'licenceType': 'licence_type' } } for k, v in MAPPED_ATTRS.items(): for original_name, altered_name in v.items(): extra[altered_name] = nested[k][original_name] if headers is not None: if 'location' in headers: extra['status_url'] = headers['location'] state = self.NODE_STATE_MAP.get( snapshot['metadata']['state'], NodeState.UNKNOWN ) extra['href'] = snapshot['href'] return VolumeSnapshot( id=snapshot['id'], driver=self.connection.driver, size=extra['size'], extra=extra, created=extra['created_date'], state=state, name=extra['name'] ) def _to_lans(self, object): return [self._to_lan( lan) for lan in object['items']] def _to_lan(self, lan, headers=None): nested = { 'props': 
lan['properties'], 'metadata': lan['metadata'] } extra = {} MAPPED_ATTRS = { 'metadata': { 'createdDate': 'created_date', 'createdBy': 'created_by', 'etag': 'etag', 'lastModifiedDate': 'last_modified_date', 'lastModifiedBy': 'last_modified_by', 'state': 'state' }, 'props': { 'name': 'name', 'public': 'is_public' } } for k, v in MAPPED_ATTRS.items(): for original_name, altered_name in v.items(): extra[altered_name] = nested[k][original_name] if 'entities' in lan: extra['entities'] = lan['entities'] if headers is not None: if 'location' in headers: extra['status_url'] = headers['location'] extra['provisioning_state'] = self.PROVISIONING_STATE.get( lan['metadata']['state'], NodeState.UNKNOWN ) state = self.NODE_STATE_MAP.get( lan['metadata']['state'], NodeState.UNKNOWN ) return ProfitBricksLan( id=lan['id'], name=lan['properties']['name'], href=lan['href'], is_public=lan['properties']['public'], state=state, driver=self.connection.driver, extra=extra ) def _to_datacenters(self, object): return [self._to_datacenter( datacenter) for datacenter in object['items']] def _to_datacenter(self, datacenter, headers=None): nested = { 'props': datacenter['properties'], 'metadata': datacenter['metadata'] } if 'entities' in datacenter: nested['entities'] = datacenter['entities'] extra = {} MAPPED_ATTRS = { 'metadata': { 'createdDate': 'created_date', 'createdBy': 'created_by', 'etag': 'etag', 'lastModifiedDate': 'last_modified_date', 'lastModifiedBy': 'last_modified_by', 'state': 'state' }, 'props': { 'description': 'description', 'features': 'features', 'location': 'location', 'name': 'name', 'version': 'version' } } for k, v in MAPPED_ATTRS.items(): for original_name, altered_name in v.items(): extra[altered_name] = nested[k][original_name] if 'entities' in datacenter: extra['entities'] = datacenter['entities'] if headers is not None: if 'location' in headers: extra['status_url'] = headers['location'] extra['provisioning_state'] = self.PROVISIONING_STATE.get( 
datacenter['metadata']['state'], NodeState.UNKNOWN ) return Datacenter( id=datacenter['id'], href=datacenter['href'], name=datacenter['properties']['name'], version=datacenter['properties']['version'], driver=self.connection.driver, extra=extra ) def _to_images(self, object, image_type=None, is_public=True): if image_type is not None: images = [ image for image in object['items'] if image['properties']['imageType'] == image_type and image['properties']['public'] == is_public ] else: images = [ image for image in object['items'] if image['properties']['public'] == is_public ] return [self._to_image(image) for image in images] def _to_image(self, image, headers=None): nested = { 'props': image['properties'], 'metadata': image['metadata'] } extra = {} MAPPED_ATTRS = { 'metadata': { 'createdDate': 'created_date', 'createdBy': 'created_by', 'etag': 'etag', 'lastModifiedDate': 'last_modified_date', 'lastModifiedBy': 'last_modified_by', 'state': 'state' }, 'props': { 'name': 'name', 'description': 'description', 'location': 'location', 'size': 'size', 'cpuHotPlug': 'cpu_hot_plug', 'cpuHotUnplug': 'cpu_hot_unplug', 'ramHotPlug': 'ram_hot_plug', 'ramHotUnplug': 'ram_hot_unplug', 'nicHotPlug': 'nic_hot_plug', 'nicHotUnplug': 'nic_hot_unplug', 'discVirtioHotPlug': 'disc_virtio_hot_plug', 'discVirtioHotUnplug': 'disc_virtio_hot_unplug', 'discScsiHotPlug': 'disc_scsi_hot_plug', 'discScsiHotUnplug': 'disc_scsi_hot_unplug', 'licenceType': 'licence_type', 'imageType': 'image_type', 'public': 'public' } } for k, v in MAPPED_ATTRS.items(): for original_name, altered_name in v.items(): extra[altered_name] = nested[k][original_name] if headers is not None: if 'location' in headers: extra['status_url'] = headers['location'] """ Put the href inside extra because we cannot assign it to the NodeImage type. 
""" extra['href'] = image['href'] return NodeImage( id=image['id'], name=image['properties']['name'], driver=self.connection.driver, extra=extra ) def _to_nodes(self, object): return [self._to_node(n) for n in object['items']] def _to_node(self, node, headers=None): """ Convert the request into a node Node """ nested = { 'props': node['properties'], 'metadata': node['metadata'], 'entities': node['entities'] } extra = {} MAPPED_ATTRS = { 'props': { 'name': 'name', 'availabilityZone': 'availability_zone', 'bootCdrom': 'boot_cdrom', 'bootVolume': 'boot_volume', 'cores': 'cores', 'cpuFamily': 'cpu_family', 'ram': 'ram', 'vmState': 'vm_state' }, 'metadata': { 'createdDate': 'created_date', 'createdBy': 'created_by', 'etag': 'etag', 'lastModifiedBy': 'last_modified_by', 'lastModifiedDate': 'last_modified_date', 'state': 'state' } } for k, v in MAPPED_ATTRS.items(): for original_name, altered_name in v.items(): extra[altered_name] = nested[k][original_name] if headers is not None: if 'location' in headers: extra['status_url'] = headers['location'] state = self.NODE_STATE_MAP.get( node['properties']['vmState'], NodeState.UNKNOWN ) extra['entities'] = nested['entities'] extra['href'] = node['href'] public_ips = [] private_ips = [] if 'nics' in nested['entities']: if 'items' in nested['entities']['nics']: for nic in nested['entities']['nics']['items']: if nic['properties']['nat'] is True: public_ips += nic['properties']['ips'] elif nic['properties']['nat'] is False: private_ips += nic['properties']['ips'] return Node( id=node['id'], name=nested['props']['name'], state=state, public_ips=public_ips, private_ips=private_ips, driver=self.connection.driver, extra=extra ) def _to_volumes(self, object): return [self._to_volume( volume) for volume in object['items']] def _to_volume(self, volume, headers=None): nested = { 'props': volume['properties'], 'metadata': volume['metadata'] } extra = {} MAPPED_ATTRS = { 'props': { 'bus': 'bus', 'size': 'size', 'cpuHotPlug': 'cpu_hot_plug', 
'cpuHotUnplug': 'cpu_hot_unplug', 'deviceNumber': 'device_number', 'discScsiHotPlug': 'disc_scsi_hot_plug', 'discScsiHotUnplug': 'disc_scsi_hot_unplug', 'discVirtioHotPlug': 'disc_virtio_hot_plug', 'discVirtioHotUnplug': 'disc_virtio_hot_unplug', 'image': 'image', 'imagePassword': 'image_password', 'licenceType': 'licence_type', 'name': 'name', 'nicHotPlug': 'nic_hot_plug', 'nicHotUnplug': 'nic_hot_unplug', 'ramHotPlug': 'ram_hot_plug', 'ramHotUnplug': 'ram_hot_unplug', 'sshKeys': 'ssh_keys', 'type': 'type', 'deviceNumber': 'device_number' }, 'metadata': { 'createdBy': 'created_by', 'createdDate': 'created_date', 'etag': 'etag', 'lastModifiedBy': 'last_modified_by', 'lastModifiedDate': 'last_modified_date', 'state': 'state' } } for k, v in MAPPED_ATTRS.items(): for original_name, altered_name in v.items(): extra[altered_name] = nested[k][original_name] extra['provisioning_state'] = self.PROVISIONING_STATE.get( volume['metadata']['state'], NodeState.UNKNOWN ) extra['href'] = volume['href'] if 'availabilityZone' in volume['properties']: properties = volume['properties'] extra['availability_zone'] = properties['availabilityZone'] if headers is not None: if 'location' in headers: extra['status_url'] = headers['location'] return StorageVolume( id=volume['id'], name=volume['properties']['name'], size=volume['properties']['size'], driver=self.connection.driver, extra=extra ) def _to_interfaces(self, object): return [self._to_interface( interface) for interface in object['items']] def _to_interface(self, interface, headers=None): nested = { 'props': interface['properties'], 'metadata': interface['metadata'] } extra = {} MAPPED_ATTRS = { 'props': { 'dhcp': 'dhcp', 'firewallActive': 'firewall_active', 'ips': 'ips', 'lan': 'lan', 'mac': 'mac', 'name': 'name', 'nat': 'nat' }, 'metadata': { 'createdDate': 'created_date', 'createdBy': 'created_by', 'etag': 'etag', 'lastModifiedBy': 'last_modified_by', 'lastModifiedDate': 'last_modified_date', 'state': 'state' } } for k, v in 
MAPPED_ATTRS.items(): for original_name, altered_name in v.items(): extra[altered_name] = nested[k][original_name] if 'entities' in interface: extra['entities'] = interface['entities'] state = self.NODE_STATE_MAP.get( interface['metadata']['state'], NodeState.UNKNOWN ) if headers is not None: if 'location' in headers: extra['status_url'] = headers['location'] return ProfitBricksNetworkInterface( id=interface['id'], name=interface['properties']['name'], href=interface['href'], state=state, extra=extra ) def _to_firewall_rules(self, object): return [self._to_firewall_rule( firewall_rule) for firewall_rule in object['items']] def _to_firewall_rule(self, firewallrule, headers=None): nested = { 'props': firewallrule['properties'], 'metadata': firewallrule['metadata'] } extra = {} MAPPED_ATTRS = { 'props': { 'name': 'name', 'protocol': 'protocol', 'sourceMac': 'source_mac', 'sourceIp': 'source_ip', 'targetIp': 'target_ip', 'icmpCode': 'icmp_code', 'icmpType': 'icmp_type', 'portRangeStart': 'port_range_start', 'portRangeEnd': 'port_range_end' }, 'metadata': { 'createdDate': 'created_date', 'createdBy': 'created_by', 'etag': 'etag', 'lastModifiedDate': 'last_modified_date', 'lastModifiedBy': 'last_modified_by', 'state': 'state' } } for k, v in MAPPED_ATTRS.items(): for original_name, altered_name in v.items(): extra[altered_name] = nested[k][original_name] if headers is not None: if 'location' in headers: extra['status_url'] = headers['location'] state = self.NODE_STATE_MAP.get( firewallrule['metadata']['state'], NodeState.UNKNOWN ) return ProfitBricksFirewallRule( id=firewallrule['id'], name=firewallrule['properties']['name'], href=firewallrule['href'], state=state, extra=extra ) def _to_locations(self, object): return [self._to_location(location) for location in object['items']] def _to_location(self, location): return NodeLocation( id=location['id'], name=location['properties']['name'], country=location['id'].split('/')[0], driver=self.connection.driver ) def 
_to_node_size(self, data): """ Convert the PROFIT_BRICKS_GENERIC_SIZES into NodeSize """ return NodeSize( id=data["id"], name=data["name"], ram=data["ram"], disk=data["disk"], bandwidth=None, price=None, driver=self.connection.driver, extra={'cores': data["cores"]} ) def _wait_for_datacenter_state( self, datacenter, state=NodeState.RUNNING, timeout=300, interval=5 ): """ Private function that waits the datacenter to transition into the specified state. :return: Datacenter object on success. :rtype: :class:`.Datacenter` """ wait_time = 0 attempts = 0 while attempts < 5: attempts += 1 try: datacenter = self.ex_describe_datacenter( ex_datacenter_id=datacenter.id ) break except BaseHTTPError: time.sleep(interval) if datacenter is None: raise Exception( 'Data center was not ready in time to ' 'complete this operation.' ) while (datacenter.extra['provisioning_state'] != state): datacenter = \ self.ex_describe_datacenter(ex_href=datacenter.href) if datacenter.extra['provisioning_state'] == state: break if wait_time >= timeout: raise Exception( 'Datacenter didn\'t transition to %s state ' 'in %s seconds' % (state, timeout) ) wait_time += interval time.sleep(interval) return datacenter def _create_new_datacenter_for_node( self, name, location=None ): """ Creates a Datacenter for a node. """ dc_name = name + '-DC' if location is None: location = self.ex_describe_location( ex_location_id='us/las' ) return self.ex_create_datacenter( name=dc_name, location=location ) apache-libcloud-2.2.1/libcloud/compute/drivers/oneandone.py0000664000175000017500000016167213155313701023701 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
1&1 Cloud Server Compute driver
"""
import json

from libcloud.compute.providers import Provider
from libcloud.common.base import JsonResponse, ConnectionKey
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation, \
    Node, NodeAuthPassword, NodeAuthSSHKey
from libcloud.common.types import InvalidCredsError
from libcloud.compute.types import NodeState
from libcloud.utils.py3 import httplib
from libcloud.compute.base import NodeDriver
from time import sleep

# 1&1 Cloud Panel API endpoint and version path prefix.
API_HOST = 'cloudpanel-api.1and1.com'
API_VERSION = '/v1/'

__all__ = [
    'API_HOST',
    'API_VERSION',
    'OneAndOneResponse',
    'OneAndOneConnection',
    'OneAndOneNodeDriver'
]


class OneAndOneResponse(JsonResponse):
    """
    OneAndOne response parsing.
    """
    # HTTP statuses regarded as success by success().
    valid_response_codes = [httplib.OK, httplib.CREATED, httplib.ACCEPTED]

    def parse_error(self):
        """
        Parse an error response body.

        Raises :class:`InvalidCredsError` on 401 responses; otherwise
        returns a formatted error string (or the raw parsed body when it
        carries no 'message' field).
        """
        if self.status == httplib.UNAUTHORIZED:
            body = self.parse_body()
            raise InvalidCredsError(body['message'])
        else:
            body = self.parse_body()
            if 'message' in body:
                error = '%s (code: %s)' % (body['message'], self.status)
            else:
                error = body
            return error

    def success(self):
        # Any code in valid_response_codes counts as a successful call.
        return self.status in self.valid_response_codes


class OneAndOneConnection(ConnectionKey):
    """
    Connection class for the 1&1 driver
    """

    host = API_HOST
    api_prefix = API_VERSION
    responseCls = OneAndOneResponse

    def encode_data(self, data):
        # All request bodies are JSON-encoded.
        return json.dumps(data)

    def add_default_headers(self, headers):
        """
        Add headers that are necessary for every request.

        This method adds the API ``token`` and ``Content-Type`` to the
        request.
        """
        headers['X-Token'] = self.key
        headers['Content-Type'] = 'application/json'
        return headers

    def request(self, action, params=None, data=None, headers=None,
                method='GET', raw=False):
        """
        Issue a request against the 1&1 API.

        The ``action`` is treated as a path relative to the versioned
        API prefix; any leading '/' is stripped before the prefix is
        prepended.
        """
        action = self.api_prefix + action.lstrip('/')

        return super(OneAndOneConnection, self). \
            request(action=action,
                    params=params,
                    data=data,
                    headers=headers,
                    method=method,
                    raw=raw)


class OneAndOneNodeDriver(NodeDriver):
    """
    Base OneAndOne node driver.
    """
    connectionCls = OneAndOneConnection
    name = '1and1'
    website = 'http://www.1and1.com'
    type = Provider.ONEANDONE

    # Map of 1&1 server states to libcloud NodeState values.
    # NOTE(review): 'POWERING_OFF' -> PENDING and 'POWERED_OFF' ->
    # STOPPING read as if they were swapped (one would expect STOPPING
    # while powering off and STOPPED once powered off) -- confirm
    # against the 1&1 API state machine before relying on these.
    NODE_STATE_MAP = {
        'POWERING_ON': NodeState.STARTING,
        'POWERING_OFF': NodeState.PENDING,
        'POWERED_OFF': NodeState.STOPPING,
        'POWERED_ON': NodeState.RUNNING,
        'REBOOTING': NodeState.REBOOTING,
        'CONFIGURING': NodeState.RECONFIGURING,
        'REMOVING': NodeState.UNKNOWN,
        'DEPLOYING': NodeState.STARTING,
    }

    """
    Core Functions
    """

    def list_sizes(self):
        """
        Lists all sizes.

        :return: A list of all configurable node sizes.
        :rtype: ``list`` of :class:`NodeSize`
        """
        sizes = []

        fixed_instances = self._list_fixed_instances()
        for value in fixed_instances:
            node_size = self._to_node_size(value)
            sizes.append(node_size)

        return sizes

    def list_locations(self):
        """
        Lists all locations (1&1 data centers).

        :return: ``list`` of :class:`NodeLocation`
        :rtype: ``list``
        """
        datacenters = self.ex_list_datacenters()
        locations = []
        for values in datacenters:
            node_size = self._to_location(values)
            locations.append(node_size)

        return locations

    def list_images(self, image_type=None):
        """
        Lists all server appliances (images), optionally filtered by
        image type.

        :param image_type: Image type to filter on (passed through to
            the image converter).
        :type image_type: ``str``

        :return: ``list`` of :class:`NodeImage`
        :rtype: ``list``
        """
        response = self.connection.request(
            action='server_appliances',
            method='GET'
        )

        return self._to_images(response.object, image_type)

    def get_image(self, image_id):
        """
        Fetch a single server appliance (image) by ID.

        :param image_id: ID of the image.
        :type image_id: ``str``

        :return: Instance of :class:`NodeImage`
        :rtype: :class:`NodeImage`
        """
        response = self.connection.request(
            action='server_appliances/%s' % image_id,
            method='GET'
        )
        return self._to_image(response.object)

    """
    Node functions
    """

    def create_node(self, name, image, ex_fixed_instance_size_id,
                    location=None, auth=None, ex_ip=None,
                    ex_monitoring_policy_id=None,
                    ex_firewall_policy_id=None,
                    ex_loadbalancer_id=None,
                    ex_description=None,
                    ex_power_on=None):
        """
        Creates a node.

        :param name: The name of the new node
        :type name: ``str``

        :param image: The server appliance to deploy.
        :type image: :class:`NodeImage`

        :param ex_fixed_instance_size_id: Fixed instance size ID from
            list_sizes
        :type ex_fixed_instance_size_id: ``str``

        :param location: 1&1 Data center Location
        :type location: :class:`NodeLocation`

        :param auth: Initial authentication; a
            :class:`NodeAuthPassword` sets the server password, a
            :class:`NodeAuthSSHKey` installs the given public key.
        :type auth: :class:`NodeAuthPassword` or :class:`NodeAuthSSHKey`

        :param ex_ip: ID of an existing IP address to assign.
        :type ex_ip: ``str``

        :param ex_monitoring_policy_id: Monitoring policy to attach.
        :type ex_monitoring_policy_id: ``str``

        :param ex_firewall_policy_id: Firewall policy to attach.
        :type ex_firewall_policy_id: ``str``

        :param ex_loadbalancer_id: Load balancer to attach.
        :type ex_loadbalancer_id: ``str``

        :param ex_description: Description of the server.
        :type ex_description: ``str``

        :param ex_power_on: Whether to power the server on after deploy.
        :type ex_power_on: ``bool``

        :return: Instance of class ``Node``
        :rtype: :class:`Node`
        """
        body = {
            'name': name,
            'appliance_id': image.id,
            'hardware': {
                'fixed_instance_size_id': ex_fixed_instance_size_id
            },
        }

        if location is not None:
            body['datacenter_id'] = location.id

        if ex_power_on is not None:
            body['power_on'] = ex_power_on
        if ex_description is not None:
            body['description'] = ex_description
        if ex_firewall_policy_id is not None:
            body['firewall_policy_id'] = ex_firewall_policy_id
        if ex_monitoring_policy_id is not None:
            body['monitoring_policy_id'] = ex_monitoring_policy_id
        if ex_loadbalancer_id is not None:
            body['loadbalancer_id'] = ex_loadbalancer_id
        if auth is not None:
            if isinstance(auth, NodeAuthPassword):
                body['password'] = auth.password
            elif isinstance(auth, NodeAuthSSHKey):
                body['rsa_key'] = auth.pubkey
        if ex_ip is not None:
            body['ip_id'] = ex_ip

        response = self.connection.request(
            action='servers',
            data=body,
            method='POST',
        )

        return self._to_node(response.object)

    def list_nodes(self):
        """
        List all nodes.

        :return: ``list`` of :class:`Node`
        :rtype: ``list``
        """
        response = self.connection.request(
            action='servers',
            method='GET'
        )

        return self._to_nodes(response.object)

    def destroy_node(self, node, ex_keep_ips=False):
        """
        Destroys a node.

        :param node: The node you wish to destroy.
:type volume: :class:`Node` :param ex_keep_ips: True to keep all IP addresses assigned to the node :type ex_keep_ips: : ``bool`` :return: Instance of class ``Node`` :rtype: :class: `Node` """ self.ex_shutdown_server(node.id) self._wait_for_state(node.id, 'POWERED_OFF') response = self.connection.request( action='servers/%s' % node.id, params={'keep_ips': ex_keep_ips}, method='DELETE' ) return self._to_node(response.object) def reboot_node(self, node): """ Reboots the node. :param node: The node you wish to destroy. :type volume: :class:`Node` :return: Instance of class ``Node`` :rtype: :class: `Node` """ shutdown_body = { "action": "REBOOT", "method": "HARDWARE" } response = self.connection.request( action='servers/%s/status/action' % node.id, data=shutdown_body, method='PUT', ) return self._to_node(response.object) """ Extension functions """ def ex_rename_server(self, server_id, name=None, description=None): """ Renames the server :param server_id: ID of the server you want to rename :param name: New name of the server :type: ``str`` :param description: New description of the server :type: ``str`` :return: Instance of class ``Node`` :rtype: :class: `Node` """ body = {} if name is not None: body["name"] = name if description is not None: body["description"] = description response = self.connection.request( action='servers/%s' % server_id, data=body, method='PUT' ) return self._to_node(response.object) def ex_get_server_hardware(self, server_id): """ Gets all server hardware :param server_id: Id of the server :type: ``str`` :return: Server's hardware :rtype: ``dict`` """ response = self.connection.request( action='servers/%s/hardware' % server_id, method='GET' ) return response.object """ Hardware operations """ def ex_modify_server_hardware(self, server_id, fixed_instance_size_id=None, vcore=None, cores_per_processor=None, ram=None): """ Modifies server's hardware :param server_id: :type: ``str`` :param fixed_instance_size_id: Id of the fixed instance size :type: 
``str`` :param vcore: Virtual cores count :type: ``int`` :param cores_per_processor: Count of cores per procesor :type: ``int`` :param ram: Amount of ram for the server :type: ``int`` :return: Instance of class ``Node`` :type: :class: `Node` """ body = {} if fixed_instance_size_id is not None: body['firewall_policy_id'] = fixed_instance_size_id if vcore is not None: body['vcore'] = vcore if cores_per_processor is not None: body['cores_per_processor'] = cores_per_processor if ram is not None: body['ram'] = ram response = self.connection.request( action='servers/%s/hardware' % server_id, data=body, method='PUT' ) return self._to_node(response.object) """ HDD operations """ def ex_modify_server_hdd(self, server_id, hdd_id=None, size=None): """ Modifies server hard disk drives :param server_id: Id of the server :type: ``str`` :param hdd_id: Id of the hard disk :type: ``str`` :param size: Size of the hard disk :type: ``str`` :return: Instance of class ``Node`` :rtype: :class: `Node` """ body = {} if size is not None: body['size'] = size response = self.connection.request( action='servers/%s/hardware/hdds/%s' % (server_id, hdd_id), data=body, method='PUT' ) return self._to_node(response.object) def ex_add_hdd(self, server_id, size, is_main): """ Add a hard disk to the server :param server_id: Id of the server :type: ``str`` :param size: Size of the new disk :type: ``str`` :param is_main: Indicates if the disk is going to be the boot disk :type: ``boolean`` :return: Instance of class ``Node`` :type: :class: `Node` """ body = { 'size': size, 'is_main': is_main } response = self.connection.request( action='servers/%s/hardware/hdds' % server_id, data=body, method='POST' ) return self._to_node(response.object) def ex_remove_hdd(self, server_id, hdd_id): """ Removes existing hard disk :param server_id: Id of the server :type: ``str`` :param hdd_id: Id of the hard disk :type: ``str`` :return: Instance of class ``Node`` :rtype: :class: `Node` """ response = 
self.connection.request( action='servers/%s/hardware/hdds/%s' % (server_id, hdd_id), method='DELETE' ) return self._to_node(response.object) """ Data center operations """ def ex_list_datacenters(self): """ Lists all data centers :return: List of data centers :rtype: ``dict`` """ response = self.connection.request( action='datacenters', method='GET' ) return response.object def ex_get_server(self, server_id): """ Gets a server :param server_id: Id of the server to be retrieved :type: ``str`` :return: Instance of class ``Node`` :rtype: :class: `Node` """ response = self.connection.request( action='servers/%s' % (server_id), method='GET' ) return self._to_node(response.object) def ex_shutdown_server(self, server_id, method='SOFTWARE'): """ Shuts down the server :param server_id: Id of the server to be shut down :type: ``str`` :param method: Method of shutting down "SOFTWARE" or "HARDWARE" :return: Instance of class ``Node`` :rtype: :class: `Node` """ shutdown_body = { 'action': 'POWER_OFF', 'method': method } response = self.connection.request( action='servers/%s/status/action' % (server_id), data=shutdown_body, method='PUT', ) return self._to_node(response.object) """ Image operations """ def ex_get_server_image(self, server_id): """ Gets server image :param server_id: Id of the server :type: ``str`` :return: Server image :rtype: ``dict`` """ response = self.connection.request( action='servers/%s/image' % server_id, method='GET' ) return response.object def ex_reinstall_server_image(self, server_id, image_id, password=None): """ Installs a new image on the server :param server_id: Id of the server :type: ``str`` :param image_id: Id of the image (Server Appliance) :type: ``str`` :param password: New password for the server :return: Instance of class ``Node`` :rtype: :class: `Node` """ body = { 'id': image_id, } if password is not None: body['password'] = password response = self.connection.request( action='servers/%s/image' % server_id, data=body, method='PUT' ) 
        return self._to_node(response.object)

    """
    Server IP operations
    """

    def ex_list_server_ips(self, server_id):
        """
        Gets all server IP objects.

        :param server_id: Id of the server
        :type server_id: ``str``

        :return: List of server IP objects
        :rtype: ``list`` of ``dict``
        """
        response = self.connection.request(
            action='servers/%s/ips' % server_id,
            method='GET'
        )

        return response.object

    def ex_get_server_ip(self, server_id, ip_id):
        """
        Get a single server IP object.

        :param server_id: Id of the server
        :type server_id: ``str``

        :param ip_id: ID of the IP address
        :type ip_id: ``str``

        :return: IP address object
        :rtype: ``dict``
        """
        response = self.connection.request(
            action='servers/%s/ips/%s' % (server_id, ip_id),
            method='GET'
        )

        return response.object

    def ex_assign_server_ip(self, server_id, ip_type):
        """
        Assigns a new IP address to the server.

        :param server_id: Id of the server
        :type server_id: ``str``

        :param ip_type: Type of the IP address [IPV4, IPV6]
        :type ip_type: ``str``

        :return: ``Node`` instance
        :rtype: :class:`Node`
        """
        body = {
            'type': ip_type
        }

        response = self.connection.request(
            action='servers/%s/ips' % server_id,
            data=body,
            method='POST'
        )

        return self._to_node(response.object)

    def ex_remove_server_ip(self, server_id, ip_id, keep_ip=None):
        """
        Removes an IP address from the server.

        :param server_id: Id of the server
        :type server_id: ``str``

        :param ip_id: ID of the IP address
        :type ip_id: ``str``

        :param keep_ip: Indicates whether the IP address will be kept in
            the Cloud Panel after removal from the server.
        :type keep_ip: ``bool``

        :return: ``Node`` instance
        :rtype: :class:`Node`
        """
        body = {}

        if keep_ip is not None:
            body['keep_ip'] = keep_ip

        response = self.connection.request(
            action='servers/%s/ips/%s' % (server_id, ip_id),
            data=body,
            method='DELETE'
        )

        return self._to_node(response.object)

    def ex_get_server_firewall_policies(self, server_id, ip_id):
        """
        Gets the firewall policy attached to the server's IP.

        :param server_id: Id of the server
        :type server_id: ``str``

        :param ip_id: ID of the IP address
        :type ip_id: ``str``

        :return: IP address object
        :rtype: ``dict``
        """
        response =
self.connection.request( action='/servers/%s/ips/%s/firewall_policy' % (server_id, ip_id), method='GET' ) return response.object def ex_remove_server_firewall_policy(self, server_id, ip_id): """ Removes a firewall policy from server's IP :param server_id: Id of the server :type: ``str`` :param ip_id: ID of the IP address :type: ``str`` :return: ``Node`` instance :rtype: ``Node`` """ response = self.connection.request( action='/servers/%s/ips/%s/firewall_policy' % (server_id, ip_id), method='DELETE' ) return self._to_node(response.object) def ex_add_server_firewall_policy(self, server_id, ip_id, firewall_id): """ Adds a firewall policy to the server's IP address :param server_id: Id of the server :type: ``str`` :param ip_id: ID of the IP address :type: ``str`` :param firewall_id: ID of the firewall policy :type: ``str`` :return: ``Node`` instance :rtype: ``Node`` """ body = { 'id': firewall_id } response = self.connection.request( action='/servers/%s/ips/%s/firewall_policy' % (server_id, ip_id), data=body, method='POST' ) return self._to_node(response.object) """ Firewall Policy operations """ def ex_create_firewall_policy(self, name, rules, description=None): """ Creates a firewall Policy. :param name: :param description: :param rules: :rtype: `dict` :return: `dict` firewall policy """ body = { 'name': name } if description is not None: body['description'] = description if len(rules) == 0: raise ValueError( 'At least one firewall rule is required.' 
) else: body['rules'] = rules response = self.connection.request( action='firewall_policies', data=body, method='POST', ) return response.object def ex_list_firewall_policies(self): """" List firewall policies :return: 'dict' """ response = self.connection.request( action='firewall_policies', method='GET' ) return response.object def ex_get_firewall_policy(self, fw_id): """ Gets firewall policy :param fw_id: ID of the firewall policy :return: 'dict' """ response = self.connection.request( action='firewall_policy/%s' % fw_id, method='GET' ) return response.object def ex_delete_firewall_policy(self, fw_id): """ Deletes firewall policy :param fw_id: ID of the Firewall :return: 'dict' """ response = self.connection.request( action='firewall_policy/%s' % fw_id, method='DELETE' ) return response.object """ Shared storage operations """ def ex_list_shared_storages(self): """ List of shared storages :return: 'dict' """ response = self.connection.request( action='shared_storages', method='GET' ) return response.object def ex_get_shared_storage(self, storage_id): """ Gets a shared storage :return: 'dict' """ response = self.connection.request( action='shared_storages/%s' % (storage_id), method='GET' ) return response.object def ex_create_shared_storage(self, name, size, datacenter_id=None, description=None): """ Creates a shared storage :param name: Name of the storage :param size: Size of the storage :param datacenter_id: datacenter where storage should be created :param description: description ot the storage :return: 'dict' """ body = { 'name': name, 'size': size, 'datacenter_id': datacenter_id } if description is not None: body['description'] = description response = self.connection.request( action='shared_storages', data=body, method='POST' ) return response.object def ex_delete_shared_storage(self, storage_id): """ Removes a shared storage :param storage_id: Id of the shared storage :type: ``str`` :return: Instnace of shared storage :rtype: ``list`` of ``dict`` """ 
response = self.connection.request( action='shared_storages/%s' % storage_id, method='DELETE' ) return response.object def ex_attach_server_to_shared_storage(self, storage_id, server_id, rights): """ Attaches a single server to a shared storage :param storage_id: Id of the shared storage :param server_id: Id of the server to be attached to the shared storage :param rights: :return: :rtype: 'dict' """ body = { 'severs': [ { 'id': server_id, 'rights': rights } ] } response = self.connection.request( action='shared_storages/%s/servers' % storage_id, data=body, method='POST' ) return response.object def ex_get_shared_storage_server(self, storage_id, server_id): """ Gets a shared storage's server :param storage_id: :param server_id: :return: """ response = self.connection.request( action='shared_storages/%s/servers/%s' % (storage_id, server_id), ) return response.object def ex_detach_server_from_shared_storage(self, storage_id, server_id): """ Detaches a server from shared storage :param storage_id: Id of the shared storage :type: ``str`` :param server_id: Id of the server :type: ``str`` :return: Instance of shared storage :rtype: ``dict`` """ response = self.connection.request( action='shared_storages/%s/servers/%s' % (storage_id, server_id), method='DELETE' ) return response.object """ Load Balancers operations """ def ex_create_load_balancer(self, name, method, rules, persistence=None, persistence_time=None, health_check_test=None, health_check_interval=None, health_check_path=None, health_check_parser=None, datacenter_id=None, description=None): """ :param name: Name of the load balancer :param method: Load balancer method :param rules: Load balancer rules :type rules: ``list`` of ``dict`` :param persistence: Indictes if persistance is set :type persistence: ``boolean`` :param persistence_time: Persistance time :type persistence_time: ``int`` :param health_check_test: Type of test :type health_check_test:``str`` :param health_check_interval: Interval of the check 
:param health_check_path: Path :type health_check_path: ``str`` :param health_check_parser: Parser :type health_check_parser:``str`` :param datacenter_id: Data center id :type datacenter_id:``str`` :param description: Description of load balancer :type description:``str`` :return: ``dict`` """ body = { 'name': name, 'method': method, } body['rules'] = [] body['rules'] = rules if persistence is not None: body['persistence'] = persistence if persistence_time is not None: body['persistence_time'] = persistence_time if health_check_test is not None: body['health_check_test'] = health_check_test if health_check_interval is not None: body['health_check_interval'] = health_check_interval if health_check_path is not None: body['health_check_path'] = health_check_path if health_check_parser is not None: body['health_check_parser'] = health_check_parser if datacenter_id is not None: body['datacenter_id'] = datacenter_id if description is not None: body['description'] = description response = self.connection.request( action='load_balancers', data=body, method='POST' ) return response.object def ex_update_load_balancer(self, lb_id, name=None, description=None, health_check_test=None, health_check_interval=None, persistence=None, persistence_time=None, method=None): body = {} if name is not None: body['name'] = name if description is not None: body['description'] = description if health_check_test is not None: body['health_check_test'] = health_check_test if health_check_interval is not None: body['health_check_interval'] = health_check_interval if persistence is not None: body['persistence'] = persistence if persistence_time is not None: body['persistence_time'] = persistence_time if method is not None: body['method'] = method response = self.connection.request( action='load_balancers/%s' % lb_id, data=body, method='PUT' ) return response.object def ex_add_servers_to_load_balancer(self, lb_id, server_ips=[]): """ Adds server's IP address to load balancer :param lb_id: Load 
balancer ID :type: ``str`` :param server_ips: Array of server IP IDs :type: ``list`` of ``str`` :return: Instance of load balancer :rtype: ``dict`` """ body = { 'server_ips': server_ips, } response = self.connection.request( action='load_balancers/%s/server_ips' % lb_id, data=body, method='POST' ) return response.object def ex_remove_server_from_load_balancer(self, lb_id, server_ip): """ Removes server's IP from load balancer :param lb_id: Load balancer ID :type: ``str`` :param server_ip: ID of the server IP :type: ``str`` :return: Instance of load balancer :rtype: ``dict`` """ response = self.connection.request( action='/load_balancers/%s/server_ips/%s' % (lb_id, server_ip), method='DELETE' ) return response.object def ex_add_load_balancer_rule(self, lb_id, protocol, port_balancer, port_server, source=None): """ Adds a rule to load balancer :param lb_id: Load balancer ID :rtype: ``str`` :param protocol: Load balancer protocol :rtype: ``str`` :param port_balancer: Port to be balananced :rtype: ``int`` :param port_server: Server port :rtype: ``int`` :param source: Source IP address :rtype: ``str`` :return: Instance of load balancer :rtype: ``dict`` """ body = { 'rules': [ { 'protocol': protocol, 'port_balancer': port_balancer, 'port_server': port_server } ] } if source is not None: body['rules'][0]['source'] = source response = self.connection.request( action='/load_balancers/%s/rules' % lb_id, data=body, method='POST' ) return response.object def ex_remove_load_balancer_rule(self, lb_id, rule_id): """ Removes load balancer rule :param lb_id: Load balancer ID :rtype: ``str`` :param rule_id: Rule ID :rtype: ``str`` :return: Instance of load balancer :rtype: ``dict`` """ response = self.connection.request( action='/load_balancers/%s/rules/%s' % (lb_id, rule_id), method='DELETE' ) return response.object def ex_list_load_balancers(self): """ Lists all load balancers :return: List of load balancers :rtype: ``list`` of ``dict`` """ response = self.connection.request( 
action='load_balancers', method='GET' ) return response.object def ex_get_load_balancer(self, lb_id): """ Gets a single load balancer :param lb_id: ID of the load balancer :type lb_id: ``str`` :return: Instance of load balancer :rtype: ``dict`` """ response = self.connection.request( action='load_balancers/%s' % lb_id, method='GET' ) return response.object def ex_list_load_balancer_server_ips(self, lb_id): """ List balanced server IP addresses :param lb_id: ID of the load balancer :type lb_id: ``str`` :return: Array of IP address IDs :rtype: ``dict`` """ response = self.connection.request( action='load_balancers/%s/server_ips' % lb_id, method='GET' ) return response.object def ex_get_load_balancer_server_ip(self, lb_id, server_ip): """ Gets load balanced server id :param lb_id: ID of the load balancer :type lb_id: ``str`` :param server_ip: ID of the server IP :type server_ip: ``str`` :return: Server IP :rtype: ``dict`` """ response = self.connection.request( action='load_balancers/%s/server_ips/%s' % (lb_id, server_ip), method='GET' ) return response.object def ex_list_load_balancer_rules(self, lb_id): """ Lists loadbalancer rules :param lb_id: ID of the load balancer :type lb_id: ``str`` :return: Lists of rules :rtype: ``list`` of ``dict`` """ response = self.connection.request( action='load_balancers/%s/rules' % lb_id, method='GET' ) return response.object def ex_get_load_balancer_rule(self, lb_id, rule_id): """ Get a load balancer rule :param lb_id: ID of the load balancer :type lb_id: ``str`` :param rule_id: Rule ID :type rule_id: ``str`` :return: A load balancer rule :rtype: ``dict`` """ response = self.connection.request( action='load_balancers/%s/rules/%s' % (lb_id, rule_id), method='GET' ) return response.object def ex_delete_load_balancer(self, lb_id): """ Deletes a load balancer rule :param lb_id: ID of the load balancer :type lb_id: ``str`` :param rule_id: Rule ID :type rule_id: ``str`` :return: Instance of load balancer :rtype: ``dict`` """ response = 
self.connection.request( action='load_balancers/%s' % lb_id, method='DELETE' ) return response.object """ Public IP operations """ def ex_list_public_ips(self): """ Lists all public IP addresses :return: Array of public addresses :rtype: ``list`` of ``dict`` """ response = self.connection.request( action='public_ips', method='GET' ) return response.object def ex_create_public_ip(self, type, reverse_dns=None, datacenter_id=None): """ Creates a public IP :param type: Type of IP (IPV4 or IPV6) :type type: ``str`` :param reverse_dns: Reverse DNS :type reverse_dns: ``str`` :param datacenter_id: Datacenter ID where IP address will be crated :type datacenter_id: ``str`` :return: Instance of Public IP :rtype: ``dict`` """ body = { 'type': type } if reverse_dns is not None: body['reverse_dns'] = reverse_dns if datacenter_id is not None: body['datacenter_id'] = datacenter_id response = self.connection.request( action='public_ips', data=body, method='POST' ) return response.object def ex_get_public_ip(self, ip_id): """ Gets a Public IP :param ip_id: ID of the IP :type ip_id: ``str`` :return: Instance of Public IP :rtype: ``dict`` """ response = self.connection.request( action='public_ips/%s' % ip_id, method='GET' ) return response.object def ex_delete_public_ip(self, ip_id): """ Deletes a public IP :param ip_id: ID of public IP :type ip_id: ``str`` :return: Instance of IP Address :rtype: ``dict`` """ response = self.connection.request( action='public_ips/%s' % ip_id, method='DELETE' ) return response def ex_update_public_ip(self, ip_id, reverse_dns): """ Updates a Public IP :param ip_id: ID of public IP :type ip_id: ``str`` :param reverse_dns: Reverse DNS :type reverse_dns: ``str`` :return: Instance of Public IP :rtype: ``dict`` """ body = { 'reverse_dns': reverse_dns } response = self.connection.request( action='public_ips/%s' % ip_id, data=body, method='DELETE' ) return response.object """ Private Network Operations """ def ex_list_private_networks(self): """ Lists all 
private networks :return: List of private networks :rtype: ``dict`` """ response = self.connection.request( action='private_networks', method='GET' ) return response.object def ex_create_private_network(self, name, description=None, datacenter_id=None, network_address=None, subnet_mask=None): """ Creates a private network :param name: Name of the private network :type name: ``str`` :param description: Description of the private network :type description: ``str`` :param datacenter_id: ID of the data center for the private network :type datacenter_id: ``str`` :param network_address: Network address of the private network :type network_address: ``str`` :param subnet_mask: Subnet mask of the private network :type subnet_mask: ``str`` :return: Newly created private network :rtype: ``dict`` """ body = { 'name': name } if description is not None: body['description'] = description if datacenter_id is not None: body['datacenter_id'] = datacenter_id if network_address is not None: body['network_address'] = network_address if subnet_mask is not None: body['subnet_maks'] = subnet_mask response = self.connection.request( action='private_networks', data=body, method='POST' ) return response.object def ex_delete_private_network(self, network_id): """ Deletes a private network :param network_id: Id of the private network :type network_id: ``str`` :return: Instance of the private network being deleted :rtype: ``dict`` """ response = self.connection.request( action='private_networks' % network_id, method='DELETE' ) return response.object def ex_update_private_network(self, network_id, name=None, description=None, datacenter_id=None, network_address=None, subnet_mask=None): """ Updates a private network :param name: Name of the private network :type name: ``str`` :param description: Description of the private network :type description: ``str`` :param datacenter_id: ID of the data center for the private network :type datacenter_id: ``str`` :param network_address: Network address of 
the private network :type network_address: ``str`` :param subnet_mask: Subnet mask of the private network :type subnet_mask: ``str`` :return: Instance of private network :rtype: ``dict`` """ body = {} if name is not None: body['name'] = name if description is not None: body['description'] = description if datacenter_id is not None: body['datacenter_id'] = datacenter_id if network_address is not None: body['network_address'] = network_address if subnet_mask is not None: body['subnet_maks'] = subnet_mask response = self.connection.request( action='private_networks/%s', data=body, method='PUT' ) return response.object def ex_list_private_network_servers(self, network_id): """ Lists all private network servers :param network_id: Private network ID :type network_id: ``str`` :return: List of private network servers :rtype: ``dict`` """ response = self.connection.request( action='/private_networks/%s/servers' % network_id, method='GET' ) return response.object def ex_add_private_network_server(self, network_id, server_ids): """ Add servers to private network :param network_id: Private Network ID :type network_id: ``str`` :param server_ids: List of server IDs :type server_ids: ``list`` of ``str`` :return: List of attached servers :rtype: ``dict`` """ body = { 'servers': server_ids } response = self.connection.request( action='/private_networks/%s/servers' % network_id, data=body, method='POST' ) return response.object def ex_remove_server_from_private_network(self, network_id, server_id): """ Removes a server from the private network :param network_id: Private Network ID :type network_id: ``str`` :param server_id: Id of the server :type server_id: ``str`` :return: Instance of the private network :rtype: ``dict`` """ response = self.connection.request( action='/private_networks/%s/servers/%s' % (network_id, server_id), method='POST' ) return response.object """ Monitoring policy operations """ def ex_list_monitoring_policies(self): """ Lists all monitoring policies :return: 
List of monitoring policies :rtype: ``dict`` """ response = self.connection.request( action='monitoring_policies', method='GET' ) return response.object def ex_create_monitoring_policy(self, name, thresholds, ports, processes, description=None, email=None, agent=None, ): """ Creates a monitoring policy :param name: Name for the monitoring policy :type name: ``str`` :param thresholds: Thresholds for the monitoring policy :type thresholds: ``dict`` :param ports: Monitoring policies for ports :type ports: ``list`` of ``dict`` :param processes: Processes to be monitored :type processes: ``list`` of ``dict`` :param description: Description for the monitoring policy :type description: ``str`` :param email: Email for notifications :type email: ``str`` :param agent: Indicates if agent application will be installed :type agent: ``boolean`` :return: Newly created instance of monitofing policy :rtype: ``dict`` """ body = { 'name': name, 'thresholds': thresholds, 'ports': ports, 'processes': processes } if description is not None: body['description'] = description if email is not None: body['email'] = email if agent is not None: body['agent'] = agent response = self.connection.request( action='monitoring_policies', data=body, method='POST' ) return response.object def ex_delete_monitoring_policy(self, policy_id): """ Deletes a monitoring policy :param policy_id: Id of the monitoring policy :type policy_id: ``str`` :return: Instance of the monitoring policy being deleted :rtype: ``dict`` """ response = self.connection.request( action='monitoring_policies' % policy_id, method='DELETE' ) return response.object def ex_update_monitoring_policy(self, policy_id, email, thresholds, name=None, description=None): """ Updates monitoring policy :param policy_id: Id of the monitoring policy :type policy_id: ``str`` :param email: Email to send notifications to :type email: ``str`` :param thresholds: Thresholds for the monitoring policy :type thresholds: ``dict`` :param name: Name of the 
monitoring policy :type name: ``str`` :param description: Description of the monitoring policy :type description: ``str`` :return: Instance of the monitoring policy being deleted :rtype: ``dict`` """ body = {} if name is not None: body['name'] = name if description is not None: body['description'] = description if thresholds is not None: body['thresholds'] = thresholds if email is not None: body['email'] = email response = self.connection.request( action='monitoring_policies/%s' % policy_id, data=body, method='PUT' ) return response.object def ex_get_monitoring_policy(self, policy_id): """ Fetches a monitoring policy :param policy_id: Id of the monitoring policy :type policy_id: ``str`` :return: Instance of a monitoring policy :rtype: ``dict`` """ response = self.connection.request( action='monitoring_policies/%s' % policy_id, method='GET' ) return response.object def ex_get_monitoring_policy_ports(self, policy_id): """ Fetches monitoring policy ports :param policy_id: Id of the monitoring policy :type policy_id: :return: Instance of a monitoring policy :rtype: ``dict`` """ response = self.connection.request( action='monitoring_policies/%s/ports' % policy_id, method='GET' ) return response.object def ex_get_monitoring_policy_port(self, policy_id, port_id): """ Fetches monitoring policy port :param policy_id: Id of the monitoring policy :type policy_id: ``str`` :param port_id: Id of the port :type port_id: ``str`` :return: Instance of a monitoring policy :rtype: ``dict`` """ response = self.connection.request( action='monitoring_policies/%s/ports/%s' % (policy_id, port_id), method='GET' ) return response.object def ex_remove_monitoring_policy_port(self, policy_id, port_id): """ Removes monitoring policy port :param policy_id: Id of the monitoring policy :type policy_id: ``str`` :param port_id: Id of the port :type port_id: ``str`` :return: Instance of a monitoring policy :rtype: ``dict`` """ response = self.connection.request( 
            action='monitoring_policies/%s/ports/%s' % (policy_id, port_id),
            method='DELETE'
        )

        return response.object

    def ex_add_monitoring_policy_ports(self, policy_id, ports):
        """
        Add monitoring policy ports

        :param policy_id: Id of the monitoring policy
        :type policy_id: ``str``

        :param ports: List of ports
        :type ports: ``dict``
            [
                {
                    'protocol':'TCP',
                    'port':'80',
                    'alert_if':'RESPONDING',
                    'email_notification':true
                }
            ]

        :return: Instance of a monitoring policy
        :rtype: ``dict``
        """
        body = {'ports': ports}

        response = self.connection.request(
            action='monitoring_policies/%s/ports' % policy_id,
            data=body,
            method='POST'
        )

        return response.object

    def ex_get_monitoring_policy_processes(self, policy_id):
        """
        Fetches monitoring policy processes

        :param policy_id: Id of the monitoring policy
        :type policy_id: ``str``

        :return: Instance of a monitoring policy
        :rtype: ``dict``
        """
        response = self.connection.request(
            action='monitoring_policies/%s/processes' % policy_id,
            method='GET'
        )

        return response.object

    def ex_get_monitoring_policy_process(self, policy_id, process_id):
        """
        Fetches monitoring policy process

        :param policy_id: Id of the monitoring policy
        :type policy_id: ``str``

        :param process_id: Id of the process
        :type process_id: ``str``

        :return: Instance of a monitoring policy
        :rtype: ``dict``
        """
        response = self.connection.request(
            action='monitoring_policies/%s/processes/%s'
                   % (policy_id, process_id),
            method='GET'
        )

        return response.object

    def ex_remove_monitoring_policy_process(self, policy_id, process_id):
        """
        Removes monitoring policy process

        :param policy_id: Id of the monitoring policy
        :type policy_id: ``str``

        :param process_id: Id of the process
        :type process_id: ``str``

        :return: Instance of a monitoring policy
        :rtype: ``dict``
        """
        response = self.connection.request(
            action='monitoring_policies/%s/processes/%s'
                   % (policy_id, process_id),
            method='DELETE'
        )

        return response.object

    def ex_add_monitoring_policy_processes(self, policy_id, processes):
        """
        Add monitoring policy processes

        :param policy_id: Id of the monitoring policy
        :type policy_id: ``str``

        :param processes: List of processes
        :type processes: ``list`` of ``dict``
            [
                {
                    'process': 'taskmmgr',
                    'alert_if': 'RUNNING',
                    'email_notification': true
                }
            ]

        :return: Instance of a monitoring policy
        :rtype: ``dict``
        """
        body = {'processes': processes}

        response = self.connection.request(
            action='monitoring_policies/%s/processes' % policy_id,
            data=body,
            method='POST'
        )

        return response.object

    def ex_list_monitoring_policy_servers(self, policy_id):
        """
        List all servers that are being monitored by the policy

        :param policy_id: Id of the monitoring policy
        :type policy_id: ``str``

        :return: List of servers being monitored
        :rtype: ``list`` of ``dict``
        """
        response = self.connection.request(
            action='monitoring_policies/%s/servers' % policy_id,
            method='GET'
        )

        return response.object

    def ex_add_servers_to_monitoring_policy(self, policy_id, servers):
        """
        Adds servers to monitoring policy

        :param policy_id: Id of the monitoring policy
        :type policy_id: ``str``

        :param servers: List of server ID
        :type servers: ``list`` of ``str``

        :return: Instance of a monitoring policy
        :rtype: ``dict``
        """
        body = {
            'servers': servers
        }

        response = self.connection.request(
            action='monitoring_policies/%s/servers' % policy_id,
            data=body,
            method='POST'
        )

        return response.object

    def ex_remove_server_from_monitoring_policy(self, policy_id, server_id):
        """
        Removes a server from monitoring policy

        :param policy_id: Id of the monitoring policy
        :type policy_id: ``str``

        :param server_id: Id of the server
        :type server_id: ``str``

        :return: Instance of a monitoring policy
        :rtype: ``dict``
        """
        response = self.connection.request(
            action='monitoring_policies/%s/servers/%s'
                   % (policy_id, server_id),
            method='DELETE'
        )

        return response.object

    """
    Private Functions
    """

    def _to_images(self, object, image_type=None):
        # Convert raw API image dicts to NodeImage objects, optionally
        # filtered by image_type.
        # NOTE(review): the parameter name 'object' shadows the builtin;
        # kept as-is for backward compatibility.
        if image_type is not None:
            images = [image for image in object
                      if image['type'] == image_type]
        else:
            images = [image for image in object]

        return
[self._to_image(image) for image in images] def _to_image(self, data): extra = { 'os_family': data['os_family'], 'os': data['os'], 'os_version': data['os_version'], 'os_architecture': data['os_architecture'], 'os_image_type': data['os_image_type'], 'min_hdd_size': data['min_hdd_size'], 'available_datacenters': data['available_datacenters'], 'licenses': data['licenses'], 'version': data['version'], 'categories': data['categories'] } return NodeImage(id=data['id'], name=data['name'], driver=self, extra=extra) def _to_node_size(self, data): return NodeSize( id=data['id'], name=data['name'], ram=data['hardware']['ram'], disk=data['hardware']['hdds'][0]['size'], bandwidth=None, price=None, driver=self.connection.driver, extra={ 'vcores': data['hardware']['vcore'], 'cores_per_processor': data['hardware']['cores_per_processor']} ) def _to_location(self, location): return NodeLocation( id=location['id'], name=location['country_code'], country=location['location'], driver=self.connection.driver ) def _to_nodes(self, servers): return [self._to_node( server) for server in servers] def _to_node(self, server): extra = {} extra['datacenter'] = server['datacenter'] if 'description' in server: extra['description'] = server['description'] if 'status' in server: extra['status'] = server['status'] if 'image' in server: extra['image'] = server['image'] if 'hardware' in server: extra['hardware'] = server['hardware'] if 'dvd' in server: extra['dvd'] = server['dvd'] if 'snapshot' in server: extra['snapshot'] = server['snapshot'] if 'ips' in server: extra['ips'] = server['ips'] if 'alerts' in server: extra['alerts'] = server['alerts'] if 'monitoring_policy' in server: extra['monitoring_policy'] = server['monitoring_policy'] if 'private_networks' in server: extra['private_networks'] = server['private_networks'] ips = [] if server['ips'] is not None: for ip in server['ips']: ips.append(ip['ip']) state = self.NODE_STATE_MAP.get( server['status']['state']) return Node( id=server['id'], 
state=state, name=server['name'], driver=self.connection.driver, public_ips=ips, private_ips=None, extra=extra ) def _wait_for_state(self, server_id, state, retries=50): for i in (0, retries): server = self.ex_get_server(server_id) if server.extra['status']['state'] == state: return sleep(5) if i == retries: raise Exception('Retries count reached') def _list_fixed_instances(self): response = self.connection.request( action='/servers/fixed_instance_sizes', method='GET' ) return response.object apache-libcloud-2.2.1/libcloud/compute/drivers/medone.py0000664000175000017500000000370012701023453023163 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Med-1 Driver """ from libcloud.compute.providers import Provider from libcloud.common.dimensiondata import (DimensionDataConnection, API_ENDPOINTS) from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver DEFAULT_REGION = 'med1-il' class MedOneNodeDriver(DimensionDataNodeDriver): """ Med-1 node driver, based on Dimension Data driver """ selected_region = None connectionCls = DimensionDataConnection name = 'MedOne' website = 'http://www.med-1.com/' type = Provider.MEDONE features = {'create_node': ['password']} api_version = 1.0 def __init__(self, key, secret=None, secure=True, host=None, port=None, api_version=None, region=DEFAULT_REGION, **kwargs): if region not in API_ENDPOINTS: raise ValueError('Invalid region: %s' % (region)) self.selected_region = API_ENDPOINTS[region] super(MedOneNodeDriver, self).__init__( key=key, secret=secret, secure=secure, host=host, port=port, api_version=api_version, region=region, **kwargs) apache-libcloud-2.2.1/libcloud/compute/drivers/dimensiondata.py0000664000175000017500000056005013153541406024546 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Dimension Data Driver """ from libcloud.utils.py3 import ET from libcloud.common.dimensiondata import LooseVersion from libcloud.common.exceptions import BaseHTTPError from libcloud.compute.base import NodeDriver, Node, NodeAuthPassword from libcloud.compute.base import NodeSize, NodeImage, NodeLocation from libcloud.common.dimensiondata import dd_object_to_id from libcloud.common.dimensiondata import DimensionDataAPIException from libcloud.common.dimensiondata import (DimensionDataConnection, DimensionDataStatus) from libcloud.common.dimensiondata import DimensionDataNetwork from libcloud.common.dimensiondata import DimensionDataNetworkDomain from libcloud.common.dimensiondata import DimensionDataVlan from libcloud.common.dimensiondata import DimensionDataServerCpuSpecification from libcloud.common.dimensiondata import DimensionDataServerDisk from libcloud.common.dimensiondata import DimensionDataServerVMWareTools from libcloud.common.dimensiondata import DimensionDataPublicIpBlock from libcloud.common.dimensiondata import DimensionDataFirewallRule from libcloud.common.dimensiondata import DimensionDataFirewallAddress from libcloud.common.dimensiondata import DimensionDataNatRule from libcloud.common.dimensiondata import DimensionDataAntiAffinityRule from libcloud.common.dimensiondata import DimensionDataIpAddressList from libcloud.common.dimensiondata import DimensionDataChildIpAddressList from libcloud.common.dimensiondata import DimensionDataIpAddress from libcloud.common.dimensiondata import DimensionDataPortList from libcloud.common.dimensiondata import DimensionDataPort from libcloud.common.dimensiondata import DimensionDataChildPortList from libcloud.common.dimensiondata import DimensionDataNic from libcloud.common.dimensiondata import NetworkDomainServicePlan from libcloud.common.dimensiondata import DimensionDataTagKey from libcloud.common.dimensiondata import DimensionDataTag from libcloud.common.dimensiondata import API_ENDPOINTS, DEFAULT_REGION 
from libcloud.common.dimensiondata import TYPES_URN from libcloud.common.dimensiondata import SERVER_NS, NETWORK_NS, GENERAL_NS from libcloud.utils.py3 import urlencode, ensure_string from libcloud.utils.xml import fixxpath, findtext, findall from libcloud.utils.py3 import basestring from libcloud.compute.types import NodeState, Provider import sys # Node state map is a dictionary with the keys as tuples # These tuples represent: # (, , ) NODE_STATE_MAP = { ('NORMAL', 'false', None): NodeState.STOPPED, ('PENDING_CHANGE', 'false', None): NodeState.PENDING, ('PENDING_CHANGE', 'false', 'CHANGE_NETWORK_ADAPTER'): NodeState.PENDING, ('PENDING_CHANGE', 'true', 'CHANGE_NETWORK_ADAPTER'): NodeState.PENDING, ('PENDING_CHANGE', 'false', 'EXCHANGE_NIC_VLANS'): NodeState.PENDING, ('PENDING_CHANGE', 'true', 'EXCHANGE_NIC_VLANS'): NodeState.PENDING, ('NORMAL', 'true', None): NodeState.RUNNING, ('PENDING_CHANGE', 'true', 'START_SERVER'): NodeState.STARTING, ('PENDING_ADD', 'true', 'DEPLOY_SERVER'): NodeState.STARTING, ('PENDING_ADD', 'true', 'DEPLOY_SERVER_WITH_DISK_SPEED'): NodeState.STARTING, ('PENDING_CHANGE', 'true', 'SHUTDOWN_SERVER'): NodeState.STOPPING, ('PENDING_CHANGE', 'true', 'POWER_OFF_SERVER'): NodeState.STOPPING, ('PENDING_CHANGE', 'true', 'REBOOT_SERVER'): NodeState.REBOOTING, ('PENDING_CHANGE', 'true', 'RESET_SERVER'): NodeState.REBOOTING, ('PENDING_CHANGE', 'true', 'RECONFIGURE_SERVER'): NodeState.RECONFIGURING, } OBJECT_TO_TAGGING_ASSET_TYPE_MAP = { 'Node': 'SERVER', 'NodeImage': 'CUSTOMER_IMAGE', 'DimensionDataNetworkDomain': 'NETWORK_DOMAIN', 'DimensionDataVlan': 'VLAN', 'DimensionDataPublicIpBlock': 'PUBLIC_IP_BLOCK' } class DimensionDataNodeDriver(NodeDriver): """ DimensionData node driver. Default api_version is used unless specified. 
""" selected_region = None connectionCls = DimensionDataConnection name = 'DimensionData' website = 'http://www.dimensiondata.com/' type = Provider.DIMENSIONDATA features = {'create_node': ['password']} api_version = 1.0 def __init__(self, key, secret=None, secure=True, host=None, port=None, api_version=None, region=DEFAULT_REGION, **kwargs): if region not in API_ENDPOINTS and host is None: raise ValueError( 'Invalid region: %s, no host specified' % (region)) if region is not None: self.selected_region = API_ENDPOINTS[region] if api_version is not None: self.api_version = api_version super(DimensionDataNodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, api_version=api_version, region=region, **kwargs) def _ex_connection_class_kwargs(self): """ Add the region to the kwargs before the connection is instantiated """ kwargs = super(DimensionDataNodeDriver, self)._ex_connection_class_kwargs() kwargs['region'] = self.selected_region kwargs['api_version'] = self.api_version return kwargs def _create_node_mcp1(self, name, image, auth, ex_description, ex_network=None, ex_memory_gb=None, ex_cpu_specification=None, ex_is_started=True, ex_primary_dns=None, ex_secondary_dns=None, **kwargs): """ Create a new DimensionData node :keyword name: String with a name for this new node (required) :type name: ``str`` :keyword image: OS Image to boot on node. (required) :type image: :class:`NodeImage` or ``str`` :keyword auth: Initial authentication information for the node. 
(If this is a customer LINUX image auth will be ignored) :type auth: :class:`NodeAuthPassword` or ``str`` or ``None`` :keyword ex_description: description for this node (required) :type ex_description: ``str`` :keyword ex_network: Network to create the node within (required unless using ex_network_domain or ex_primary_ipv4) :type ex_network: :class:`DimensionDataNetwork` or ``str`` :keyword ex_memory_gb: The amount of memory in GB for the server :type ex_memory_gb: ``int`` :keyword ex_cpu_specification: The spec of CPU to deploy ( optional) :type ex_cpu_specification: :class:`DimensionDataServerCpuSpecification` :keyword ex_is_started: Start server after creation? default true (required) :type ex_is_started: ``bool`` :keyword ex_primary_dns: The node's primary DNS :type ex_primary_dns: ``str`` :keyword ex_secondary_dns: The node's secondary DNS :type ex_secondary_dns: ``str`` :return: The newly created :class:`Node`. :rtype: :class:`Node` """ password = None image_needs_auth = self._image_needs_auth(image) if image_needs_auth: if isinstance(auth, basestring): auth_obj = NodeAuthPassword(password=auth) password = auth else: auth_obj = self._get_and_check_auth(auth) password = auth_obj.password server_elm = ET.Element('deployServer', {'xmlns': TYPES_URN}) ET.SubElement(server_elm, "name").text = name ET.SubElement(server_elm, "description").text = ex_description image_id = self._image_to_image_id(image) ET.SubElement(server_elm, "imageId").text = image_id ET.SubElement(server_elm, "start").text = str( ex_is_started).lower() if password is not None: ET.SubElement(server_elm, "administratorPassword").text = password if ex_cpu_specification is not None: cpu = ET.SubElement(server_elm, "cpu") cpu.set('speed', ex_cpu_specification.performance) cpu.set('count', str(ex_cpu_specification.cpu_count)) cpu.set('coresPerSocket', str(ex_cpu_specification.cores_per_socket)) if ex_memory_gb is not None: ET.SubElement(server_elm, "memoryGb").text = str(ex_memory_gb) if ex_network is 
not None: network_elm = ET.SubElement(server_elm, "network") network_id = self._network_to_network_id(ex_network) ET.SubElement(network_elm, "networkId").text = network_id if ex_primary_dns: dns_elm = ET.SubElement(server_elm, "primaryDns") dns_elm.text = ex_primary_dns if ex_secondary_dns: dns_elm = ET.SubElement(server_elm, "secondaryDns") dns_elm.text = ex_secondary_dns response = self.connection.request_with_orgId_api_2( 'server/deployServer', method='POST', data=ET.tostring(server_elm)).object node_id = None for info in findall(response, 'info', TYPES_URN): if info.get('name') == 'serverId': node_id = info.get('value') node = self.ex_get_node_by_id(node_id) if image_needs_auth: if getattr(auth_obj, "generated", False): node.extra['password'] = auth_obj.password return node def create_node(self, name, image, auth, ex_network_domain=None, ex_primary_nic_private_ipv4=None, ex_primary_nic_vlan=None, ex_primary_nic_network_adapter=None, ex_additional_nics=None, ex_description=None, ex_disks=None, ex_cpu_specification=None, ex_memory_gb=None, ex_is_started=True, ex_primary_dns=None, ex_secondary_dns=None, ex_ipv4_gateway=None, ex_microsoft_time_zone=None, **kwargs ): """ Create a new DimensionData node in MCP2. However, it is still backward compatible for MCP1 for a limited time. Please consider using MCP2 datacenter as MCP1 will phase out soon. 
        Legacy Create Node for MCP1 datacenter

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.base import NodeAuthPassword
        >>> from libcloud.compute.providers import get_driver
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = False
        >>> DimensionData = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Password
        >>> root_pw = NodeAuthPassword('password123')
        >>>
        >>> # Get location
        >>> location = driver.ex_get_location_by_id(id='AU1')
        >>>
        >>> # Get network by location
        >>> my_network = driver.list_networks(location=location)[0]
        >>> pprint(my_network)
        >>>
        >>> # Get Image
        >>> images = driver.list_images(location=location)
        >>> image = images[0]
        >>>
        >>> node = driver.create_node(name='test_blah_2', image=image,
        >>>                           auth=root_pw,
        >>>                           ex_description='test3 node',
        >>>                           ex_network=my_network,
        >>>                           ex_is_started=False)
        >>> pprint(node)

        Create Node in MCP2 Data Center

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.base import NodeAuthPassword
        >>> from libcloud.compute.providers import get_driver
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Password
        >>> root_pw = NodeAuthPassword('password123')
        >>>
        >>> # Get location
        >>> location = driver.ex_get_location_by_id(id='AU9')
        >>>
        >>> # Get network domain by location
        >>> networkDomainName = "Baas QA"
        >>> network_domains = driver.ex_list_network_domains(
        ...     location=location)
        >>> my_network_domain = [d for d in network_domains
        ...                      if d.name == networkDomainName][0]
        >>>
        >>> vlan = driver.ex_list_vlans(
        ...     location=location, network_domain=my_network_domain)[0]
        >>> pprint(vlan)
        >>>
        >>> # Get Image
        >>> images = driver.list_images(location=location)
        >>> image = images[0]
        >>>
        >>> # Create node using vlan instead of private IPv4
        >>> node = driver.create_node(name='test_server_01', image=image,
        >>>                           auth=root_pw,
        >>>                           ex_description='test2 node',
        >>>                           ex_network_domain=my_network_domain,
        >>>                           ex_primary_nic_vlan=vlan,
        >>>                           ex_is_started=False)
        >>>
        >>> # Option: Create node using private IPv4 instead of vlan
        >>> # node = driver.create_node(name='test_server_02', image=image,
        >>> #                           auth=root_pw,
        >>> #                           ex_description='test2 node',
        >>> #                           ex_network_domain=my_network_domain,
        >>> #                           ex_primary_nic_private_ipv4=
        >>> #                               '10.1.1.7',
        >>> #                           ex_is_started=False)
        >>>
        >>> # Option: Create node using by specifying Network Adapter
        >>> # node = driver.create_node(name='test_server_03', image=image,
        >>> #                           auth=root_pw,
        >>> #                           ex_description='test2 node',
        >>> #                           ex_network_domain=my_network_domain,
        >>> #                           ex_primary_nic_vlan=vlan,
        >>> #                           ex_primary_nic_network_adapter=
        >>> #                               'E1000',
        >>> #                           ex_is_started=False)
        >>>

        :keyword name: (required) String with a name for this new node
        :type name: ``str``

        :keyword image: (required) OS Image to boot on node.
        :type image: :class:`NodeImage` or ``str``

        :keyword auth: Initial authentication information for the node.
                       (If this is a customer LINUX image auth will be
                       ignored)
        :type auth: :class:`NodeAuthPassword` or ``str`` or ``None``

        :keyword ex_description: (optional) description for this node
        :type ex_description: ``str``

        :keyword ex_network_domain: (required) Network Domain or Network
                                    Domain ID to create the node
        :type ex_network_domain: :class:`DimensionDataNetworkDomain`
                                 or ``str``

        :keyword ex_primary_nic_private_ipv4: Provide private IPv4. Ignore
                                              if ex_primary_nic_vlan is
                                              provided. Use one or the
                                              other. Not both.
        :type ex_primary_nic_private_ipv4: :``str``

        :keyword ex_primary_nic_vlan: Provide VLAN for the node if
                                      ex_primary_nic_private_ipv4 NOT
                                      provided. One or the other. Not both.
        :type ex_primary_nic_vlan: :class: DimensionDataVlan or ``str``

        :keyword ex_primary_nic_network_adapter: (Optional) Default value
                                                 for the Operating System
                                                 will be used if leave
                                                 empty. Example: "E1000".
        :type ex_primary_nic_network_adapter: :``str``

        :keyword ex_additional_nics: (optional) List
                                     :class:'DimensionDataNic' or None
        :type ex_additional_nics: ``list`` of :class:'DimensionDataNic'
                                  or ``str``

        :keyword ex_memory_gb: (optional) The amount of memory in GB for
                               the server. Can be used to override the
                               memory value inherited from the source
                               Server Image.
        :type ex_memory_gb: ``int``

        :keyword ex_cpu_specification: (optional) The spec of CPU to deploy
        :type ex_cpu_specification:
            :class:`DimensionDataServerCpuSpecification`

        :keyword ex_is_started: (required) Start server after creation.
                                Default is set to true.
        :type ex_is_started: ``bool``

        :keyword ex_primary_dns: (Optional) The node's primary DNS
        :type ex_primary_dns: ``str``

        :keyword ex_secondary_dns: (Optional) The node's secondary DNS
        :type ex_secondary_dns: ``str``

        :keyword ex_ipv4_gateway: (Optional) IPv4 address in dot-decimal
                                  notation, which will be used as the
                                  Primary NIC gateway instead of the
                                  default gateway assigned by the system.
                                  If ipv4Gateway is provided it does not
                                  have to be on the VLAN of the Primary
                                  NIC but MUST be reachable or the Guest
                                  OS will not be configured correctly.
        :type ex_ipv4_gateway: ``str``

        :keyword ex_disks: (optional) Dimensiondata disks. Optional disk
                           elements can be used to define the disk speed
                           that each disk on the Server; inherited from
                           the source Server Image will be deployed to.
                           It is not necessary to include a disk element
                           for every disk; only those that you wish to
                           set a disk speed value for. Note that scsiId 7
                           cannot be used. Up to 13 disks can be present
                           in addition to the required OS disk on SCSI ID
                           0. Refer to https://docs.mcp-services.net/x/UwIu
                           for disk
        :type ex_disks: List or tuple of :class:'DimensionDataServerDisk`

        :keyword ex_microsoft_time_zone: (optional) For use with Microsoft
                                         Windows source Server Images
                                         only. For the exact value to use
                                         please refer to the table of
                                         time zone indexes in the
                                         following Microsoft Technet
                                         documentation. If none is
                                         supplied, the default time zone
                                         for the data center geographic
                                         region will be used.
        :type ex_microsoft_time_zone: `str``

        :return: The newly created :class:`Node`.
        :rtype: :class:`Node`
        """
        # Neither legacy MCP1 network nor MCP2 network domain provided
        if ex_network_domain is None and 'ex_network' not in kwargs:
            raise ValueError('You must provide either ex_network_domain '
                             'for MCP2 or ex_network for legacy MCP1')

        # Ambiguous parameter provided. Can't determine if it is MCP 1 or 2.
        if ex_network_domain is not None and 'ex_network' in kwargs:
            raise ValueError('You can only supply either '
                             'ex_network_domain '
                             'for MCP2 or ex_network for legacy MCP1')

        # Set ex_is_started to False by default if none bool data type
        # provided.  NOTE(review): despite the comment, non-bool values are
        # coerced to True, not False.
        if not isinstance(ex_is_started, bool):
            ex_is_started = True

        # Handle MCP1 legacy: presence of ex_network selects the old path.
        if 'ex_network' in kwargs:
            new_node = self._create_node_mcp1(
                name=name,
                image=image,
                auth=auth,
                ex_network=kwargs.get("ex_network"),
                ex_description=ex_description,
                ex_memory_gb=ex_memory_gb,
                ex_cpu_specification=ex_cpu_specification,
                ex_is_started=ex_is_started,
                ex_primary_ipv4=ex_primary_nic_private_ipv4,
                ex_disks=ex_disks,
                ex_additional_nics_vlan=kwargs.get(
                    "ex_additional_nics_vlan"),
                ex_additional_nics_ipv4=kwargs.get(
                    "ex_additional_nics_ipv4"),
                ex_primary_dns=ex_primary_dns,
                ex_secondary_dns=ex_secondary_dns
            )
        else:
            # Handle MCP2 legacy. CaaS api 2.2 or earlier: translate old
            # keyword names into the current parameters.
            if 'ex_vlan' in kwargs:
                ex_primary_nic_vlan = kwargs.get('ex_vlan')
            if 'ex_primary_ipv4' in kwargs:
                ex_primary_nic_private_ipv4 = kwargs.get(
                    'ex_primary_ipv4')

            additional_nics = []

            if 'ex_additional_nics_vlan' in kwargs:
                vlans = kwargs.get('ex_additional_nics_vlan')
                if isinstance(vlans, (list, tuple)):
                    for v in vlans:
                        add_nic = DimensionDataNic(vlan=v)
                        additional_nics.append(add_nic)
                else:
                    raise TypeError("ex_additional_nics_vlan must "
                                    "be None or a tuple/list")

            if 'ex_additional_nics_ipv4' in kwargs:
                ips = kwargs.get('ex_additional_nics_ipv4')
                if isinstance(ips, (list, tuple)):
                    for ip in ips:
                        add_nic = DimensionDataNic(private_ip_v4=ip)
                        additional_nics.append(add_nic)
                else:
                    if ips is not None:
                        raise TypeError("ex_additional_nics_ipv4 must "
                                        "be None or a tuple/list")

            if ('ex_additional_nics_vlan' in kwargs or
                    'ex_additional_nics_ipv4' in kwargs):
                ex_additional_nics = additional_nics

            # Handle MCP2 latest. CaaS API 2.3 onwards
            if ex_network_domain is None:
                raise ValueError("ex_network_domain must be specified")

            password = None
            # Only images that need OS credentials get a password element.
            image_needs_auth = self._image_needs_auth(image)
            if image_needs_auth:
                if isinstance(auth, basestring):
                    auth_obj = NodeAuthPassword(password=auth)
                    password = auth
                else:
                    auth_obj = self._get_and_check_auth(auth)
                    password = auth_obj.password

            # Build the deployServer XML request body.
            server_elm = ET.Element('deployServer', {'xmlns': TYPES_URN})
            ET.SubElement(server_elm, "name").text = name
            ET.SubElement(server_elm, "description").text = ex_description

            image_id = self._image_to_image_id(image)
            ET.SubElement(server_elm, "imageId").text = image_id
            # API expects lowercase 'true'/'false'.
            ET.SubElement(server_elm, "start").text = str(
                ex_is_started).lower()

            if password is not None:
                ET.SubElement(server_elm,
                              "administratorPassword").text = password

            if ex_cpu_specification is not None:
                cpu = ET.SubElement(server_elm, "cpu")
                cpu.set('speed', ex_cpu_specification.performance)
                cpu.set('count', str(ex_cpu_specification.cpu_count))
                cpu.set('coresPerSocket',
                        str(ex_cpu_specification.cores_per_socket))

            if ex_memory_gb is not None:
                ET.SubElement(server_elm, "memoryGb").text = \
                    str(ex_memory_gb)

            # The primary NIC needs exactly one of IPv4 / VLAN.
            if (ex_primary_nic_private_ipv4 is None and
                    ex_primary_nic_vlan is None):
                raise ValueError("Missing argument. Either "
                                 "ex_primary_nic_private_ipv4 or "
                                 "ex_primary_nic_vlan "
                                 "must be specified.")

            if (ex_primary_nic_private_ipv4 is not None and
                    ex_primary_nic_vlan is not None):
                raise ValueError("Either ex_primary_nic_private_ipv4 or "
                                 "ex_primary_nic_vlan "
                                 "be specified. Not both.")

            network_elm = ET.SubElement(server_elm, "networkInfo")
            net_domain_id = self._network_domain_to_network_domain_id(
                ex_network_domain)
            network_elm.set('networkDomainId', net_domain_id)

            pri_nic = ET.SubElement(network_elm, 'primaryNic')
            if ex_primary_nic_private_ipv4 is not None:
                ET.SubElement(pri_nic,
                              'privateIpv4').text = \
                    ex_primary_nic_private_ipv4
            if ex_primary_nic_vlan is not None:
                vlan_id = self._vlan_to_vlan_id(ex_primary_nic_vlan)
                ET.SubElement(pri_nic, 'vlanId').text = vlan_id
            if ex_primary_nic_network_adapter is not None:
                ET.SubElement(pri_nic, "networkAdapter").text = \
                    ex_primary_nic_network_adapter

            if isinstance(ex_additional_nics, (list, tuple)):
                for nic in ex_additional_nics:
                    additional_nic = ET.SubElement(network_elm,
                                                   'additionalNic')
                    # Same one-of constraint as the primary NIC.
                    if (nic.private_ip_v4 is None and
                            nic.vlan is None):
                        raise ValueError("Either a vlan or private_ip_v4 "
                                         "must be specified for each "
                                         "additional nic.")
                    if (nic.private_ip_v4 is not None and
                            nic.vlan is not None):
                        raise ValueError("Either a vlan or private_ip_v4 "
                                         "must be specified for each "
                                         "additional nic. Not both.")

                    if nic.private_ip_v4 is not None:
                        ET.SubElement(additional_nic,
                                      'privateIpv4').text = \
                            nic.private_ip_v4
                    if nic.vlan is not None:
                        vlan_id = self._vlan_to_vlan_id(nic.vlan)
                        ET.SubElement(additional_nic,
                                      'vlanId').text = vlan_id
                    if nic.network_adapter_name is not None:
                        ET.SubElement(additional_nic,
                                      "networkAdapter").text = \
                            nic.network_adapter_name
            elif ex_additional_nics is not None:
                raise TypeError(
                    "ex_additional_NICs must be None or tuple/list")

            if ex_primary_dns:
                dns_elm = ET.SubElement(server_elm, "primaryDns")
                dns_elm.text = ex_primary_dns

            if ex_secondary_dns:
                dns_elm = ET.SubElement(server_elm, "secondaryDns")
                dns_elm.text = ex_secondary_dns

            if ex_ipv4_gateway:
                ET.SubElement(server_elm,
                              "ipv4Gateway").text = ex_ipv4_gateway

            if isinstance(ex_disks, (list, tuple)):
                for disk in ex_disks:
                    disk_elm = ET.SubElement(server_elm, 'disk')
                    disk_elm.set('scsiId', disk.scsi_id)
                    disk_elm.set('speed', disk.speed)
            elif ex_disks is not None:
                raise TypeError("ex_disks must be None or tuple/list")

            if ex_microsoft_time_zone:
                ET.SubElement(server_elm, "microsoftTimeZone").text = \
                    ex_microsoft_time_zone

            response = self.connection.request_with_orgId_api_2(
                'server/deployServer',
                method='POST',
                data=ET.tostring(server_elm)).object

            # New server id is returned as an <info name="serverId"> pair.
            node_id = None
            for info in findall(response, 'info', TYPES_URN):
                if info.get('name') == 'serverId':
                    node_id = info.get('value')

            new_node = self.ex_get_node_by_id(node_id)

            if image_needs_auth:
                # Expose an auto-generated password to the caller.
                if getattr(auth_obj, "generated", False):
                    new_node.extra['password'] = auth_obj.password

        return new_node

    def destroy_node(self, node):
        """
        Deletes a node, node must be stopped before deletion

        :keyword node: The node to delete
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        request_elm = ET.Element('deleteServer',
                                 {'xmlns': TYPES_URN, 'id': node.id})
        body = self.connection.request_with_orgId_api_2(
            'server/deleteServer',
            method='POST',
            data=ET.tostring(request_elm)).object
        response_code = findtext(body, 'responseCode', TYPES_URN)
        return response_code in \
            ['IN_PROGRESS', 'OK']

    def reboot_node(self, node):
        """
        Reboots a node by requesting the OS restart via the hypervisor

        :keyword node: The node to reboot
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        request_elm = ET.Element('rebootServer',
                                 {'xmlns': TYPES_URN, 'id': node.id})
        body = self.connection.request_with_orgId_api_2(
            'server/rebootServer',
            method='POST',
            data=ET.tostring(request_elm)).object
        response_code = findtext(body, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def list_nodes(self, ex_location=None, ex_name=None, ex_ipv6=None,
                   ex_ipv4=None, ex_vlan=None, ex_image=None,
                   ex_deployed=None, ex_started=None, ex_state=None,
                   ex_network=None, ex_network_domain=None):
        """
        List nodes deployed for your organization.

        :keyword ex_location: Filters the node list to nodes that are
                              located in this location
        :type ex_location: :class:`NodeLocation` or ``str``

        :keyword ex_name: Filters the node list to nodes that have this name
        :type ex_name ``str``

        :keyword ex_ipv6: Filters the node list to nodes that have this
                          ipv6 address
        :type ex_ipv6: ``str``

        :keyword ex_ipv4: Filters the node list to nodes that have this
                          ipv4 address
        :type ex_ipv4: ``str``

        :keyword ex_vlan: Filters the node list to nodes that are in this
                          VLAN
        :type ex_vlan: :class:`DimensionDataVlan` or ``str``

        :keyword ex_image: Filters the node list to nodes that have this
                           image
        :type ex_image: :class:`NodeImage` or ``str``

        :keyword ex_deployed: Filters the node list to nodes that are
                              deployed or not
        :type ex_deployed: ``bool``

        :keyword ex_started: Filters the node list to nodes that are
                             started or not
        :type ex_started: ``bool``

        :keyword ex_state: Filters the node list by nodes that are in
                           this state
        :type ex_state: ``str``

        :keyword ex_network: Filters the node list to nodes in this network
        :type ex_network: :class:`DimensionDataNetwork` or ``str``

        :keyword ex_network_domain: Filters the node list to nodes in this
                                    network domain
        :type ex_network_domain: :class:`DimensionDataNetworkDomain`
                                 or ``str``

        :return: a list of `Node` objects
        :rtype: ``list`` of :class:`Node`
        """
        # Drain the paginated generator into one flat list.
        node_list = []
        for nodes in self.ex_list_nodes_paginated(
                location=ex_location,
                name=ex_name, ipv6=ex_ipv6,
                ipv4=ex_ipv4, vlan=ex_vlan,
                image=ex_image, deployed=ex_deployed,
                started=ex_started, state=ex_state,
                network=ex_network,
                network_domain=ex_network_domain):
            node_list.extend(nodes)

        return node_list

    def list_images(self, location=None):
        """
        List images available

        Note:  Currently only returns the default 'base OS images'
               provided by DimensionData. Customer images (snapshots)
               use ex_list_customer_images

        :keyword ex_location: Filters the node list to nodes that are
                              located in this location
        :type ex_location: :class:`NodeLocation` or ``str``

        :return: List of images available
        :rtype: ``list`` of :class:`NodeImage`
        """
        params = {}
        if location is not None:
            params['datacenterId'] = self._location_to_location_id(location)

        return self._to_images(
            self.connection.request_with_orgId_api_2(
                'image/osImage',
                params=params)
            .object)

    def list_sizes(self, location=None):
        """
        return a list of available sizes
            Currently, the size of the node is dictated by the chosen
            OS base image, they cannot be set explicitly.

        @inherits: :class:`NodeDriver.list_sizes`
        """
        # Sizes are not selectable on this provider; return one stub.
        return [
            NodeSize(id=1,
                     name="default",
                     ram=0,
                     disk=0,
                     bandwidth=0,
                     price=0,
                     driver=self.connection.driver),
        ]

    def list_locations(self, ex_id=None):
        """
        List locations (datacenters) available for instantiating servers and
        networks.

        :keyword ex_id: Filters the location list to this id
        :type ex_id: ``str``

        :return: List of locations
        :rtype: ``list`` of :class:`NodeLocation`
        """
        params = {}
        if ex_id is not None:
            params['id'] = ex_id

        return self._to_locations(
            self.connection
            .request_with_orgId_api_2(
                'infrastructure/datacenter',
                params=params
            ).object
        )

    def list_networks(self, location=None):
        """
        List networks deployed across all data center locations for your
        organization.
The response includes the location of each network. :keyword location: The location :type location: :class:`NodeLocation` or ``str`` :return: a list of DimensionDataNetwork objects :rtype: ``list`` of :class:`DimensionDataNetwork` """ url_ext = '' if location is not None: url_ext = '/' + self._location_to_location_id(location) return self._to_networks( self.connection .request_with_orgId_api_1('networkWithLocation%s' % url_ext) .object) def import_image(self, ovf_package_name, name, cluster_id=None, datacenter_id=None, description=None, is_guest_os_customization=None, tagkey_name_value_dictionaries=None): """ Import image :param ovf_package_name: Image OVF package name :type ovf_package_name: ``str`` :param name: Image name :type name: ``str`` :param cluster_id: Provide either cluster_id or datacenter_id :type cluster_id: ``str`` :param datacenter_id: Provide either cluster_id or datacenter_id :type datacenter_id: ``str`` :param description: Optional. Description of image :type description: ``str`` :param is_guest_os_customization: Optional. true for NGOC image :type is_guest_os_customization: ``bool`` :param tagkey_name_value_dictionaries: Optional tagkey name value dict :type tagkey_name_value_dictionaries: dictionaries :return: Return true if successful :rtype: ``bool`` """ # Unsupported for version lower than 2.4 if LooseVersion(self.connection.active_api_version) < LooseVersion( '2.4'): raise Exception("import image is feature is NOT supported in " "api version earlier than 2.4") elif cluster_id is None and datacenter_id is None: raise ValueError("Either cluster_id or datacenter_id must be " "provided") elif cluster_id is not None and datacenter_id is not None: raise ValueError("Cannot accept both cluster_id and " "datacenter_id. 
Please provide either one") else: import_image_elem = ET.Element( 'urn:importImage', { 'xmlns:urn': TYPES_URN, }) ET.SubElement( import_image_elem, 'urn:ovfPackage' ).text = ovf_package_name ET.SubElement( import_image_elem, 'urn:name' ).text = name if description is not None: ET.SubElement( import_image_elem, 'urn:description' ).text = description if cluster_id is not None: ET.SubElement( import_image_elem, 'urn:clusterId' ).text = cluster_id else: ET.SubElement( import_image_elem, 'urn:datacenterId' ).text = datacenter_id if is_guest_os_customization is not None: ET.SubElement( import_image_elem, 'urn:guestOsCustomization' ).text = is_guest_os_customization if len(tagkey_name_value_dictionaries) > 0: for k, v in tagkey_name_value_dictionaries.items(): tag_elem = ET.SubElement( import_image_elem, 'urn:tag') ET.SubElement(tag_elem, 'urn:tagKeyName').text = k if v is not None: ET.SubElement(tag_elem, 'urn:value').text = v response = self.connection.request_with_orgId_api_2( 'image/importImage', method='POST', data=ET.tostring(import_image_elem)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_list_nodes_paginated(self, name=None, location=None, ipv6=None, ipv4=None, vlan=None, image=None, deployed=None, started=None, state=None, network=None, network_domain=None): """ Return a generator which yields node lists in pages :keyword location: Filters the node list to nodes that are located in this location :type location: :class:`NodeLocation` or ``str`` :keyword name: Filters the node list to nodes that have this name :type name ``str`` :keyword ipv6: Filters the node list to nodes that have this ipv6 address :type ipv6: ``str`` :keyword ipv4: Filters the node list to nodes that have this ipv4 address :type ipv4: ``str`` :keyword vlan: Filters the node list to nodes that are in this VLAN :type vlan: :class:`DimensionDataVlan` or ``str`` :keyword image: Filters the node list to nodes that have this 
                        image
        :type image: :class:`NodeImage` or ``str``

        :keyword deployed: Filters the node list to nodes that are
                           deployed or not
        :type deployed: ``bool``

        :keyword started: Filters the node list to nodes that are
                          started or not
        :type started: ``bool``

        :keyword state: Filters the node list to nodes that are in
                        this state
        :type state: ``str``

        :keyword network: Filters the node list to nodes in this network
        :type network: :class:`DimensionDataNetwork` or ``str``

        :keyword network_domain: Filters the node list to nodes in this
                                 network domain
        :type network_domain: :class:`DimensionDataNetworkDomain`
                              or ``str``

        :return: a list of `Node` objects
        :rtype: ``generator`` of `list` of :class:`Node`
        """
        params = {}
        if location is not None:
            params['datacenterId'] = self._location_to_location_id(location)
        if ipv6 is not None:
            params['ipv6'] = ipv6
        if ipv4 is not None:
            params['privateIpv4'] = ipv4
        if state is not None:
            params['state'] = state
        if started is not None:
            params['started'] = started
        if deployed is not None:
            params['deployed'] = deployed
        if name is not None:
            params['name'] = name
        if network_domain is not None:
            params['networkDomainId'] = \
                self._network_domain_to_network_domain_id(network_domain)
        if network is not None:
            params['networkId'] = self._network_to_network_id(network)
        if vlan is not None:
            params['vlanId'] = self._vlan_to_vlan_id(vlan)
        if image is not None:
            params['sourceImageId'] = self._image_to_image_id(image)

        # Yield the first page, then keep requesting while the API reports
        # a full page.  NOTE(review): ET attribute values are strings, so
        # the >= comparison below is lexicographic — matches upstream
        # behaviour, but looks fragile; confirm against the API's paging
        # fields before changing.
        nodes_obj = self._list_nodes_single_page(params)
        yield self._to_nodes(nodes_obj)

        while nodes_obj.get('pageCount') >= nodes_obj.get('pageSize'):
            params['pageNumber'] = int(nodes_obj.get('pageNumber')) + 1
            nodes_obj = self._list_nodes_single_page(params)
            yield self._to_nodes(nodes_obj)

    def ex_start_node(self, node):
        """
        Powers on an existing deployed server

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        request_elm = ET.Element('startServer',
                                 {'xmlns': TYPES_URN, 'id': node.id})
        body = self.connection.request_with_orgId_api_2(
            'server/startServer',
            method='POST',
            data=ET.tostring(request_elm)).object
        response_code = findtext(body, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_shutdown_graceful(self, node):
        """
        This function will attempt to "gracefully" stop a server by
        initiating a shutdown sequence within the guest operating system.
        A successful response on this function means the system has
        successfully passed the request into the operating system.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        request_elm = ET.Element('shutdownServer',
                                 {'xmlns': TYPES_URN, 'id': node.id})
        body = self.connection.request_with_orgId_api_2(
            'server/shutdownServer',
            method='POST',
            data=ET.tostring(request_elm)).object
        response_code = findtext(body, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_power_off(self, node):
        """
        This function will abruptly power-off a server.  Unlike
        ex_shutdown_graceful, success ensures the node will stop but some OS
        and application configurations may be adversely affected by the
        equivalent of pulling the power plug out of the machine.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        request_elm = ET.Element('powerOffServer',
                                 {'xmlns': TYPES_URN, 'id': node.id})

        try:
            body = self.connection.request_with_orgId_api_2(
                'server/powerOffServer',
                method='POST',
                data=ET.tostring(request_elm)).object
            response_code = findtext(body, 'responseCode', TYPES_URN)
        except (DimensionDataAPIException, NameError, BaseHTTPError):
            # Power-off can race with an in-flight state change; fall back
            # to the node's reported state to decide success.
            r = self.ex_get_node_by_id(node.id)
            response_code = r.state.upper()

        return response_code in ['IN_PROGRESS', 'OK',
                                 'STOPPED', 'STOPPING']

    def ex_reset(self, node):
        """
        This function will abruptly reset a server.  Unlike
        reboot_node, success ensures the node will restart but some OS
        and application configurations may be adversely affected by the
        equivalent of pulling the power plug out of the machine.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        request_elm = ET.Element('resetServer',
                                 {'xmlns': TYPES_URN, 'id': node.id})
        body = self.connection.request_with_orgId_api_2(
            'server/resetServer',
            method='POST',
            data=ET.tostring(request_elm)).object
        response_code = findtext(body, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_update_vm_tools(self, node):
        """
        This function triggers an update of the VMware Tools
        software running on the guest OS of a Server.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        request_elm = ET.Element('updateVmwareTools',
                                 {'xmlns': TYPES_URN, 'id': node.id})
        body = self.connection.request_with_orgId_api_2(
            'server/updateVmwareTools',
            method='POST',
            data=ET.tostring(request_elm)).object
        response_code = findtext(body, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_update_node(self, node, name=None, description=None,
                       cpu_count=None, ram_mb=None):
        """
        Update the node, the name, CPU or RAM

        :param node: Node which should be used
        :type node: :class:`Node`

        :param name: The new name (optional)
        :type name: ``str``

        :param description: The new description (optional)
        :type description: ``str``

        :param cpu_count: The new CPU count (optional)
        :type cpu_count: ``int``

        :param ram_mb: The new Memory in MB (optional)
        :type ram_mb: ``int``

        :rtype: ``bool``
        """
        data = {}
        if name is not None:
            data['name'] = name
        if description is not None:
            data['description'] = description
        if cpu_count is not None:
            data['cpuCount'] = str(cpu_count)
        if ram_mb is not None:
            data['memory'] = str(ram_mb)
        # Legacy v1 endpoint takes form-encoded data rather than XML.
        body = self.connection.request_with_orgId_api_1(
            'server/%s' % (node.id),
            method='POST',
            data=urlencode(data, True)).object
        response_code = findtext(body,
'result', GENERAL_NS) return response_code in ['IN_PROGRESS', 'SUCCESS'] def ex_create_anti_affinity_rule(self, node_list): """ Create an anti affinity rule given a list of nodes Anti affinity rules ensure that servers will not reside on the same VMware ESX host :param node_list: The list of nodes to create a rule for :type node_list: ``list`` of :class:`Node` or ``list`` of ``str`` :rtype: ``bool`` """ if not isinstance(node_list, (list, tuple)): raise TypeError("Node list must be a list or a tuple.") anti_affinity_xml_request = ET.Element('NewAntiAffinityRule', {'xmlns': SERVER_NS}) for node in node_list: ET.SubElement(anti_affinity_xml_request, 'serverId').text = \ self._node_to_node_id(node) result = self.connection.request_with_orgId_api_1( 'antiAffinityRule', method='POST', data=ET.tostring(anti_affinity_xml_request)).object response_code = findtext(result, 'result', GENERAL_NS) return response_code in ['IN_PROGRESS', 'SUCCESS'] def ex_delete_anti_affinity_rule(self, anti_affinity_rule): """ Remove anti affinity rule :param anti_affinity_rule: The anti affinity rule to delete :type anti_affinity_rule: :class:`DimensionDataAntiAffinityRule` or ``str`` :rtype: ``bool`` """ rule_id = self._anti_affinity_rule_to_anti_affinity_rule_id( anti_affinity_rule) result = self.connection.request_with_orgId_api_1( 'antiAffinityRule/%s?delete' % (rule_id), method='GET').object response_code = findtext(result, 'result', GENERAL_NS) return response_code in ['IN_PROGRESS', 'SUCCESS'] def ex_list_anti_affinity_rules(self, network=None, network_domain=None, node=None, filter_id=None, filter_state=None): """ List anti affinity rules for a network, network domain, or node :param network: The network to list anti affinity rules for One of network, network_domain, or node is required :type network: :class:`DimensionDataNetwork` or ``str`` :param network_domain: The network domain to list anti affinity rules One of network, network_domain, or node is required :type network_domain: 
:class:`DimensionDataNetworkDomain` or ``str`` :param node: The node to list anti affinity rules for One of network, netwok_domain, or node is required :type node: :class:`Node` or ``str`` :param filter_id: This will allow you to filter the rules by this node id :type filter_id: ``str`` :type filter_state: This will allow you to filter rules by node state (i.e. NORMAL) :type filter_state: ``str`` :rtype: ``list`` of :class:`DimensionDataAntiAffinityRule` """ not_none_arguments = [key for key in (network, network_domain, node) if key is not None] if len(not_none_arguments) != 1: raise ValueError("One and ONLY one of network, " "network_domain, or node must be set") params = {} if network_domain is not None: params['networkDomainId'] = \ self._network_domain_to_network_domain_id(network_domain) if network is not None: params['networkId'] = \ self._network_to_network_id(network) if node is not None: params['serverId'] = \ self._node_to_node_id(node) if filter_id is not None: params['id'] = filter_id if filter_state is not None: params['state'] = filter_state paged_result = self.connection.paginated_request_with_orgId_api_2( 'server/antiAffinityRule', method='GET', params=params ) rules = [] for result in paged_result: rules.extend(self._to_anti_affinity_rules(result)) return rules def ex_attach_node_to_vlan(self, node, vlan=None, private_ipv4=None): """ Attach a node to a VLAN by adding an additional NIC to the node on the target VLAN. The IP will be automatically assigned based on the VLAN IP network space. Alternatively, provide a private IPv4 address instead of VLAN information, and this will be assigned to the node on corresponding NIC. 
        :param node: Node which should be used
        :type node: :class:`Node`

        :param vlan: VLAN to attach the node to
                     (required unless private_ipv4)
        :type vlan: :class:`DimensionDataVlan`

        :keyword private_ipv4: Private nic IPv4 Address
                               (required unless vlan)
        :type private_ipv4: ``str``

        :rtype: ``bool``
        """
        request = ET.Element('addNic',
                             {'xmlns': TYPES_URN})
        ET.SubElement(request, 'serverId').text = node.id
        nic = ET.SubElement(request, 'nic')
        # Exactly one of vlan / private_ipv4 selects how the new NIC gets
        # its address: by VLAN auto-assignment or by explicit IPv4.
        if vlan is not None:
            ET.SubElement(nic, 'vlanId').text = vlan.id
        elif private_ipv4 is not None:
            ET.SubElement(nic, 'privateIpv4').text = private_ipv4
        else:
            # NOTE(review): message says "primary_ipv4" but the keyword is
            # private_ipv4 — confirm intended wording before changing it.
            raise ValueError("One of vlan or primary_ipv4 "
                             "must be specified")
        response = self.connection.request_with_orgId_api_2(
            'server/addNic',
            method='POST',
            data=ET.tostring(request)).object
        # The API is asynchronous: IN_PROGRESS is also treated as success.
        response_code = findtext(response, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_destroy_nic(self, nic_id):
        """
        Remove a NIC on a node, removing the node from a VLAN

        :param nic_id: The identifier of the NIC to remove
        :type nic_id: ``str``

        :rtype: ``bool``
        """
        request = ET.Element('removeNic',
                             {'xmlns': TYPES_URN,
                              'id': nic_id})
        response = self.connection.request_with_orgId_api_2(
            'server/removeNic',
            method='POST',
            data=ET.tostring(request)).object
        response_code = findtext(response, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_list_networks(self, location=None):
        """
        List networks deployed across all data center locations for your
        organization.  The response includes the location of each network.
:param location: The target location :type location: :class:`NodeLocation` or ``str`` :return: a list of DimensionDataNetwork objects :rtype: ``list`` of :class:`DimensionDataNetwork` """ return self.list_networks(location=location) def ex_create_network(self, location, name, description=None): """ Create a new network in an MCP 1.0 location :param location: The target location (MCP1) :type location: :class:`NodeLocation` or ``str`` :param name: The name of the network :type name: ``str`` :param description: Additional description of the network :type description: ``str`` :return: A new instance of `DimensionDataNetwork` :rtype: Instance of :class:`DimensionDataNetwork` """ network_location = self._location_to_location_id(location) create_node = ET.Element('NewNetworkWithLocation', {'xmlns': NETWORK_NS}) ET.SubElement(create_node, "name").text = name if description is not None: ET.SubElement(create_node, "description").text = description ET.SubElement(create_node, "location").text = network_location self.connection.request_with_orgId_api_1( 'networkWithLocation', method='POST', data=ET.tostring(create_node)) # MCP1 API does not return the ID, but name is unique for location network = list( filter(lambda x: x.name == name, self.ex_list_networks(location)))[0] return network def ex_delete_network(self, network): """ Delete a network from an MCP 1 data center :param network: The network to delete :type network: :class:`DimensionDataNetwork` :rtype: ``bool`` """ response = self.connection.request_with_orgId_api_1( 'network/%s?delete' % network.id, method='GET').object response_code = findtext(response, 'result', GENERAL_NS) return response_code == "SUCCESS" def ex_rename_network(self, network, new_name): """ Rename a network in MCP 1 data center :param network: The network to rename :type network: :class:`DimensionDataNetwork` :param new_name: The new name of the network :type new_name: ``str`` :rtype: ``bool`` """ response = self.connection.request_with_orgId_api_1( 
'network/%s' % network.id, method='POST', data='name=%s' % new_name).object response_code = findtext(response, 'result', GENERAL_NS) return response_code == "SUCCESS" def ex_get_network_domain(self, network_domain_id): """ Get an individual Network Domain, by identifier :param network_domain_id: The identifier of the network domain :type network_domain_id: ``str`` :rtype: :class:`DimensionDataNetworkDomain` """ locations = self.list_locations() net = self.connection.request_with_orgId_api_2( 'network/networkDomain/%s' % network_domain_id).object return self._to_network_domain(net, locations) def ex_list_network_domains(self, location=None, name=None, service_plan=None, state=None): """ List networks domains deployed across all data center locations domain. for your organization. The response includes the location of each network :param location: Only network domains in the location (optional) :type location: :class:`NodeLocation` or ``str`` :param name: Only network domains of this name (optional) :type name: ``str`` :param service_plan: Only network domains of this type (optional) :type service_plan: ``str`` :param state: Only network domains in this state (optional) :type state: ``str`` :return: a list of `DimensionDataNetwork` objects :rtype: ``list`` of :class:`DimensionDataNetwork` """ params = {} if location is not None: params['datacenterId'] = self._location_to_location_id(location) if name is not None: params['name'] = name if service_plan is not None: params['type'] = service_plan if state is not None: params['state'] = state response = self.connection \ .request_with_orgId_api_2('network/networkDomain', params=params).object return self._to_network_domains(response) def ex_create_network_domain(self, location, name, service_plan, description=None): """ Deploy a new network domain to a data center :param location: The data center to list :type location: :class:`NodeLocation` or ``str`` :param name: The name of the network domain to create :type name: 
        ``str``

        :param service_plan: The service plan, either "ESSENTIALS" or
                             "ADVANCED"
        :type service_plan: ``str``

        :param description: An additional description of
                            the network domain
        :type description: ``str``

        :return: an instance of `DimensionDataNetworkDomain`
        :rtype: :class:`DimensionDataNetworkDomain`
        """
        create_node = ET.Element('deployNetworkDomain', {'xmlns': TYPES_URN})
        ET.SubElement(
            create_node,
            "datacenterId"
        ).text = self._location_to_location_id(location)

        ET.SubElement(create_node, "name").text = name
        if description is not None:
            ET.SubElement(create_node, "description").text = description
        ET.SubElement(create_node, "type").text = service_plan

        response = self.connection.request_with_orgId_api_2(
            'network/deployNetworkDomain',
            method='POST',
            data=ET.tostring(create_node)).object

        network_domain_id = None

        # The new domain's id is returned as an <info name="networkDomainId">
        # element rather than a full resource representation, so pull it out
        # and build the result object locally.
        for info in findall(response, 'info', TYPES_URN):
            if info.get('name') == 'networkDomainId':
                network_domain_id = info.get('value')

        return DimensionDataNetworkDomain(
            id=network_domain_id,
            name=name,
            description=description,
            location=location,
            status=NodeState.RUNNING,
            plan=service_plan
        )

    def ex_update_network_domain(self, network_domain):
        """
        Update the properties of a network domain

        :param network_domain: The network domain with updated properties
        :type network_domain: :class:`DimensionDataNetworkDomain`

        :return: an instance of `DimensionDataNetworkDomain`
        :rtype: :class:`DimensionDataNetworkDomain`
        """
        edit_node = ET.Element('editNetworkDomain', {'xmlns': TYPES_URN})
        edit_node.set('id', network_domain.id)
        ET.SubElement(edit_node, "name").text = network_domain.name
        if network_domain.description is not None:
            ET.SubElement(edit_node, "description").text \
                = network_domain.description
        ET.SubElement(edit_node, "type").text = network_domain.plan

        self.connection.request_with_orgId_api_2(
            'network/editNetworkDomain',
            method='POST',
            data=ET.tostring(edit_node)).object

        # The caller's (already-updated) object is echoed back unchanged.
        return network_domain

    def ex_delete_network_domain(self, network_domain):
        """
        Delete a network domain
        :param network_domain: The network domain to delete
        :type network_domain: :class:`DimensionDataNetworkDomain`

        :rtype: ``bool``
        """
        delete_node = ET.Element('deleteNetworkDomain', {'xmlns': TYPES_URN})
        delete_node.set('id', network_domain.id)
        result = self.connection.request_with_orgId_api_2(
            'network/deleteNetworkDomain',
            method='POST',
            data=ET.tostring(delete_node)).object
        # The API is asynchronous: IN_PROGRESS is also treated as success.
        response_code = findtext(result, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_create_vlan(self,
                       network_domain,
                       name,
                       private_ipv4_base_address,
                       description=None,
                       private_ipv4_prefix_size=24):
        """
        Deploy a new VLAN to a network domain

        :param network_domain: The network domain to add the VLAN to
        :type network_domain: :class:`DimensionDataNetworkDomain`

        :param name: The name of the VLAN to create
        :type name: ``str``

        :param private_ipv4_base_address: The base IPv4 address
            e.g. 192.168.1.0
        :type private_ipv4_base_address: ``str``

        :param description: An additional description of the VLAN
        :type description: ``str``

        :param private_ipv4_prefix_size: The size of the IPv4
            address space, e.g 24
        :type private_ipv4_prefix_size: ``int``

        :return: an instance of `DimensionDataVlan`
        :rtype: :class:`DimensionDataVlan`
        """
        create_node = ET.Element('deployVlan', {'xmlns': TYPES_URN})
        ET.SubElement(create_node, "networkDomainId").text = network_domain.id
        ET.SubElement(create_node, "name").text = name
        if description is not None:
            ET.SubElement(create_node, "description").text = description
        ET.SubElement(create_node, "privateIpv4BaseAddress").text = \
            private_ipv4_base_address
        # ElementTree .text must be a string; the prefix size is an int.
        ET.SubElement(create_node, "privateIpv4PrefixSize").text = \
            str(private_ipv4_prefix_size)

        response = self.connection.request_with_orgId_api_2(
            'network/deployVlan',
            method='POST',
            data=ET.tostring(create_node)).object

        vlan_id = None

        # The new VLAN's id is returned as an <info name="vlanId"> element;
        # a follow-up GET fetches the full representation.
        for info in findall(response, 'info', TYPES_URN):
            if info.get('name') == 'vlanId':
                vlan_id = info.get('value')

        return self.ex_get_vlan(vlan_id)

    def ex_get_vlan(self, vlan_id):
        """
        Get a single VLAN, by its identifier

        :param vlan_id: The identifier of the VLAN
        :type vlan_id: ``str``

        :return: an instance of `DimensionDataVlan`
        :rtype: :class:`DimensionDataVlan`
        """
        # Locations are needed to resolve the VLAN's datacenter reference.
        locations = self.list_locations()
        vlan = self.connection.request_with_orgId_api_2(
            'network/vlan/%s' % vlan_id).object
        return self._to_vlan(vlan, locations)

    def ex_update_vlan(self, vlan):
        """
        Updates the properties of the given VLAN
        Only name and description are updated

        :param vlan: The VLAN to update
        :type vlan: :class:`DimensionDataNetworkDomain`

        :return: an instance of `DimensionDataVlan`
        :rtype: :class:`DimensionDataVlan`
        """
        edit_node = ET.Element('editVlan', {'xmlns': TYPES_URN})
        edit_node.set('id', vlan.id)
        ET.SubElement(edit_node, "name").text = vlan.name
        if vlan.description is not None:
            ET.SubElement(edit_node, "description").text \
                = vlan.description

        self.connection.request_with_orgId_api_2(
            'network/editVlan',
            method='POST',
            data=ET.tostring(edit_node)).object

        # The caller's (already-updated) object is echoed back unchanged.
        return vlan

    def ex_expand_vlan(self, vlan):
        """
        Expands the VLAN to the prefix size in private_ipv4_range_size
        The expansion will not be permitted if the proposed IP space
        overlaps with an already deployed VLANs IP space.
:param vlan: The VLAN to update :type vlan: :class:`DimensionDataNetworkDomain` :return: an instance of `DimensionDataVlan` :rtype: :class:`DimensionDataVlan` """ edit_node = ET.Element('expandVlan', {'xmlns': TYPES_URN}) edit_node.set('id', vlan.id) ET.SubElement(edit_node, "privateIpv4PrefixSize").text =\ vlan.private_ipv4_range_size self.connection.request_with_orgId_api_2( 'network/expandVlan', method='POST', data=ET.tostring(edit_node)).object return vlan def ex_delete_vlan(self, vlan): """ Deletes an existing VLAN :param vlan: The VLAN to delete :type vlan: :class:`DimensionDataNetworkDomain` :rtype: ``bool`` """ delete_node = ET.Element('deleteVlan', {'xmlns': TYPES_URN}) delete_node.set('id', vlan.id) result = self.connection.request_with_orgId_api_2( 'network/deleteVlan', method='POST', data=ET.tostring(delete_node)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_list_vlans(self, location=None, network_domain=None, name=None, ipv4_address=None, ipv6_address=None, state=None): """ List VLANs available, can filter by location and/or network domain :param location: Only VLANs in this location (optional) :type location: :class:`NodeLocation` or ``str`` :param network_domain: Only VLANs in this domain (optional) :type network_domain: :class:`DimensionDataNetworkDomain` :param name: Only VLANs with this name (optional) :type name: ``str`` :param ipv4_address: Only VLANs with this ipv4 address (optional) :type ipv4_address: ``str`` :param ipv6_address: Only VLANs with this ipv6 address (optional) :type ipv6_address: ``str`` :param state: Only VLANs with this state (optional) :type state: ``str`` :return: a list of DimensionDataVlan objects :rtype: ``list`` of :class:`DimensionDataVlan` """ params = {} if location is not None: params['datacenterId'] = self._location_to_location_id(location) if network_domain is not None: params['networkDomainId'] = \ 
self._network_domain_to_network_domain_id(network_domain) if name is not None: params['name'] = name if ipv4_address is not None: params['privateIpv4Address'] = ipv4_address if ipv6_address is not None: params['ipv6Address'] = ipv6_address if state is not None: params['state'] = state response = self.connection.request_with_orgId_api_2('network/vlan', params=params) \ .object return self._to_vlans(response) def ex_add_public_ip_block_to_network_domain(self, network_domain): add_node = ET.Element('addPublicIpBlock', {'xmlns': TYPES_URN}) ET.SubElement(add_node, "networkDomainId").text =\ network_domain.id response = self.connection.request_with_orgId_api_2( 'network/addPublicIpBlock', method='POST', data=ET.tostring(add_node)).object block_id = None for info in findall(response, 'info', TYPES_URN): if info.get('name') == 'ipBlockId': block_id = info.get('value') return self.ex_get_public_ip_block(block_id) def ex_list_public_ip_blocks(self, network_domain): params = {} params['networkDomainId'] = network_domain.id response = self.connection \ .request_with_orgId_api_2('network/publicIpBlock', params=params).object return self._to_ip_blocks(response) def ex_get_public_ip_block(self, block_id): locations = self.list_locations() block = self.connection.request_with_orgId_api_2( 'network/publicIpBlock/%s' % block_id).object return self._to_ip_block(block, locations) def ex_delete_public_ip_block(self, block): delete_node = ET.Element('removePublicIpBlock', {'xmlns': TYPES_URN}) delete_node.set('id', block.id) result = self.connection.request_with_orgId_api_2( 'network/removePublicIpBlock', method='POST', data=ET.tostring(delete_node)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_get_node_by_id(self, id): node = self.connection.request_with_orgId_api_2( 'server/server/%s' % id).object return self._to_node(node) def ex_list_firewall_rules(self, network_domain, page_size=50, page_number=1): params = 
{'pageSize': page_size, 'pageNumber': page_number} params['networkDomainId'] = self._network_domain_to_network_domain_id( network_domain) response = self.connection \ .request_with_orgId_api_2('network/firewallRule', params=params).object return self._to_firewall_rules(response, network_domain) def ex_create_firewall_rule(self, network_domain, rule, position, position_relative_to_rule=None): """ Creates a firewall rule :param network_domain: The network domain in which to create the firewall rule :type network_domain: :class:`DimensionDataNetworkDomain` or ``str`` :param rule: The rule in which to create :type rule: :class:`DimensionDataFirewallRule` :param position: The position in which to create the rule There are two types of positions with position_relative_to_rule arg and without it With: 'BEFORE' or 'AFTER' Without: 'FIRST' or 'LAST' :type position: ``str`` :param position_relative_to_rule: The rule or rule name in which to decide positioning by :type position_relative_to_rule: :class:`DimensionDataFirewallRule` or ``str`` :rtype: ``bool`` """ positions_without_rule = ('FIRST', 'LAST') positions_with_rule = ('BEFORE', 'AFTER') create_node = ET.Element('createFirewallRule', {'xmlns': TYPES_URN}) ET.SubElement(create_node, "networkDomainId").text = \ self._network_domain_to_network_domain_id(network_domain) ET.SubElement(create_node, "name").text = rule.name ET.SubElement(create_node, "action").text = rule.action ET.SubElement(create_node, "ipVersion").text = rule.ip_version ET.SubElement(create_node, "protocol").text = rule.protocol # Setup source port rule source = ET.SubElement(create_node, "source") if rule.source.address_list_id is not None: source_ip = ET.SubElement(source, 'ipAddressListId') source_ip.text = rule.source.address_list_id else: source_ip = ET.SubElement(source, 'ip') if rule.source.any_ip: source_ip.set('address', 'ANY') else: source_ip.set('address', rule.source.ip_address) if rule.source.ip_prefix_size is not None: 
source_ip.set('prefixSize', str(rule.source.ip_prefix_size)) if rule.source.port_list_id is not None: source_port = ET.SubElement(source, 'portListId') source_port.text = rule.source.port_list_id else: if rule.source.port_begin is not None: source_port = ET.SubElement(source, 'port') source_port.set('begin', rule.source.port_begin) if rule.source.port_end is not None: source_port.set('end', rule.source.port_end) # Setup destination port rule dest = ET.SubElement(create_node, "destination") if rule.destination.address_list_id is not None: dest_ip = ET.SubElement(dest, 'ipAddressListId') dest_ip.text = rule.destination.address_list_id else: dest_ip = ET.SubElement(dest, 'ip') if rule.destination.any_ip: dest_ip.set('address', 'ANY') else: dest_ip.set('address', rule.destination.ip_address) if rule.destination.ip_prefix_size is not None: dest_ip.set('prefixSize', rule.destination.ip_prefix_size) if rule.destination.port_list_id is not None: dest_port = ET.SubElement(dest, 'portListId') dest_port.text = rule.destination.port_list_id else: if rule.destination.port_begin is not None: dest_port = ET.SubElement(dest, 'port') dest_port.set('begin', rule.destination.port_begin) if rule.destination.port_end is not None: dest_port.set('end', rule.destination.port_end) # Set up positioning of rule ET.SubElement(create_node, "enabled").text = str(rule.enabled).lower() placement = ET.SubElement(create_node, "placement") if position_relative_to_rule is not None: if position not in positions_with_rule: raise ValueError("When position_relative_to_rule is specified" " position must be %s" % ', '.join(positions_with_rule)) if isinstance(position_relative_to_rule, DimensionDataFirewallRule): rule_name = position_relative_to_rule.name else: rule_name = position_relative_to_rule placement.set('relativeToRule', rule_name) else: if position not in positions_without_rule: raise ValueError("When position_relative_to_rule is not" " specified position must be %s" % ', 
'.join(positions_without_rule)) placement.set('position', position) response = self.connection.request_with_orgId_api_2( 'network/createFirewallRule', method='POST', data=ET.tostring(create_node)).object rule_id = None for info in findall(response, 'info', TYPES_URN): if info.get('name') == 'firewallRuleId': rule_id = info.get('value') rule.id = rule_id return rule def ex_edit_firewall_rule(self, rule, position, relative_rule_for_position=None): """ Edit a firewall rule >>> from pprint import pprint >>> from libcloud.compute.types import Provider >>> from libcloud.compute.providers import get_driver >>> import libcloud.security >>> >>> # Get dimension data driver >>> libcloud.security.VERIFY_SSL_CERT = True >>> cls = get_driver(Provider.DIMENSIONDATA) >>> driver = cls('myusername','mypassword', region='dd-au') >>> >>> # Get location >>> location = driver.ex_get_location_by_id(id='AU9') >>> >>> # Get network domain by location >>> networkDomainName = "Baas QA" >>> network_domains = driver.ex_list_network_domains(location=location) >>> my_network_domain = [d for d in network_domains if d.name == networkDomainName][0] >>> >>> >>> # List firewall rules >>> firewall_rules = driver.ex_list_firewall_rules(my_network_domain) >>> >>> # Get Firewall Rule by name >>> pprint("List specific firewall rule by name") >>> fire_rule_under_test = (list(filter(lambda x: x.name == 'My_New_Firewall_Rule', firewall_rules))[0]) >>> pprint(fire_rule_under_test.source) >>> pprint(fire_rule_under_test.destination) >>> >>> # Edit Firewall >>> fire_rule_under_test.destination.address_list_id = '5e7c323f-c885-4e4b-9a27-94c44217dbd3' >>> fire_rule_under_test.destination.port_list_id = 'b6557c5a-45fa-4138-89bd-8fe68392691b' >>> result = driver.ex_edit_firewall_rule(fire_rule_under_test, 'LAST') >>> pprint(result) :param rule: (required) The rule in which to create :type rule: :class:`DimensionDataFirewallRule` :param position: (required) There are two types of positions with 
position_relative_to_rule arg and without it With: 'BEFORE' or 'AFTER' Without: 'FIRST' or 'LAST' :type position: ``str`` :param relative_rule_for_position: (optional) The rule or rule name in which to decide the relative rule for positioning. :type relative_rule_for_position: :class:`DimensionDataFirewallRule` or ``str`` :rtype: ``bool`` """ positions_without_rule = ('FIRST', 'LAST') positions_with_rule = ('BEFORE', 'AFTER') edit_node = ET.Element('editFirewallRule', {'xmlns': TYPES_URN, 'id': rule.id}) ET.SubElement(edit_node, "action").text = rule.action ET.SubElement(edit_node, "protocol").text = rule.protocol # Source address source = ET.SubElement(edit_node, "source") if rule.source.address_list_id is not None: source_ip = ET.SubElement(source, 'ipAddressListId') source_ip.text = rule.source.address_list_id else: source_ip = ET.SubElement(source, 'ip') if rule.source.any_ip: source_ip.set('address', 'ANY') else: source_ip.set('address', rule.source.ip_address) if rule.source.ip_prefix_size is not None: source_ip.set('prefixSize', str(rule.source.ip_prefix_size)) # Setup source port rule if rule.source.port_list_id is not None: source_port = ET.SubElement(source, 'portListId') source_port.text = rule.source.port_list_id else: if rule.source.port_begin is not None: source_port = ET.SubElement(source, 'port') source_port.set('begin', rule.source.port_begin) if rule.source.port_end is not None: source_port.set('end', rule.source.port_end) # Setup destination port rule dest = ET.SubElement(edit_node, "destination") if rule.destination.address_list_id is not None: dest_ip = ET.SubElement(dest, 'ipAddressListId') dest_ip.text = rule.destination.address_list_id else: dest_ip = ET.SubElement(dest, 'ip') if rule.destination.any_ip: dest_ip.set('address', 'ANY') else: dest_ip.set('address', rule.destination.ip_address) if rule.destination.ip_prefix_size is not None: dest_ip.set('prefixSize', rule.destination.ip_prefix_size) if rule.destination.port_list_id is not None: 
dest_port = ET.SubElement(dest, 'portListId') dest_port.text = rule.destination.port_list_id else: if rule.destination.port_begin is not None: dest_port = ET.SubElement(dest, 'port') dest_port.set('begin', rule.destination.port_begin) if rule.destination.port_end is not None: dest_port.set('end', rule.destination.port_end) # Set up positioning of rule ET.SubElement(edit_node, "enabled").text = str(rule.enabled).lower() placement = ET.SubElement(edit_node, "placement") if relative_rule_for_position is not None: if position not in positions_with_rule: raise ValueError("When position_relative_to_rule is specified" " position must be %s" % ', '.join(positions_with_rule)) if isinstance(relative_rule_for_position, DimensionDataFirewallRule): rule_name = relative_rule_for_position.name else: rule_name = relative_rule_for_position placement.set('relativeToRule', rule_name) else: if position not in positions_without_rule: raise ValueError("When position_relative_to_rule is not" " specified position must be %s" % ', '.join(positions_without_rule)) placement.set('position', position) response = self.connection.request_with_orgId_api_2( 'network/editFirewallRule', method='POST', data=ET.tostring(edit_node)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_get_firewall_rule(self, network_domain, rule_id): locations = self.list_locations() rule = self.connection.request_with_orgId_api_2( 'network/firewallRule/%s' % rule_id).object return self._to_firewall_rule(rule, locations, network_domain) def ex_set_firewall_rule_state(self, rule, state): """ Change the state (enabled or disabled) of a rule :param rule: The rule to delete :type rule: :class:`DimensionDataFirewallRule` :param state: The desired state enabled (True) or disabled (False) :type state: ``bool`` :rtype: ``bool`` """ update_node = ET.Element('editFirewallRule', {'xmlns': TYPES_URN}) update_node.set('id', rule.id) ET.SubElement(update_node, 
                      'enabled').text = str(state).lower()
        result = self.connection.request_with_orgId_api_2(
            'network/editFirewallRule',
            method='POST',
            data=ET.tostring(update_node)).object
        # The API is asynchronous: IN_PROGRESS is also treated as success.
        response_code = findtext(result, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_delete_firewall_rule(self, rule):
        """
        Delete a firewall rule

        :param rule: The rule to delete
        :type rule: :class:`DimensionDataFirewallRule`

        :rtype: ``bool``
        """
        update_node = ET.Element('deleteFirewallRule', {'xmlns': TYPES_URN})
        update_node.set('id', rule.id)
        result = self.connection.request_with_orgId_api_2(
            'network/deleteFirewallRule',
            method='POST',
            data=ET.tostring(update_node)).object
        response_code = findtext(result, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_create_nat_rule(self, network_domain, internal_ip, external_ip):
        """
        Create a NAT rule

        :param network_domain: The network domain the rule belongs to
        :type network_domain: :class:`DimensionDataNetworkDomain`

        :param internal_ip: The IPv4 address internally
        :type internal_ip: ``str``

        :param external_ip: The IPv4 address externally
        :type external_ip: ``str``

        :rtype: :class:`DimensionDataNatRule`
        """
        create_node = ET.Element('createNatRule', {'xmlns': TYPES_URN})
        ET.SubElement(create_node, 'networkDomainId').text = network_domain.id
        ET.SubElement(create_node, 'internalIp').text = internal_ip
        ET.SubElement(create_node, 'externalIp').text = external_ip
        result = self.connection.request_with_orgId_api_2(
            'network/createNatRule',
            method='POST',
            data=ET.tostring(create_node)).object

        rule_id = None

        # The new rule's id is returned as an <info name="natRuleId">
        # element; build the result object locally from it.
        for info in findall(result, 'info', TYPES_URN):
            if info.get('name') == 'natRuleId':
                rule_id = info.get('value')

        return DimensionDataNatRule(
            id=rule_id,
            network_domain=network_domain,
            internal_ip=internal_ip,
            external_ip=external_ip,
            status=NodeState.RUNNING
        )

    def ex_list_nat_rules(self, network_domain):
        """
        Get NAT rules for the network domain

        :param network_domain: The network domain the rules belongs to
:type network_domain: :class:`DimensionDataNetworkDomain` :rtype: ``list`` of :class:`DimensionDataNatRule` """ params = {} params['networkDomainId'] = network_domain.id response = self.connection \ .request_with_orgId_api_2('network/natRule', params=params).object return self._to_nat_rules(response, network_domain) def ex_get_nat_rule(self, network_domain, rule_id): """ Get a NAT rule by ID :param network_domain: The network domain the rule belongs to :type network_domain: :class:`DimensionDataNetworkDomain` :param rule_id: The ID of the NAT rule to fetch :type rule_id: ``str`` :rtype: :class:`DimensionDataNatRule` """ rule = self.connection.request_with_orgId_api_2( 'network/natRule/%s' % rule_id).object return self._to_nat_rule(rule, network_domain) def ex_delete_nat_rule(self, rule): """ Delete an existing NAT rule :param rule: The rule to delete :type rule: :class:`DimensionDataNatRule` :rtype: ``bool`` """ update_node = ET.Element('deleteNatRule', {'xmlns': TYPES_URN}) update_node.set('id', rule.id) result = self.connection.request_with_orgId_api_2( 'network/deleteNatRule', method='POST', data=ET.tostring(update_node)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_get_location_by_id(self, id): """ Get location by ID. :param id: ID of the node location which should be used :type id: ``str`` :rtype: :class:`NodeLocation` """ location = None if id is not None: location = self.list_locations(ex_id=id)[0] return location def ex_wait_for_state(self, state, func, poll_interval=2, timeout=60, *args, **kwargs): """ Wait for the function which returns a instance with field status to match Keep polling func until one of the desired states is matched :param state: Either the desired state (`str`) or a `list` of states :type state: ``str`` or ``list`` :param func: The function to call, e.g. 
ex_get_vlan :type func: ``function`` :param poll_interval: The number of seconds to wait between checks :type poll_interval: `int` :param timeout: The total number of seconds to wait to reach a state :type timeout: `int` :param args: The arguments for func :type args: Positional arguments :param kwargs: The arguments for func :type kwargs: Keyword arguments """ return self.connection.wait_for_state(state, func, poll_interval, timeout, *args, **kwargs) def ex_enable_monitoring(self, node, service_plan="ESSENTIALS"): """ Enables cloud monitoring on a node :param node: The node to monitor :type node: :class:`Node` :param service_plan: The service plan, one of ESSENTIALS or ADVANCED :type service_plan: ``str`` :rtype: ``bool`` """ update_node = ET.Element('enableServerMonitoring', {'xmlns': TYPES_URN}) update_node.set('id', node.id) ET.SubElement(update_node, 'servicePlan').text = service_plan result = self.connection.request_with_orgId_api_2( 'server/enableServerMonitoring', method='POST', data=ET.tostring(update_node)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_update_monitoring_plan(self, node, service_plan="ESSENTIALS"): """ Updates the service plan on a node with monitoring :param node: The node to monitor :type node: :class:`Node` :param service_plan: The service plan, one of ESSENTIALS or ADVANCED :type service_plan: ``str`` :rtype: ``bool`` """ update_node = ET.Element('changeServerMonitoringPlan', {'xmlns': TYPES_URN}) update_node.set('id', node.id) ET.SubElement(update_node, 'servicePlan').text = service_plan result = self.connection.request_with_orgId_api_2( 'server/changeServerMonitoringPlan', method='POST', data=ET.tostring(update_node)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_disable_monitoring(self, node): """ Disables cloud monitoring for a node :param node: The node to stop monitoring :type node: 
:class:`Node` :rtype: ``bool`` """ update_node = ET.Element('disableServerMonitoring', {'xmlns': TYPES_URN}) update_node.set('id', node.id) result = self.connection.request_with_orgId_api_2( 'server/disableServerMonitoring', method='POST', data=ET.tostring(update_node)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_add_storage_to_node(self, node, amount, speed='STANDARD', scsi_id=None): """ Add storage to the node :param node: The server to add storage to :type node: :class:`Node` :param amount: The amount of storage to add, in GB :type amount: ``int`` :param speed: The disk speed type :type speed: ``str`` :param scsi_id: The target SCSI ID (optional) :type scsi_id: ``int`` :rtype: ``bool`` """ update_node = ET.Element('addDisk', {'xmlns': TYPES_URN}) update_node.set('id', node.id) ET.SubElement(update_node, 'sizeGb').text = str(amount) ET.SubElement(update_node, 'speed').text = speed.upper() if scsi_id is not None: ET.SubElement(update_node, 'scsiId').text = str(scsi_id) result = self.connection.request_with_orgId_api_2( 'server/addDisk', method='POST', data=ET.tostring(update_node)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_remove_storage_from_node(self, node, scsi_id): """ Remove storage from a node :param node: The server to add storage to :type node: :class:`Node` :param scsi_id: The ID of the disk to remove :type scsi_id: ``str`` :rtype: ``bool`` """ disk = [disk for disk in node.extra['disks'] if disk.scsi_id == scsi_id][0] return self.ex_remove_storage(disk.id) def ex_remove_storage(self, disk_id): """ Remove storage from a node :param node: The server to add storage to :type node: :class:`Node` :param disk_id: The ID of the disk to remove :type disk_id: ``str`` :rtype: ``bool`` """ remove_disk = ET.Element('removeDisk', {'xmlns': TYPES_URN}) remove_disk.set('id', disk_id) result = 
self.connection.request_with_orgId_api_2( 'server/removeDisk', method='POST', data=ET.tostring(remove_disk)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_change_storage_speed(self, node, disk_id, speed): """ Change the speed (disk tier) of a disk :param node: The server to change the disk speed of :type node: :class:`Node` :param disk_id: The ID of the disk to change :type disk_id: ``str`` :param speed: The disk speed type e.g. STANDARD :type speed: ``str`` :rtype: ``bool`` """ create_node = ET.Element('ChangeDiskSpeed', {'xmlns': SERVER_NS}) ET.SubElement(create_node, 'speed').text = speed result = self.connection.request_with_orgId_api_1( 'server/%s/disk/%s/changeSpeed' % (node.id, disk_id), method='POST', data=ET.tostring(create_node)).object response_code = findtext(result, 'result', GENERAL_NS) return response_code in ['IN_PROGRESS', 'SUCCESS'] def ex_change_storage_size(self, node, disk_id, size): """ Change the size of a disk :param node: The server to change the disk of :type node: :class:`Node` :param disk_id: The ID of the disk to resize :type disk_id: ``str`` :param size: The disk size in GB :type size: ``int`` :rtype: ``bool`` """ create_node = ET.Element('ChangeDiskSize', {'xmlns': SERVER_NS}) ET.SubElement(create_node, 'newSizeGb').text = str(size) result = self.connection.request_with_orgId_api_1( 'server/%s/disk/%s/changeSize' % (node.id, disk_id), method='POST', data=ET.tostring(create_node)).object response_code = findtext(result, 'result', GENERAL_NS) return response_code in ['IN_PROGRESS', 'SUCCESS'] def ex_reconfigure_node(self, node, memory_gb, cpu_count, cores_per_socket, cpu_performance): """ Reconfigure the virtual hardware specification of a node :param node: The server to change :type node: :class:`Node` :param memory_gb: The amount of memory in GB (optional) :type memory_gb: ``int`` :param cpu_count: The number of CPU (optional) :type cpu_count: ``int`` :param 
cores_per_socket: Number of CPU cores per socket (optional) :type cores_per_socket: ``int`` :param cpu_performance: CPU Performance type (optional) :type cpu_performance: ``str`` :rtype: ``bool`` """ update = ET.Element('reconfigureServer', {'xmlns': TYPES_URN}) update.set('id', node.id) if memory_gb is not None: ET.SubElement(update, 'memoryGb').text = str(memory_gb) if cpu_count is not None: ET.SubElement(update, 'cpuCount').text = str(cpu_count) if cpu_performance is not None: ET.SubElement(update, 'cpuSpeed').text = cpu_performance if cores_per_socket is not None: ET.SubElement(update, 'coresPerSocket').text = \ str(cores_per_socket) result = self.connection.request_with_orgId_api_2( 'server/reconfigureServer', method='POST', data=ET.tostring(update)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_clone_node_to_image(self, node, image_name, image_description=None, cluster_id=None, is_guest_Os_Customization=None, tag_key_id=None, tag_value=None): """ Clone a server into a customer image. 
:param node: The server to clone :type node: :class:`Node` :param image_name: The name of the clone image :type image_name: ``str`` :param description: The description of the image :type description: ``str`` :rtype: ``bool`` """ if image_description is None: image_description = '' node_id = self._node_to_node_id(node) # Version 2.3 and lower if LooseVersion(self.connection.active_api_version) < LooseVersion( '2.4'): response = self.connection.request_with_orgId_api_1( 'server/%s?clone=%s&desc=%s' % (node_id, image_name, image_description)).object # Version 2.4 and higher else: clone_server_elem = ET.Element('cloneServer', {'id': node_id, 'xmlns': TYPES_URN}) ET.SubElement(clone_server_elem, 'imageName').text = image_name if image_description is not None: ET.SubElement(clone_server_elem, 'description').text = \ image_description if cluster_id is not None: ET.SubElement(clone_server_elem, 'clusterId').text = \ cluster_id if is_guest_Os_Customization is not None: ET.SubElement(clone_server_elem, 'guestOsCustomization')\ .text = is_guest_Os_Customization if tag_key_id is not None: tag_elem = ET.SubElement(clone_server_elem, 'tagById') ET.SubElement(tag_elem, 'tagKeyId').text = tag_key_id if tag_value is not None: ET.SubElement(tag_elem, 'value').text = tag_value response = self.connection.request_with_orgId_api_2( 'server/cloneServer', method='POST', data=ET.tostring(clone_server_elem)).object # Version 2.3 and lower if LooseVersion(self.connection.active_api_version) < LooseVersion( '2.4'): response_code = findtext(response, 'result', GENERAL_NS) else: response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'SUCCESS'] def ex_clean_failed_deployment(self, node): """ Removes a node that has failed to deploy :param node: The failed node to clean :type node: :class:`Node` or ``str`` """ node_id = self._node_to_node_id(node) request_elm = ET.Element('cleanServer', {'xmlns': TYPES_URN, 'id': node_id}) body = 
self.connection.request_with_orgId_api_2( 'server/cleanServer', method='POST', data=ET.tostring(request_elm)).object response_code = findtext(body, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_list_customer_images(self, location=None): """ Return a list of customer imported images :param location: The target location :type location: :class:`NodeLocation` or ``str`` :rtype: ``list`` of :class:`NodeImage` """ params = {} if location is not None: params['datacenterId'] = self._location_to_location_id(location) return self._to_images( self.connection.request_with_orgId_api_2( 'image/customerImage', params=params) .object, 'customerImage') def ex_get_base_image_by_id(self, id): """ Gets a Base image in the Dimension Data Cloud given the id :param id: The id of the image :type id: ``str`` :rtype: :class:`NodeImage` """ image = self.connection.request_with_orgId_api_2( 'image/osImage/%s' % id).object return self._to_image(image) def ex_get_customer_image_by_id(self, id): """ Gets a Customer image in the Dimension Data Cloud given the id :param id: The id of the image :type id: ``str`` :rtype: :class:`NodeImage` """ image = self.connection.request_with_orgId_api_2( 'image/customerImage/%s' % id).object return self._to_image(image) def ex_get_image_by_id(self, id): """ Gets a Base/Customer image in the Dimension Data Cloud given the id Note: This first checks the base image If it is not a base image we check if it is a customer image If it is not in either of these a DimensionDataAPIException is thrown :param id: The id of the image :type id: ``str`` :rtype: :class:`NodeImage` """ try: return self.ex_get_base_image_by_id(id) except DimensionDataAPIException as e: if e.code != 'RESOURCE_NOT_FOUND': raise e return self.ex_get_customer_image_by_id(id) def ex_create_tag_key(self, name, description=None, value_required=True, display_on_report=True): """ Creates a tag key in the Dimension Data Cloud :param name: The name of the tag key 
(required) :type name: ``str`` :param description: The description of the tag key :type description: ``str`` :param value_required: If a value is required for the tag Tags themselves can be just a tag, or be a key/value pair :type value_required: ``bool`` :param display_on_report: Should this key show up on the usage reports :type display_on_report: ``bool`` :rtype: ``bool`` """ create_tag_key = ET.Element('createTagKey', {'xmlns': TYPES_URN}) ET.SubElement(create_tag_key, 'name').text = name if description is not None: ET.SubElement(create_tag_key, 'description').text = description ET.SubElement(create_tag_key, 'valueRequired').text = \ str(value_required).lower() ET.SubElement(create_tag_key, 'displayOnReport').text = \ str(display_on_report).lower() response = self.connection.request_with_orgId_api_2( 'tag/createTagKey', method='POST', data=ET.tostring(create_tag_key)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_list_tag_keys(self, id=None, name=None, value_required=None, display_on_report=None): """ List tag keys in the Dimension Data Cloud :param id: Filter the list to the id of the tag key :type id: ``str`` :param name: Filter the list to the name of the tag key :type name: ``str`` :param value_required: Filter the list to if a value is required for a tag key :type value_required: ``bool`` :param display_on_report: Filter the list to if the tag key should show up on usage reports :type display_on_report: ``bool`` :rtype: ``list`` of :class:`DimensionDataTagKey` """ params = {} if id is not None: params['id'] = id if name is not None: params['name'] = name if value_required is not None: params['valueRequired'] = str(value_required).lower() if display_on_report is not None: params['displayOnReport'] = str(display_on_report).lower() paged_result = self.connection.paginated_request_with_orgId_api_2( 'tag/tagKey', method='GET', params=params ) tag_keys = [] for result in paged_result: 
tag_keys.extend(self._to_tag_keys(result)) return tag_keys def ex_get_tag_key_by_id(self, id): """ Get a specific tag key by ID :param id: ID of the tag key you want (required) :type id: ``str`` :rtype: :class:`DimensionDataTagKey` """ tag_key = self.connection.request_with_orgId_api_2( 'tag/tagKey/%s' % id).object return self._to_tag_key(tag_key) def ex_get_tag_key_by_name(self, name): """ Get a specific tag key by Name :param name: Name of the tag key you want (required) :type name: ``str`` :rtype: :class:`DimensionDataTagKey` """ tag_keys = self.ex_list_tag_keys(name=name) if len(tag_keys) != 1: raise ValueError("No tags found with name %s" % name) return tag_keys[0] def ex_modify_tag_key(self, tag_key, name=None, description=None, value_required=None, display_on_report=None): """ Modify a specific tag key :param tag_key: The tag key you want to modify (required) :type tag_key: :class:`DimensionDataTagKey` or ``str`` :param name: Set to modifiy the name of the tag key :type name: ``str`` :param description: Set to modify the description of the tag key :type description: ``str`` :param value_required: Set to modify if a value is required for the tag key :type value_required: ``bool`` :param display_on_report: Set to modify if this tag key should display on the usage reports :type display_on_report: ``bool`` :rtype: ``bool`` """ tag_key_id = self._tag_key_to_tag_key_id(tag_key) modify_tag_key = ET.Element('editTagKey', {'xmlns': TYPES_URN, 'id': tag_key_id}) if name is not None: ET.SubElement(modify_tag_key, 'name').text = name if description is not None: ET.SubElement(modify_tag_key, 'description').text = description if value_required is not None: ET.SubElement(modify_tag_key, 'valueRequired').text = \ str(value_required).lower() if display_on_report is not None: ET.SubElement(modify_tag_key, 'displayOnReport').text = \ str(display_on_report).lower() response = self.connection.request_with_orgId_api_2( 'tag/editTagKey', method='POST', 
data=ET.tostring(modify_tag_key)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_remove_tag_key(self, tag_key): """ Modify a specific tag key :param tag_key: The tag key you want to remove (required) :type tag_key: :class:`DimensionDataTagKey` or ``str`` :rtype: ``bool`` """ tag_key_id = self._tag_key_to_tag_key_id(tag_key) remove_tag_key = ET.Element('deleteTagKey', {'xmlns': TYPES_URN, 'id': tag_key_id}) response = self.connection.request_with_orgId_api_2( 'tag/deleteTagKey', method='POST', data=ET.tostring(remove_tag_key)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_apply_tag_to_asset(self, asset, tag_key, value=None): """ Apply a tag to a Dimension Data Asset :param asset: The asset to apply a tag to. (required) :type asset: :class:`Node` or :class:`NodeImage` or :class:`DimensionDataNewtorkDomain` or :class:`DimensionDataVlan` or :class:`DimensionDataPublicIpBlock` :param tag_key: The tag_key to apply to the asset. 
(required) :type tag_key: :class:`DimensionDataTagKey` or ``str`` :param value: The value to be assigned to the tag key This is only required if the :class:`DimensionDataTagKey` requires it :type value: ``str`` :rtype: ``bool`` """ asset_type = self._get_tagging_asset_type(asset) tag_key_name = self._tag_key_to_tag_key_name(tag_key) apply_tags = ET.Element('applyTags', {'xmlns': TYPES_URN}) ET.SubElement(apply_tags, 'assetType').text = asset_type ET.SubElement(apply_tags, 'assetId').text = asset.id tag_ele = ET.SubElement(apply_tags, 'tag') ET.SubElement(tag_ele, 'tagKeyName').text = tag_key_name if value is not None: ET.SubElement(tag_ele, 'value').text = value response = self.connection.request_with_orgId_api_2( 'tag/applyTags', method='POST', data=ET.tostring(apply_tags)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_remove_tag_from_asset(self, asset, tag_key): """ Remove a tag from an asset :param asset: The asset to remove a tag from. 
(required) :type asset: :class:`Node` or :class:`NodeImage` or :class:`DimensionDataNewtorkDomain` or :class:`DimensionDataVlan` or :class:`DimensionDataPublicIpBlock` :param tag_key: The tag key you want to remove (required) :type tag_key: :class:`DimensionDataTagKey` or ``str`` :rtype: ``bool`` """ asset_type = self._get_tagging_asset_type(asset) tag_key_name = self._tag_key_to_tag_key_name(tag_key) apply_tags = ET.Element('removeTags', {'xmlns': TYPES_URN}) ET.SubElement(apply_tags, 'assetType').text = asset_type ET.SubElement(apply_tags, 'assetId').text = asset.id ET.SubElement(apply_tags, 'tagKeyName').text = tag_key_name response = self.connection.request_with_orgId_api_2( 'tag/removeTags', method='POST', data=ET.tostring(apply_tags)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_list_tags(self, asset_id=None, asset_type=None, location=None, tag_key_name=None, tag_key_id=None, value=None, value_required=None, display_on_report=None): """ List tags in the Dimension Data Cloud :param asset_id: Filter the list by asset id :type asset_id: ``str`` :param asset_type: Filter the list by asset type :type asset_type: ``str`` :param location: Filter the list by the assets location :type location: :class:``NodeLocation`` or ``str`` :param tag_key_name: Filter the list by a tag key name :type tag_key_name: ``str`` :param tag_key_id: Filter the list by a tag key id :type tag_key_id: ``str`` :param value: Filter the list by a tag value :type value: ``str`` :param value_required: Filter the list to if a value is required for a tag :type value_required: ``bool`` :param display_on_report: Filter the list to if the tag should show up on usage reports :type display_on_report: ``bool`` :rtype: ``list`` of :class:`DimensionDataTag` """ params = {} if asset_id is not None: params['assetId'] = asset_id if asset_type is not None: params['assetType'] = asset_type if location is not None: 
params['datacenterId'] = self._location_to_location_id(location) if tag_key_name is not None: params['tagKeyName'] = tag_key_name if tag_key_id is not None: params['tagKeyId'] = tag_key_id if value is not None: params['value'] = value if value_required is not None: params['valueRequired'] = str(value_required).lower() if display_on_report is not None: params['displayOnReport'] = str(display_on_report).lower() paged_result = self.connection.paginated_request_with_orgId_api_2( 'tag/tag', method='GET', params=params ) tags = [] for result in paged_result: tags.extend(self._to_tags(result)) return tags def ex_summary_usage_report(self, start_date, end_date): """ Get summary usage information :param start_date: Start date for the report :type start_date: ``str`` in format YYYY-MM-DD :param end_date: End date for the report :type end_date: ``str`` in format YYYY-MM-DD :rtype: ``list`` of ``list`` """ result = self.connection.raw_request_with_orgId_api_1( 'report/usage?startDate=%s&endDate=%s' % ( start_date, end_date)) return self._format_csv(result.response) def ex_detailed_usage_report(self, start_date, end_date): """ Get detailed usage information :param start_date: Start date for the report :type start_date: ``str`` in format YYYY-MM-DD :param end_date: End date for the report :type end_date: ``str`` in format YYYY-MM-DD :rtype: ``list`` of ``list`` """ result = self.connection.raw_request_with_orgId_api_1( 'report/usageDetailed?startDate=%s&endDate=%s' % ( start_date, end_date)) return self._format_csv(result.response) def ex_software_usage_report(self, start_date, end_date): """ Get detailed software usage reports :param start_date: Start date for the report :type start_date: ``str`` in format YYYY-MM-DD :param end_date: End date for the report :type end_date: ``str`` in format YYYY-MM-DD :rtype: ``list`` of ``list`` """ result = self.connection.raw_request_with_orgId_api_1( 'report/usageSoftwareUnits?startDate=%s&endDate=%s' % ( start_date, end_date)) return 
self._format_csv(result.response) def ex_audit_log_report(self, start_date, end_date): """ Get audit log report :param start_date: Start date for the report :type start_date: ``str`` in format YYYY-MM-DD :param end_date: End date for the report :type end_date: ``str`` in format YYYY-MM-DD :rtype: ``list`` of ``list`` """ result = self.connection.raw_request_with_orgId_api_1( 'auditlog?startDate=%s&endDate=%s' % ( start_date, end_date)) return self._format_csv(result.response) def ex_backup_usage_report(self, start_date, end_date, location): """ Get audit log report :param start_date: Start date for the report :type start_date: ``str`` in format YYYY-MM-DD :param end_date: End date for the report :type end_date: ``str`` in format YYYY-MM-DD :keyword location: Filters the node list to nodes that are located in this location :type location: :class:`NodeLocation` or ``str`` :rtype: ``list`` of ``list`` """ datacenter_id = self._location_to_location_id(location) result = self.connection.raw_request_with_orgId_api_1( 'backup/detailedUsageReport?datacenterId=%s&fromDate=%s&toDate=%s' % (datacenter_id, start_date, end_date)) return self._format_csv(result.response) def ex_list_ip_address_list(self, ex_network_domain): """ List IP Address List by network domain ID specified >>> from pprint import pprint >>> from libcloud.compute.types import Provider >>> from libcloud.compute.providers import get_driver >>> import libcloud.security >>> >>> # Get dimension data driver >>> libcloud.security.VERIFY_SSL_CERT = True >>> cls = get_driver(Provider.DIMENSIONDATA) >>> driver = cls('myusername','mypassword', region='dd-au') >>> >>> # Get location >>> location = driver.ex_get_location_by_id(id='AU9') >>> >>> # Get network domain by location >>> networkDomainName = "Baas QA" >>> network_domains = driver.ex_list_network_domains(location=location) >>> my_network_domain = [d for d in network_domains if d.name == networkDomainName][0] >>> >>> # List IP Address List of network domain >>> 
ipaddresslist_list = driver.ex_list_ip_address_list( >>> ex_network_domain=my_network_domain) >>> pprint(ipaddresslist_list) :param ex_network_domain: The network domain or network domain ID :type ex_network_domain: :class:`DimensionDataNetworkDomain` or 'str' :return: a list of DimensionDataIpAddressList objects :rtype: ``list`` of :class:`DimensionDataIpAddressList` """ params = {'networkDomainId': self._network_domain_to_network_domain_id( ex_network_domain)} response = self.connection.request_with_orgId_api_2( 'network/ipAddressList', params=params).object return self._to_ip_address_lists(response) def ex_get_ip_address_list(self, ex_network_domain, ex_ip_address_list_name): """ Get IP Address List by name in network domain specified >>> from pprint import pprint >>> from libcloud.compute.types import Provider >>> from libcloud.compute.providers import get_driver >>> import libcloud.security >>> >>> # Get dimension data driver >>> libcloud.security.VERIFY_SSL_CERT = True >>> cls = get_driver(Provider.DIMENSIONDATA) >>> driver = cls('myusername','mypassword', region='dd-au') >>> >>> # Get location >>> location = driver.ex_get_location_by_id(id='AU9') >>> >>> # Get network domain by location >>> networkDomainName = "Baas QA" >>> network_domains = driver.ex_list_network_domains(location=location) >>> my_network_domain = [d for d in network_domains if d.name == networkDomainName][0] >>> >>> # Get IP Address List by Name >>> ipaddresslist_list_by_name = driver.ex_get_ip_address_list( >>> ex_network_domain=my_network_domain, >>> ex_ip_address_list_name='My_IP_AddressList_1') >>> pprint(ipaddresslist_list_by_name) :param ex_network_domain: (required) The network domain or network domain ID in which ipaddresslist resides. 
:type ex_network_domain: :class:`DimensionDataNetworkDomain` or 'str' :param ex_ip_address_list_name: (required) Get 'IP Address List' by name :type ex_ip_address_list_name: :``str`` :return: a list of DimensionDataIpAddressList objects :rtype: ``list`` of :class:`DimensionDataIpAddressList` """ ip_address_lists = self.ex_list_ip_address_list(ex_network_domain) return list(filter(lambda x: x.name == ex_ip_address_list_name, ip_address_lists)) def ex_create_ip_address_list(self, ex_network_domain, name, description, ip_version, ip_address_collection, child_ip_address_list=None): """ Create IP Address List. IP Address list. >>> from pprint import pprint >>> from libcloud.compute.types import Provider >>> from libcloud.compute.providers import get_driver >>> from libcloud.common.dimensiondata import DimensionDataIpAddress >>> import libcloud.security >>> >>> # Get dimension data driver >>> libcloud.security.VERIFY_SSL_CERT = True >>> cls = get_driver(Provider.DIMENSIONDATA) >>> driver = cls('myusername','mypassword', region='dd-au') >>> >>> # Get location >>> location = driver.ex_get_location_by_id(id='AU9') >>> >>> # Get network domain by location >>> networkDomainName = "Baas QA" >>> network_domains = driver.ex_list_network_domains(location=location) >>> my_network_domain = [d for d in network_domains if d.name == networkDomainName][0] >>> >>> # IP Address collection >>> ipAddress_1 = DimensionDataIpAddress(begin='190.2.2.100') >>> ipAddress_2 = DimensionDataIpAddress(begin='190.2.2.106', end='190.2.2.108') >>> ipAddress_3 = DimensionDataIpAddress(begin='190.2.2.0', prefix_size='24') >>> ip_address_collection = [ipAddress_1, ipAddress_2, ipAddress_3] >>> >>> # Create IPAddressList >>> result = driver.ex_create_ip_address_list( >>> ex_network_domain=my_network_domain, >>> name='My_IP_AddressList_2', >>> ip_version='IPV4', >>> description='Test only', >>> ip_address_collection=ip_address_collection, >>> child_ip_address_list='08468e26-eeb3-4c3d-8ff2-5351fa6d8a04' >>> 
) >>> >>> pprint(result) :param ex_network_domain: The network domain or network domain ID :type ex_network_domain: :class:`DimensionDataNetworkDomain` or 'str' :param name: IP Address List Name (required) :type name: :``str`` :param description: IP Address List Description (optional) :type description: :``str`` :param ip_version: IP Version of ip address (required) :type ip_version: :``str`` :param ip_address_collection: List of IP Address. At least one ipAddress element or one childIpAddressListId element must be provided. :type ip_address_collection: :``str`` :param child_ip_address_list: Child IP Address List or id to be included in this IP Address List. At least one ipAddress or one childIpAddressListId must be provided. :type child_ip_address_list: :class:'DimensionDataChildIpAddressList` or `str`` :return: a list of DimensionDataIpAddressList objects :rtype: ``list`` of :class:`DimensionDataIpAddressList` """ if (ip_address_collection is None and child_ip_address_list is None): raise ValueError("At least one ipAddress element or one " "childIpAddressListId element must be " "provided.") create_ip_address_list = ET.Element('createIpAddressList', {'xmlns': TYPES_URN}) ET.SubElement( create_ip_address_list, 'networkDomainId' ).text = self._network_domain_to_network_domain_id(ex_network_domain) ET.SubElement( create_ip_address_list, 'name' ).text = name ET.SubElement( create_ip_address_list, 'description' ).text = description ET.SubElement( create_ip_address_list, 'ipVersion' ).text = ip_version for ip in ip_address_collection: ip_address = ET.SubElement( create_ip_address_list, 'ipAddress', ) ip_address.set('begin', ip.begin) if ip.end: ip_address.set('end', ip.end) if ip.prefix_size: ip_address.set('prefixSize', ip.prefix_size) if child_ip_address_list is not None: ET.SubElement( create_ip_address_list, 'childIpAddressListId' ).text = \ self._child_ip_address_list_to_child_ip_address_list_id( child_ip_address_list) response = 
self.connection.request_with_orgId_api_2( 'network/createIpAddressList', method='POST', data=ET.tostring(create_ip_address_list)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_edit_ip_address_list(self, ex_ip_address_list, description, ip_address_collection, child_ip_address_lists=None): """ Edit IP Address List. IP Address list. >>> from pprint import pprint >>> from libcloud.compute.types import Provider >>> from libcloud.compute.providers import get_driver >>> from libcloud.common.dimensiondata import DimensionDataIpAddress >>> import libcloud.security >>> >>> # Get dimension data driver >>> libcloud.security.VERIFY_SSL_CERT = True >>> cls = get_driver(Provider.DIMENSIONDATA) >>> driver = cls('myusername','mypassword', region='dd-au') >>> >>> # IP Address collection >>> ipAddress_1 = DimensionDataIpAddress(begin='190.2.2.100') >>> ipAddress_2 = DimensionDataIpAddress(begin='190.2.2.106', >>> end='190.2.2.108') >>> ipAddress_3 = DimensionDataIpAddress( >>> begin='190.2.2.0', prefix_size='24') >>> ip_address_collection = [ipAddress_1, ipAddress_2, ipAddress_3] >>> >>> # Edit IP Address List >>> ip_address_list_id = '5e7c323f-c885-4e4b-9a27-94c44217dbd3' >>> result = driver.ex_edit_ip_address_list( >>> ex_ip_address_list=ip_address_list_id, >>> description="Edit Test", >>> ip_address_collection=ip_address_collection, >>> child_ip_address_lists=None >>> ) >>> pprint(result) :param ex_ip_address_list: (required) IpAddressList object or IpAddressList ID :type ex_ip_address_list: :class:'DimensionDataIpAddressList' or ``str`` :param description: IP Address List Description :type description: :``str`` :param ip_address_collection: List of IP Address :type ip_address_collection: ''list'' of :class:'DimensionDataIpAddressList' :param child_ip_address_lists: Child IP Address List or id to be included in this IP Address List :type child_ip_address_lists: ``list`` of 
:class:'DimensionDataChildIpAddressList' or ``str`` :return: a list of DimensionDataIpAddressList objects :rtype: ``list`` of :class:`DimensionDataIpAddressList` """ edit_ip_address_list = ET.Element( 'editIpAddressList', {'xmlns': TYPES_URN, "id": self._ip_address_list_to_ip_address_list_id( ex_ip_address_list), 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance" }) ET.SubElement( edit_ip_address_list, 'description' ).text = description for ip in ip_address_collection: ip_address = ET.SubElement( edit_ip_address_list, 'ipAddress', ) ip_address.set('begin', ip.begin) if ip.end: ip_address.set('end', ip.end) if ip.prefix_size: ip_address.set('prefixSize', ip.prefix_size) if child_ip_address_lists is not None: ET.SubElement( edit_ip_address_list, 'childIpAddressListId' ).text = self._child_ip_address_list_to_child_ip_address_list_id( child_ip_address_lists) else: ET.SubElement( edit_ip_address_list, 'childIpAddressListId', {'xsi:nil': 'true'} ) response = self.connection.request_with_orgId_api_2( 'network/editIpAddressList', method='POST', data=ET.tostring(edit_ip_address_list)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_delete_ip_address_list(self, ex_ip_address_list): """ Delete IP Address List by ID >>> from pprint import pprint >>> from libcloud.compute.types import Provider >>> from libcloud.compute.providers import get_driver >>> import libcloud.security >>> >>> # Get dimension data driver >>> libcloud.security.VERIFY_SSL_CERT = True >>> cls = get_driver(Provider.DIMENSIONDATA) >>> driver = cls('myusername','mypassword', region='dd-au') >>> >>> ip_address_list_id = '5e7c323f-c885-4e4b-9a27-94c44217dbd3' >>> result = driver.ex_delete_ip_address_list(ip_address_list_id) >>> pprint(result) :param ex_ip_address_list: IP Address List object or IP Address List ID (required) :type ex_ip_address_list: :class:'DimensionDataIpAddressList' or ``str`` :rtype: ``bool`` """ 
        delete_ip_address_list = \
            ET.Element('deleteIpAddressList',
                       {'xmlns': TYPES_URN,
                        'id': self
                        ._ip_address_list_to_ip_address_list_id(
                            ex_ip_address_list)})

        response = self.connection.request_with_orgId_api_2(
            'network/deleteIpAddressList',
            method='POST',
            data=ET.tostring(delete_ip_address_list)).object

        # Asynchronous operation: both codes indicate acceptance.
        response_code = findtext(response, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_list_portlist(self, ex_network_domain):
        """
        List Portlist by network domain ID specified

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Get location
        >>> location = driver.ex_get_location_by_id(id='AU9')
        >>>
        >>> # Get network domain by location
        >>> networkDomainName = "Baas QA"
        >>> network_domains = driver.ex_list_network_domains(location=location)
        >>> my_network_domain = [d for d in network_domains if d.name ==
        >>>                      networkDomainName][0]
        >>>
        >>> # List portlist
        >>> portLists = driver.ex_list_portlist(
        >>>     ex_network_domain=my_network_domain)
        >>> pprint(portLists)
        >>>

        :param ex_network_domain: The network domain or network domain ID
        :type  ex_network_domain: :class:`DimensionDataNetworkDomain` or 'str'

        :return: a list of DimensionDataPortList objects
        :rtype: ``list`` of :class:`DimensionDataPortList`
        """
        params = {'networkDomainId':
                  self._network_domain_to_network_domain_id(
                      ex_network_domain)}
        response = self.connection.request_with_orgId_api_2(
            'network/portList',
            params=params).object
        return self._to_port_lists(response)

    def ex_get_portlist(self, ex_portlist_id):
        """
        Get Port List

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Get specific portlist by ID
        >>> portlist_id = '27dd8c66-80ff-496b-9f54-2a3da2fe679e'
        >>> portlist = driver.ex_get_portlist(portlist_id)
        >>> pprint(portlist)

        :param ex_portlist_id: The ex_port_list or ex_port_list ID
        :type  ex_portlist_id: :class:`DimensionDataNetworkDomain` or 'str'

        :return: DimensionDataPortList object
        :rtype: :class:`DimensionDataPort`
        """
        url_path = ('network/portList/%s' % ex_portlist_id)
        response = self.connection.request_with_orgId_api_2(
            url_path).object
        return self._to_port_list(response)

    def ex_create_portlist(self, ex_network_domain, name, description,
                           port_collection, child_portlist_list=None):
        """
        Create Port List.

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> from libcloud.common.dimensiondata import DimensionDataPort
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Get location
        >>> location = driver.ex_get_location_by_id(id='AU9')
        >>>
        >>> # Get network domain by location
        >>> networkDomainName = "Baas QA"
        >>> network_domains = driver.ex_list_network_domains(location=location)
        >>> my_network_domain = [d for d in network_domains if d.name ==
        >>>                      networkDomainName][0]
        >>>
        >>> # Port Collection
        >>> port_1 = DimensionDataPort(begin='1000')
        >>> port_2 = DimensionDataPort(begin='1001', end='1003')
        >>> port_collection = [port_1, port_2]
        >>>
        >>> # Create Port List
        >>> new_portlist = driver.ex_create_portlist(
        >>>     ex_network_domain=my_network_domain,
        >>>     name='MyPortListX',
        >>>     description="Test only",
        >>>     port_collection=port_collection,
        >>>     child_portlist_list={'a9cd4984-6ff5-4f93-89ff-8618ab642bb9'}
        >>>     )
        >>> pprint(new_portlist)

        :param ex_network_domain: (required) The network domain in which to
                                  create PortList. Provide networkdomain
                                  object or its id.
        :type  ex_network_domain: :``str``

        :param name: Port List Name
        :type  name: :``str``

        :param description: IP Address List Description
        :type  description: :``str``

        :param port_collection: List of Port Address
        :type  port_collection: :``str``

        :param child_portlist_list: List of Child Portlist to be
                                    included in this Port List
        :type  child_portlist_list: :``str`` or ''list of
                                    :class:'DimensionDataChildPortList'

        :return: result of operation
        :rtype: ``bool``
        """
        new_port_list = ET.Element('createPortList', {'xmlns': TYPES_URN})
        ET.SubElement(
            new_port_list,
            'networkDomainId'
        ).text = self._network_domain_to_network_domain_id(ex_network_domain)

        ET.SubElement(
            new_port_list,
            'name'
        ).text = name
        ET.SubElement(
            new_port_list,
            'description'
        ).text = description

        # A port entry is a single port ('begin' only) or a range
        # ('begin' + 'end').
        for port in port_collection:
            p = ET.SubElement(
                new_port_list,
                'port'
            )
            p.set('begin', port.begin)

            if port.end:
                p.set('end', port.end)

        if child_portlist_list is not None:
            for child in child_portlist_list:
                ET.SubElement(
                    new_port_list,
                    'childPortListId'
                ).text = self._child_port_list_to_child_port_list_id(child)

        response = self.connection.request_with_orgId_api_2(
            'network/createPortList',
            method='POST',
            data=ET.tostring(new_port_list)).object

        # Asynchronous operation: both codes indicate acceptance.
        response_code = findtext(response, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']

    def ex_edit_portlist(self, ex_portlist, description, port_collection,
                         child_portlist_list=None):
        """
        Edit Port List.

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> from libcloud.common.dimensiondata import DimensionDataPort
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Port Collection
        >>> port_1 = DimensionDataPort(begin='4200')
        >>> port_2 = DimensionDataPort(begin='4201', end='4210')
        >>> port_collection = [port_1, port_2]
        >>>
        >>> # Edit Port List
        >>> editPortlist = driver.ex_get_portlist(
            '27dd8c66-80ff-496b-9f54-2a3da2fe679e')
        >>>
        >>> result = driver.ex_edit_portlist(
        >>>     ex_portlist=editPortlist.id,
        >>>     description="Make Changes in portlist",
        >>>     port_collection=port_collection,
        >>>     child_portlist_list={'a9cd4984-6ff5-4f93-89ff-8618ab642bb9'}
        >>> )
        >>> pprint(result)

        :param ex_portlist: Port List to be edited (required)
        :type  ex_portlist: :``str`` or :class:'DimensionDataPortList'

        :param description: Port List Description
        :type  description: :``str``

        :param port_collection: List of Ports
        :type  port_collection: :``str``

        :param child_portlist_list: Child PortList to be included in
                                    this IP Address List
        :type  child_portlist_list: :``list`` of
                                    :class'DimensionDataChildPortList'
                                    or ''str''

        :return: a list of DimensionDataPortList objects
        :rtype: ``list`` of :class:`DimensionDataPortList`
        """
        existing_port_address_list = ET.Element(
            'editPortList',
            {
                "id": self._port_list_to_port_list_id(ex_portlist),
                'xmlns': TYPES_URN,
                'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"
            })

        ET.SubElement(
            existing_port_address_list,
            'description'
        ).text = description

        # A port entry is a single port ('begin' only) or a range
        # ('begin' + 'end').
        for port in port_collection:
            p = ET.SubElement(
                existing_port_address_list,
                'port'
            )
            p.set('begin', port.begin)

            if port.end:
                p.set('end', port.end)

        if child_portlist_list is not None:
            for child in child_portlist_list:
                ET.SubElement(
                    existing_port_address_list,
data=ET.tostring(exchange_elem)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_change_nic_network_adapter(self, nic_id, network_adapter_name): """ Change network adapter of a NIC on a cloud server :param nic_id: Nic ID :type nic_id: :``str`` :param network_adapter_name: Network adapter name :type network_adapter_name: :``str`` :rtype: ``bool`` """ change_elem = ET.Element( 'changeNetworkAdapter', { 'nicId': nic_id, 'xmlns': TYPES_URN }) ET.SubElement(change_elem, 'networkAdapter').text = \ network_adapter_name response = self.connection.request_with_orgId_api_2( 'server/changeNetworkAdapter', method='POST', data=ET.tostring(change_elem)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_create_node_uncustomized(self, name, image, ex_network_domain, ex_is_started=True, ex_description=None, ex_cluster_id=None, ex_cpu_specification=None, ex_memory_gb=None, ex_primary_nic_private_ipv4=None, ex_primary_nic_vlan=None, ex_primary_nic_network_adapter=None, ex_additional_nics=None, ex_disks=None, ex_tagid_value_pairs=None, ex_tagname_value_pairs=None ): """ This MCP 2.0 only function deploys a new Cloud Server from a CloudControl compatible Server Image, which does not utilize VMware Guest OS Customization process. Create Node in MCP2 Data Center :keyword name: (required) String with a name for this new node :type name: ``str`` :keyword image: (UUID of the Server Image being used as the target for the new Server deployment. The source Server Image (OS Image or Customer Image) must have osCustomization set to true. See Get/List OS Image(s) and Get/List Customer Image(s). 
:type image: :class:`NodeImage` or ``str`` :keyword ex_network_domain: (required) Network Domain or Network Domain ID to create the node :type ex_network_domain: :class:`DimensionDataNetworkDomain` or ``str`` :keyword ex_description: (optional) description for this node :type ex_description: ``str`` :keyword ex_cluster_id: (optional) For multiple cluster environments, it is possible to set a destination cluster for the new Customer Image. Note that performance of this function is optimal when either the Server cluster and destination are the same or when shared data storage is in place for the multiple clusters. :type ex_cluster_id: ``str`` :keyword ex_primary_nic_private_ipv4: Provide private IPv4. Ignore if ex_primary_nic_vlan is provided. Use one or the other. Not both. :type ex_primary_nic_private_ipv4: :``str`` :keyword ex_primary_nic_vlan: Provide VLAN for the node if ex_primary_nic_private_ipv4 NOT provided. One or the other. Not both. :type ex_primary_nic_vlan: :class: DimensionDataVlan or ``str`` :keyword ex_primary_nic_network_adapter: (Optional) Default value for the Operating System will be used if leave empty. Example: "E1000". :type ex_primary_nic_network_adapter: :``str`` :keyword ex_additional_nics: (optional) List :class:'DimensionDataNic' or None :type ex_additional_nics: ``list`` of :class:'DimensionDataNic' or ``str`` :keyword ex_memory_gb: (optional) The amount of memory in GB for the server Can be used to override the memory value inherited from the source Server Image. :type ex_memory_gb: ``int`` :keyword ex_cpu_specification: (optional) The spec of CPU to deploy :type ex_cpu_specification: :class:`DimensionDataServerCpuSpecification` :keyword ex_is_started: (required) Start server after creation. Default is set to true. :type ex_is_started: ``bool`` :keyword ex_disks: (optional) Dimensiondata disks. 
Optional disk elements can be used to define the disk speed that each disk on the Server; inherited from the source Server Image will be deployed to. It is not necessary to include a diskelement for every disk; only those that you wish to set a disk speed value for. Note that scsiId 7 cannot be used.Up to 13 disks can be present in addition to the required OS disk on SCSI ID 0. Refer to https://docs.mcp-services.net/x/UwIu for disk :type ex_disks: List or tuple of :class:'DimensionDataServerDisk` :keyword ex_tagid_value_pairs: (Optional) up to 10 tag elements may be provided. A combination of tagById and tag name cannot be supplied in the same request. Note: ex_tagid_value_pairs and ex_tagname_value_pairs is mutually exclusive. Use one or other. :type ex_tagname_value_pairs: ``dict``. Value can be None. :keyword ex_tagname_value_pairs: (Optional) up to 10 tag elements may be provided. A combination of tagById and tag name cannot be supplied in the same request. Note: ex_tagid_value_pairs and ex_tagname_value_pairs is mutually exclusive. Use one or other. :type ex_tagname_value_pairs: ``dict```. :return: The newly created :class:`Node`. :rtype: :class:`Node` """ # Unsupported for version lower than 2.4 if LooseVersion(self.connection.active_api_version) < LooseVersion( '2.4'): raise Exception("This feature is NOT supported in " "earlier api version of 2.4") # Default start to true if input is invalid if not isinstance(ex_is_started, bool): ex_is_started = True print("Warning: ex_is_started input value is invalid. 
Default" "to True") server_uncustomized_elm = ET.Element('deployUncustomizedServer', {'xmlns': TYPES_URN}) ET.SubElement(server_uncustomized_elm, "name").text = name ET.SubElement(server_uncustomized_elm, "description").text = \ ex_description image_id = self._image_to_image_id(image) ET.SubElement(server_uncustomized_elm, "imageId").text = image_id if ex_cluster_id: dns_elm = ET.SubElement(server_uncustomized_elm, "primaryDns") dns_elm.text = ex_cluster_id if ex_is_started is not None: ET.SubElement(server_uncustomized_elm, "start").text = str( ex_is_started).lower() if ex_cpu_specification is not None: cpu = ET.SubElement(server_uncustomized_elm, "cpu") cpu.set('speed', ex_cpu_specification.performance) cpu.set('count', str(ex_cpu_specification.cpu_count)) cpu.set('coresPerSocket', str(ex_cpu_specification.cores_per_socket)) if ex_memory_gb is not None: ET.SubElement(server_uncustomized_elm, "memoryGb").text = \ str(ex_memory_gb) if (ex_primary_nic_private_ipv4 is None and ex_primary_nic_vlan is None): raise ValueError("Missing argument. Either " "ex_primary_nic_private_ipv4 or " "ex_primary_nic_vlan " "must be specified.") if (ex_primary_nic_private_ipv4 is not None and ex_primary_nic_vlan is not None): raise ValueError("Either ex_primary_nic_private_ipv4 or " "ex_primary_nic_vlan " "be specified. 
Not both.") network_elm = ET.SubElement(server_uncustomized_elm, "networkInfo") net_domain_id = self._network_domain_to_network_domain_id( ex_network_domain) network_elm.set('networkDomainId', net_domain_id) pri_nic = ET.SubElement(network_elm, 'primaryNic') if ex_primary_nic_private_ipv4 is not None: ET.SubElement(pri_nic, 'privateIpv4').text = ex_primary_nic_private_ipv4 if ex_primary_nic_vlan is not None: vlan_id = self._vlan_to_vlan_id(ex_primary_nic_vlan) ET.SubElement(pri_nic, 'vlanId').text = vlan_id if ex_primary_nic_network_adapter is not None: ET.SubElement(pri_nic, "networkAdapter").text = \ ex_primary_nic_network_adapter if isinstance(ex_additional_nics, (list, tuple)): for nic in ex_additional_nics: additional_nic = ET.SubElement(network_elm, 'additionalNic') if (nic.private_ip_v4 is None and nic.vlan is None): raise ValueError("Either a vlan or private_ip_v4 " "must be specified for each " "additional nic.") if (nic.private_ip_v4 is not None and nic.vlan is not None): raise ValueError("Either a vlan or private_ip_v4 " "must be specified for each " "additional nic. 
Not both.") if nic.private_ip_v4 is not None: ET.SubElement(additional_nic, 'privateIpv4').text = nic.private_ip_v4 if nic.vlan is not None: vlan_id = self._vlan_to_vlan_id(nic.vlan) ET.SubElement(additional_nic, 'vlanId').text = vlan_id if nic.network_adapter_name is not None: ET.SubElement(additional_nic, "networkAdapter").text = \ nic.network_adapter_name elif ex_additional_nics is not None: raise TypeError( "ex_additional_NICs must be None or tuple/list") if isinstance(ex_disks, (list, tuple)): for disk in ex_disks: disk_elm = ET.SubElement(server_uncustomized_elm, 'disk') disk_elm.set('scsiId', disk.scsi_id) disk_elm.set('speed', disk.speed) elif ex_disks is not None: raise TypeError("ex_disks must be None or tuple/list") # tagid and tagname value pair should not co-exists if ex_tagid_value_pairs is not None and ex_tagname_value_pairs is \ not None: raise ValueError("ex_tagid_value_pairs and ex_tagname_value_pairs" "is mutually exclusive. Use one or the other.") # Tag by ID if ex_tagid_value_pairs is not None: if not isinstance(ex_tagid_value_pairs, dict): raise ValueError( "ex_tagid_value_pairs must be a dictionary." 
) if sys.version_info[0] < 3: tagid_items = ex_tagid_value_pairs.iteritems() else: tagid_items = ex_tagid_value_pairs.items() for k, v in tagid_items: tag_elem = ET.SubElement(server_uncustomized_elm, 'tagById') ET.SubElement(tag_elem, 'tagKeyId').text = k if v is not None: ET.SubElement(tag_elem, 'value').text = v if ex_tagname_value_pairs is not None: if not isinstance(ex_tagname_value_pairs, dict): raise ValueError( "ex_tagname_value_pairs must be a dictionary" ) if sys.version_info[0] < 3: tags_items = ex_tagname_value_pairs.iteritems() else: tags_items = ex_tagname_value_pairs.items() for k, v in tags_items: tag_name_elem = ET.SubElement(server_uncustomized_elm, 'tag') ET.SubElement(tag_name_elem, 'tagKeyName').text = k if v is not None: ET.SubElement(tag_name_elem, 'value').text = v response = self.connection.request_with_orgId_api_2( 'server/deployUncustomizedServer', method='POST', data=ET.tostring(server_uncustomized_elm)).object node_id = None for info in findall(response, 'info', TYPES_URN): if info.get('name') == 'serverId': node_id = info.get('value') new_node = self.ex_get_node_by_id(node_id) return new_node def _format_csv(self, http_response): text = http_response.read() lines = str.splitlines(ensure_string(text)) return [line.split(',') for line in lines] @staticmethod def _get_tagging_asset_type(asset): objecttype = type(asset) if objecttype.__name__ in OBJECT_TO_TAGGING_ASSET_TYPE_MAP: return OBJECT_TO_TAGGING_ASSET_TYPE_MAP[objecttype.__name__] raise TypeError("Asset type %s cannot be tagged" % objecttype.__name__) def _list_nodes_single_page(self, params={}): nodes = self.connection.request_with_orgId_api_2( 'server/server', params=params).object return nodes def _to_tags(self, object): tags = [] for element in object.findall(fixxpath('tag', TYPES_URN)): tags.append(self._to_tag(element)) return tags def _to_tag(self, element): tag_key = self._to_tag_key(element, from_tag_api=True) return DimensionDataTag( asset_type=findtext(element, 
                                'assetType', TYPES_URN),
            asset_id=findtext(element, 'assetId', TYPES_URN),
            asset_name=findtext(element, 'assetName', TYPES_URN),
            datacenter=findtext(element, 'datacenterId', TYPES_URN),
            key=tag_key,
            value=findtext(element, 'value', TYPES_URN)
        )

    def _to_tag_keys(self, object):
        keys = []
        for element in object.findall(fixxpath('tagKey', TYPES_URN)):
            keys.append(self._to_tag_key(element))
        return keys

    def _to_tag_key(self, element, from_tag_api=False):
        # The tag API nests key id/name as child elements; the tagKey API
        # puts the id in an attribute and the name in a 'name' element.
        if from_tag_api:
            id = findtext(element, 'tagKeyId', TYPES_URN)
            name = findtext(element, 'tagKeyName', TYPES_URN)
        else:
            id = element.get('id')
            name = findtext(element, 'name', TYPES_URN)
        return DimensionDataTagKey(
            id=id,
            name=name,
            description=findtext(element, 'description', TYPES_URN),
            value_required=self._str2bool(
                findtext(element, 'valueRequired', TYPES_URN)
            ),
            display_on_report=self._str2bool(
                findtext(element, 'displayOnReport', TYPES_URN)
            )
        )

    def _to_images(self, object, el_name='osImage'):
        images = []
        locations = self.list_locations()

        for element in object.findall(fixxpath(el_name, TYPES_URN)):
            images.append(self._to_image(element, locations))

        return images

    def _to_image(self, element, locations=None):
        # Convert an osImage/customerImage element into a NodeImage.
        location_id = element.get('datacenterId')
        if locations is None:
            locations = self.list_locations(location_id)
        location = list(filter(lambda x: x.id == location_id,
                               locations))[0]
        cpu_spec = self._to_cpu_spec(element.find(fixxpath('cpu', TYPES_URN)))

        # API 2.4+ moved the operating system element under <guest>.
        if LooseVersion(self.connection.active_api_version) > LooseVersion(
                '2.3'):
            os_el = element.find(fixxpath('guest/operatingSystem', TYPES_URN))
        else:
            os_el = element.find(fixxpath('operatingSystem', TYPES_URN))

        if element.tag.endswith('customerImage'):
            is_customer_image = True
        else:
            is_customer_image = False
        extra = {
            'description': findtext(element, 'description', TYPES_URN),
            'OS_type': os_el.get('family'),
            'OS_displayName': os_el.get('displayName'),
            'cpu': cpu_spec,
            'memoryGb': findtext(element, 'memoryGb', TYPES_URN),
            'osImageKey': findtext(element, 'osImageKey', TYPES_URN),
            'created': findtext(element, 'createTime', TYPES_URN),
            'location': location,
            'isCustomerImage': is_customer_image
        }

        return NodeImage(id=element.get('id'),
                         name=str(findtext(element, 'name', TYPES_URN)),
                         extra=extra,
                         driver=self.connection.driver)

    def _to_nat_rules(self, object, network_domain):
        rules = []
        for element in findall(object, 'natRule', TYPES_URN):
            rules.append(
                self._to_nat_rule(element, network_domain))

        return rules

    def _to_nat_rule(self, element, network_domain):
        return DimensionDataNatRule(
            id=element.get('id'),
            network_domain=network_domain,
            internal_ip=findtext(element, 'internalIp', TYPES_URN),
            external_ip=findtext(element, 'externalIp', TYPES_URN),
            status=findtext(element, 'state', TYPES_URN))

    def _to_anti_affinity_rules(self, object):
        rules = []
        for element in findall(object, 'antiAffinityRule', TYPES_URN):
            rules.append(
                self._to_anti_affinity_rule(element))
        return rules

    def _to_anti_affinity_rule(self, element):
        node_list = []
        for node in findall(element, 'serverSummary', TYPES_URN):
            node_list.append(node.get('id'))
        return DimensionDataAntiAffinityRule(
            id=element.get('id'),
            node_list=node_list
        )

    def _to_firewall_rules(self, object, network_domain):
        rules = []
        locations = self.list_locations()
        for element in findall(object, 'firewallRule', TYPES_URN):
            rules.append(
                self._to_firewall_rule(element, locations, network_domain))

        return rules

    def _to_firewall_rule(self, element, locations, network_domain):
        location_id = element.get('datacenterId')
        location = list(filter(lambda x: x.id == location_id,
                               locations))[0]

        return DimensionDataFirewallRule(
            id=element.get('id'),
            network_domain=network_domain,
            name=findtext(element, 'name', TYPES_URN),
            action=findtext(element, 'action', TYPES_URN),
            ip_version=findtext(element, 'ipVersion', TYPES_URN),
            protocol=findtext(element, 'protocol', TYPES_URN),
            enabled=findtext(element, 'enabled', TYPES_URN),
            source=self._to_firewall_address(
                element.find(fixxpath('source', TYPES_URN))),
            destination=self._to_firewall_address(
                element.find(fixxpath('destination', TYPES_URN))),
            location=location,
            status=findtext(element, 'state', TYPES_URN))

    def _to_firewall_address(self, element):
        # A firewall endpoint is either an explicit ip/port spec or a
        # reference to named ipAddressList/portList resources.
        ip = element.find(fixxpath('ip', TYPES_URN))
        port = element.find(fixxpath('port', TYPES_URN))
        port_list = element.find(fixxpath('portList', TYPES_URN))
        address_list = element.find(fixxpath('ipAddressList', TYPES_URN))
        if address_list is None:
            # NOTE(review): in this branch address_list is known to be None,
            # so the conditional below always yields None; kept as-is.
            return DimensionDataFirewallAddress(
                any_ip=ip.get('address') == 'ANY',
                ip_address=ip.get('address'),
                ip_prefix_size=ip.get('prefixSize'),
                port_begin=port.get('begin')
                if port is not None else None,
                port_end=port.get('end')
                if port is not None else None,
                port_list_id=port_list.get('id', None)
                if port_list is not None else None,
                address_list_id=address_list.get('id')
                if address_list is not None else None)
        else:
            return DimensionDataFirewallAddress(
                any_ip=False,
                ip_address=None,
                ip_prefix_size=None,
                port_begin=None,
                port_end=None,
                port_list_id=port_list.get('id', None)
                if port_list is not None else None,
                address_list_id=address_list.get('id')
                if address_list is not None else None)

    def _to_ip_blocks(self, object):
        blocks = []
        locations = self.list_locations()
        for element in findall(object, 'publicIpBlock', TYPES_URN):
            blocks.append(self._to_ip_block(element, locations))

        return blocks

    def _to_ip_block(self, element, locations):
        location_id = element.get('datacenterId')
        location = list(filter(lambda x: x.id == location_id,
                               locations))[0]

        return DimensionDataPublicIpBlock(
            id=element.get('id'),
            network_domain=self.ex_get_network_domain(
                findtext(element, 'networkDomainId', TYPES_URN)
            ),
            base_ip=findtext(element, 'baseIp', TYPES_URN),
            size=findtext(element, 'size', TYPES_URN),
            location=location,
            status=findtext(element, 'state', TYPES_URN))

    def _to_networks(self, object):
        networks = []
        locations = self.list_locations()
        for element in findall(object, 'network', NETWORK_NS):
            networks.append(self._to_network(element, locations))

        return networks
def _to_network(self, element, locations): multicast = False if findtext(element, 'multicast', NETWORK_NS) == 'true': multicast = True status = self._to_status(element.find(fixxpath('status', NETWORK_NS))) location_id = findtext(element, 'location', NETWORK_NS) location = list(filter(lambda x: x.id == location_id, locations))[0] return DimensionDataNetwork( id=findtext(element, 'id', NETWORK_NS), name=findtext(element, 'name', NETWORK_NS), description=findtext(element, 'description', NETWORK_NS), location=location, private_net=findtext(element, 'privateNet', NETWORK_NS), multicast=multicast, status=status) def _to_network_domains(self, object): network_domains = [] locations = self.list_locations() for element in findall(object, 'networkDomain', TYPES_URN): network_domains.append(self._to_network_domain(element, locations)) return network_domains def _to_network_domain(self, element, locations): location_id = element.get('datacenterId') location = list(filter(lambda x: x.id == location_id, locations))[0] plan = findtext(element, 'type', TYPES_URN) if plan is 'ESSENTIALS': plan_type = NetworkDomainServicePlan.ESSENTIALS else: plan_type = NetworkDomainServicePlan.ADVANCED return DimensionDataNetworkDomain( id=element.get('id'), name=findtext(element, 'name', TYPES_URN), description=findtext(element, 'description', TYPES_URN), plan=plan_type, location=location, status=findtext(element, 'state', TYPES_URN)) def _to_vlans(self, object): vlans = [] locations = self.list_locations() for element in findall(object, 'vlan', TYPES_URN): vlans.append(self._to_vlan(element, locations=locations)) return vlans def _to_vlan(self, element, locations): location_id = element.get('datacenterId') location = list(filter(lambda x: x.id == location_id, locations))[0] ip_range = element.find(fixxpath('privateIpv4Range', TYPES_URN)) ip6_range = element.find(fixxpath('ipv6Range', TYPES_URN)) network_domain_el = element.find( fixxpath('networkDomain', TYPES_URN)) network_domain = 
self.ex_get_network_domain( network_domain_el.get('id')) return DimensionDataVlan( id=element.get('id'), name=findtext(element, 'name', TYPES_URN), description=findtext(element, 'description', TYPES_URN), network_domain=network_domain, private_ipv4_range_address=ip_range.get('address'), private_ipv4_range_size=int(ip_range.get('prefixSize')), ipv6_range_address=ip6_range.get('address'), ipv6_range_size=int(ip6_range.get('prefixSize')), ipv4_gateway=findtext( element, 'ipv4GatewayAddress', TYPES_URN), ipv6_gateway=findtext( element, 'ipv6GatewayAddress', TYPES_URN), location=location, status=findtext(element, 'state', TYPES_URN)) def _to_locations(self, object): locations = [] for element in object.findall(fixxpath('datacenter', TYPES_URN)): locations.append(self._to_location(element)) return locations def _to_location(self, element): l = NodeLocation(id=element.get('id'), name=findtext(element, 'displayName', TYPES_URN), country=findtext(element, 'country', TYPES_URN), driver=self) return l def _to_cpu_spec(self, element): return DimensionDataServerCpuSpecification( cpu_count=int(element.get('count')), cores_per_socket=int(element.get('coresPerSocket')), performance=element.get('speed')) def _to_vmware_tools(self, element): status = None if hasattr(element, 'runningStatus'): status = element.get('runningStatus') version_status = None if hasattr(element, 'version_status'): version_status = element.get('version_status') api_version = None if hasattr(element, 'apiVersion'): api_version = element.get('apiVersion') return DimensionDataServerVMWareTools( status=status, version_status=version_status, api_version=api_version) def _to_disks(self, object): disk_elements = object.findall(fixxpath('disk', TYPES_URN)) return [self._to_disk(el) for el in disk_elements] def _to_disk(self, element): return DimensionDataServerDisk( id=element.get('id'), scsi_id=int(element.get('scsiId')), size_gb=int(element.get('sizeGb')), speed=element.get('speed'), state=element.get('state') ) 
def _to_nodes(self, object): node_elements = object.findall(fixxpath('server', TYPES_URN)) return [self._to_node(el) for el in node_elements] def _to_node(self, element): started = findtext(element, 'started', TYPES_URN) status = self._to_status(element.find(fixxpath('progress', TYPES_URN))) dd_state = findtext(element, 'state', TYPES_URN) node_state = self._get_node_state(dd_state, started, status.action) has_network_info \ = element.find(fixxpath('networkInfo', TYPES_URN)) is not None cpu_spec = self._to_cpu_spec(element.find(fixxpath('cpu', TYPES_URN))) disks = self._to_disks(element) # Vmware Tools # Version 2.3 or earlier if LooseVersion(self.connection.active_api_version) < LooseVersion( '2.4'): vmware_tools = self._to_vmware_tools( element.find(fixxpath('vmwareTools', TYPES_URN))) operation_system = element.find(fixxpath( 'operatingSystem', TYPES_URN)) # Version 2.4 or later else: vmtools_elm = fixxpath('guest/vmTools', TYPES_URN) if vmtools_elm is not None: vmware_tools = self._to_vmware_tools(vmtools_elm) operation_system = element.find(fixxpath( 'guest/operatingSystem', TYPES_URN)) extra = { 'description': findtext(element, 'description', TYPES_URN), 'sourceImageId': findtext(element, 'sourceImageId', TYPES_URN), 'networkId': findtext(element, 'networkId', TYPES_URN), 'networkDomainId': element.find(fixxpath('networkInfo', TYPES_URN)) .get('networkDomainId') if has_network_info else None, 'datacenterId': element.get('datacenterId'), 'deployedTime': findtext(element, 'createTime', TYPES_URN), 'cpu': cpu_spec, 'memoryMb': int(findtext( element, 'memoryGb', TYPES_URN)) * 1024, 'OS_id': operation_system.get('id'), 'OS_type': operation_system.get('family'), 'OS_displayName': operation_system.get('displayName'), 'status': status, 'disks': disks, 'vmWareTools': vmware_tools } public_ip = findtext(element, 'publicIpAddress', TYPES_URN) private_ip = element.find( fixxpath('networkInfo/primaryNic', TYPES_URN)) \ .get('privateIpv4') \ if has_network_info else \ 
element.find(fixxpath('nic', TYPES_URN)).get('privateIpv4') extra['ipv6'] = element.find( fixxpath('networkInfo/primaryNic', TYPES_URN)) \ .get('ipv6') \ if has_network_info else \ element.find(fixxpath('nic', TYPES_URN)).get('ipv6') n = Node(id=element.get('id'), name=findtext(element, 'name', TYPES_URN), state=node_state, public_ips=[public_ip] if public_ip is not None else [], private_ips=[private_ip] if private_ip is not None else [], size=self.list_sizes()[0], image=NodeImage(extra['sourceImageId'], extra['OS_displayName'], self.connection.driver), driver=self.connection.driver, extra=extra) return n def _to_status(self, element): if element is None: return DimensionDataStatus() s = DimensionDataStatus(action=findtext(element, 'action', TYPES_URN), request_time=findtext( element, 'requestTime', TYPES_URN), user_name=findtext( element, 'userName', TYPES_URN), number_of_steps=findtext( element, 'numberOfSteps', TYPES_URN), step_name=findtext( element, 'step/name', TYPES_URN), step_number=findtext( element, 'step_number', TYPES_URN), step_percent_complete=findtext( element, 'step/percentComplete', TYPES_URN), failure_reason=findtext( element, 'failureReason', TYPES_URN)) return s def _to_ip_address_lists(self, object): ip_address_lists = [] for element in findall(object, 'ipAddressList', TYPES_URN): ip_address_lists.append(self._to_ip_address_list(element)) return ip_address_lists def _to_ip_address_list(self, element): ipAddresses = [] for ip in findall(element, 'ipAddress', TYPES_URN): ipAddresses.append(self._to_ip_address(ip)) child_ip_address_lists = [] for child_ip_list in findall(element, 'childIpAddressList', TYPES_URN): child_ip_address_lists.append(self ._to_child_ip_list(child_ip_list)) return DimensionDataIpAddressList( id=element.get('id'), name=findtext(element, 'name', TYPES_URN), description=findtext(element, 'description', TYPES_URN), ip_version=findtext(element, 'ipVersion', TYPES_URN), ip_address_collection=ipAddresses, state=findtext(element, 
'state', TYPES_URN), create_time=findtext(element, 'createTime', TYPES_URN), child_ip_address_lists=child_ip_address_lists ) def _to_child_ip_list(self, element): return DimensionDataChildIpAddressList( id=element.get('id'), name=element.get('name') ) def _to_ip_address(self, element): return DimensionDataIpAddress( begin=element.get('begin'), end=element.get('end'), prefix_size=element.get('prefixSize') ) def _to_port_lists(self, object): port_lists = [] for element in findall(object, 'portList', TYPES_URN): port_lists.append(self._to_port_list(element)) return port_lists def _to_port_list(self, element): ports = [] for port in findall(element, 'port', TYPES_URN): ports.append(self._to_port(element=port)) child_portlist_list = [] for child in findall(element, 'childPortList', TYPES_URN): child_portlist_list.append( self._to_child_port_list(element=child)) return DimensionDataPortList( id=element.get('id'), name=findtext(element, 'name', TYPES_URN), description=findtext(element, 'description', TYPES_URN), port_collection=ports, child_portlist_list=child_portlist_list, state=findtext(element, 'state', TYPES_URN), create_time=findtext(element, 'createTime', TYPES_URN) ) def _image_needs_auth(self, image): if not isinstance(image, NodeImage): image = self.ex_get_image_by_id(image) if image.extra['isCustomerImage'] and image.extra['OS_type'] == 'UNIX': return False return True @staticmethod def _to_port(element): return DimensionDataPort( begin=element.get('begin'), end=element.get('end') ) @staticmethod def _to_child_port_list(element): return DimensionDataChildPortList( id=element.get('id'), name=element.get('name') ) @staticmethod def _get_node_state(state, started, action): try: return NODE_STATE_MAP[(state, started, action)] except KeyError: if started == 'true': return NodeState.RUNNING else: return NodeState.TERMINATED @staticmethod def _node_to_node_id(node): return dd_object_to_id(node, Node) @staticmethod def _location_to_location_id(location): return 
dd_object_to_id(location, NodeLocation) @staticmethod def _vlan_to_vlan_id(vlan): return dd_object_to_id(vlan, DimensionDataVlan) @staticmethod def _image_to_image_id(image): return dd_object_to_id(image, NodeImage) @staticmethod def _network_to_network_id(network): return dd_object_to_id(network, DimensionDataNetwork) @staticmethod def _anti_affinity_rule_to_anti_affinity_rule_id(rule): return dd_object_to_id(rule, DimensionDataAntiAffinityRule) @staticmethod def _network_domain_to_network_domain_id(network_domain): return dd_object_to_id(network_domain, DimensionDataNetworkDomain) @staticmethod def _tag_key_to_tag_key_id(tag_key): return dd_object_to_id(tag_key, DimensionDataTagKey) @staticmethod def _tag_key_to_tag_key_name(tag_key): return dd_object_to_id(tag_key, DimensionDataTagKey, id_value='name') @staticmethod def _ip_address_list_to_ip_address_list_id(ip_addr_list): return dd_object_to_id(ip_addr_list, DimensionDataIpAddressList, id_value='id') @staticmethod def _child_ip_address_list_to_child_ip_address_list_id(child_ip_addr_list): return dd_object_to_id(child_ip_addr_list, DimensionDataChildIpAddressList, id_value='id') @staticmethod def _port_list_to_port_list_id(port_list): return dd_object_to_id(port_list, DimensionDataPortList, id_value='id') @staticmethod def _child_port_list_to_child_port_list_id(child_port_list): return dd_object_to_id(child_port_list, DimensionDataChildPortList, id_value='id') @staticmethod def _str2bool(string): return string.lower() in ("true") apache-libcloud-2.2.1/libcloud/compute/base.py0000664000175000017500000014324713153541406021170 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Provides base classes for working with drivers """ from __future__ import with_statement import sys import time import hashlib import os import socket import random import binascii from libcloud.utils.py3 import b import libcloud.compute.ssh from libcloud.pricing import get_size_price from libcloud.compute.types import NodeState, StorageVolumeState,\ DeploymentError from libcloud.compute.ssh import SSHClient from libcloud.common.base import ConnectionKey from libcloud.common.base import BaseDriver from libcloud.common.types import LibcloudError from libcloud.compute.ssh import have_paramiko from libcloud.utils.networking import is_private_subnet from libcloud.utils.networking import is_valid_ip_address if have_paramiko: from paramiko.ssh_exception import SSHException from paramiko.ssh_exception import AuthenticationException SSH_TIMEOUT_EXCEPTION_CLASSES = (AuthenticationException, SSHException, IOError, socket.gaierror, socket.error) else: SSH_TIMEOUT_EXCEPTION_CLASSES = (IOError, socket.gaierror, socket.error) # How long to wait for the node to come online after creating it NODE_ONLINE_WAIT_TIMEOUT = 10 * 60 # How long to try connecting to a remote SSH server when running a deployment # script. 
SSH_CONNECT_TIMEOUT = 5 * 60 __all__ = [ 'Node', 'NodeState', 'NodeSize', 'NodeImage', 'NodeLocation', 'NodeAuthSSHKey', 'NodeAuthPassword', 'NodeDriver', 'StorageVolume', 'StorageVolumeState', 'VolumeSnapshot', # Deprecated, moved to libcloud.utils.networking 'is_private_subnet', 'is_valid_ip_address' ] class UuidMixin(object): """ Mixin class for get_uuid function. """ def __init__(self): self._uuid = None def get_uuid(self): """ Unique hash for a node, node image, or node size The hash is a function of an SHA1 hash of the node, node image, or node size's ID and its driver which means that it should be unique between all objects of its type. In some subclasses (e.g. GoGridNode) there is no ID available so the public IP address is used. This means that, unlike a properly done system UUID, the same UUID may mean a different system install at a different time >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> node = driver.create_node() >>> node.get_uuid() 'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f' Note, for example, that this example will always produce the same UUID! :rtype: ``str`` """ if not self._uuid: self._uuid = hashlib.sha1(b('%s:%s' % (self.id, self.driver.type))).hexdigest() return self._uuid @property def uuid(self): return self.get_uuid() class Node(UuidMixin): """ Provide a common interface for handling nodes of all types. The Node object provides the interface in libcloud through which we can manipulate nodes in different cloud providers in the same way. Node objects don't actually do much directly themselves, instead the node driver handles the connection to the node. You don't normally create a node object yourself; instead you use a driver and then have that create the node for you. 
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> node = driver.create_node() >>> node.public_ips[0] '127.0.0.3' >>> node.name 'dummy-3' You can also get nodes from the driver's list_node function. >>> node = driver.list_nodes()[0] >>> node.name 'dummy-1' The node keeps a reference to its own driver which means that we can work on nodes from different providers without having to know which is which. >>> driver = DummyNodeDriver(72) >>> node2 = driver.create_node() >>> node.driver.creds 0 >>> node2.driver.creds 72 Although Node objects can be subclassed, this isn't normally done. Instead, any driver specific information is stored in the "extra" attribute of the node. >>> node.extra {'foo': 'bar'} """ def __init__(self, id, name, state, public_ips, private_ips, driver, size=None, image=None, extra=None, created_at=None): """ :param id: Node ID. :type id: ``str`` :param name: Node name. :type name: ``str`` :param state: Node state. :type state: :class:`libcloud.compute.types.NodeState` :param public_ips: Public IP addresses associated with this node. :type public_ips: ``list`` :param private_ips: Private IP addresses associated with this node. :type private_ips: ``list`` :param driver: Driver this node belongs to. :type driver: :class:`.NodeDriver` :param size: Size of this node. (optional) :type size: :class:`.NodeSize` :param image: Image of this node. (optional) :type image: :class:`.NodeImage` :param created_at: The datetime this node was created (optional) :type created_at: :class: `datetime.datetime` :param extra: Optional provider specific attributes associated with this node. 
:type extra: ``dict`` """ self.id = str(id) if id else None self.name = name self.state = state self.public_ips = public_ips if public_ips else [] self.private_ips = private_ips if private_ips else [] self.driver = driver self.size = size self.created_at = created_at self.image = image self.extra = extra or {} UuidMixin.__init__(self) def reboot(self): """ Reboot this node :return: ``bool`` This calls the node's driver and reboots the node >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> node = driver.create_node() >>> node.state == NodeState.RUNNING True >>> node.state == NodeState.REBOOTING False >>> node.reboot() True >>> node.state == NodeState.REBOOTING True """ return self.driver.reboot_node(self) def destroy(self): """ Destroy this node :return: ``bool`` This calls the node's driver and destroys the node >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> from libcloud.compute.types import NodeState >>> node = driver.create_node() >>> node.state == NodeState.RUNNING True >>> node.destroy() True >>> node.state == NodeState.RUNNING False """ return self.driver.destroy_node(self) def __repr__(self): state = NodeState.tostring(self.state) return (('') % (self.uuid, self.name, state, self.public_ips, self.private_ips, self.driver.name)) class NodeSize(UuidMixin): """ A Base NodeSize class to derive from. NodeSizes are objects which are typically returned a driver's list_sizes function. They contain a number of different parameters which define how big an image is. The exact parameters available depends on the provider. N.B. Where a parameter is "unlimited" (for example bandwidth in Amazon) this will be given as 0. 
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> size = driver.list_sizes()[0] >>> size.ram 128 >>> size.bandwidth 500 >>> size.price 4 """ def __init__(self, id, name, ram, disk, bandwidth, price, driver, extra=None): """ :param id: Size ID. :type id: ``str`` :param name: Size name. :type name: ``str`` :param ram: Amount of memory (in MB) provided by this size. :type ram: ``int`` :param disk: Amount of disk storage (in GB) provided by this image. :type disk: ``int`` :param bandwidth: Amount of bandiwdth included with this size. :type bandwidth: ``int`` :param price: Price (in US dollars) of running this node for an hour. :type price: ``float`` :param driver: Driver this size belongs to. :type driver: :class:`.NodeDriver` :param extra: Optional provider specific attributes associated with this size. :type extra: ``dict`` """ self.id = str(id) self.name = name self.ram = ram self.disk = disk self.bandwidth = bandwidth self.price = price self.driver = driver self.extra = extra or {} UuidMixin.__init__(self) def __repr__(self): return (('') % (self.id, self.name, self.ram, self.disk, self.bandwidth, self.price, self.driver.name)) class NodeImage(UuidMixin): """ An operating system image. NodeImage objects are typically returned by the driver for the cloud provider in response to the list_images function >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> image = driver.list_images()[0] >>> image.name 'Ubuntu 9.10' Apart from name and id, there is no further standard information; other parameters are stored in a driver specific "extra" variable When creating a node, a node image should be given as an argument to the create_node function to decide which OS image to use. >>> node = driver.create_node(image=image) """ def __init__(self, id, name, driver, extra=None): """ :param id: Image ID. :type id: ``str`` :param name: Image name. 
:type name: ``str`` :param driver: Driver this image belongs to. :type driver: :class:`.NodeDriver` :param extra: Optional provided specific attributes associated with this image. :type extra: ``dict`` """ self.id = str(id) self.name = name self.driver = driver self.extra = extra or {} UuidMixin.__init__(self) def __repr__(self): return (('') % (self.id, self.name, self.driver.name)) class NodeLocation(object): """ A physical location where nodes can be. >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> location = driver.list_locations()[0] >>> location.country 'US' """ def __init__(self, id, name, country, driver): """ :param id: Location ID. :type id: ``str`` :param name: Location name. :type name: ``str`` :param country: Location country. :type country: ``str`` :param driver: Driver this location belongs to. :type driver: :class:`.NodeDriver` """ self.id = str(id) self.name = name self.country = country self.driver = driver def __repr__(self): return (('') % (self.id, self.name, self.country, self.driver.name)) class NodeAuthSSHKey(object): """ An SSH key to be installed for authentication to a node. This is the actual contents of the users ssh public key which will normally be installed as root's public key on the node. >>> pubkey = '...' # read from file >>> from libcloud.compute.base import NodeAuthSSHKey >>> k = NodeAuthSSHKey(pubkey) >>> k """ def __init__(self, pubkey): """ :param pubkey: Public key matetiral. :type pubkey: ``str`` """ self.pubkey = pubkey def __repr__(self): return '' class NodeAuthPassword(object): """ A password to be used for authentication to a node. """ def __init__(self, password, generated=False): """ :param password: Password. :type password: ``str`` :type generated: ``True`` if this password was automatically generated, ``False`` otherwise. 
""" self.password = password self.generated = generated def __repr__(self): return '' class StorageVolume(UuidMixin): """ A base StorageVolume class to derive from. """ def __init__(self, id, name, size, driver, state=None, extra=None): """ :param id: Storage volume ID. :type id: ``str`` :param name: Storage volume name. :type name: ``str`` :param size: Size of this volume (in GB). :type size: ``int`` :param driver: Driver this image belongs to. :type driver: :class:`.NodeDriver` :param state: Optional state of the StorageVolume. If not provided, will default to UNKNOWN. :type state: :class:`.StorageVolumeState` :param extra: Optional provider specific attributes. :type extra: ``dict`` """ self.id = id self.name = name self.size = size self.driver = driver self.extra = extra self.state = state UuidMixin.__init__(self) def list_snapshots(self): """ :rtype: ``list`` of ``VolumeSnapshot`` """ return self.driver.list_volume_snapshots(volume=self) def attach(self, node, device=None): """ Attach this volume to a node. :param node: Node to attach volume to :type node: :class:`.Node` :param device: Where the device is exposed, e.g. '/dev/sdb (optional) :type device: ``str`` :return: ``True`` if attach was successful, ``False`` otherwise. :rtype: ``bool`` """ return self.driver.attach_volume(node=node, volume=self, device=device) def detach(self): """ Detach this volume from its node :return: ``True`` if detach was successful, ``False`` otherwise. :rtype: ``bool`` """ return self.driver.detach_volume(volume=self) def snapshot(self, name): """ Creates a snapshot of this volume. :return: Created snapshot. :rtype: ``VolumeSnapshot`` """ return self.driver.create_volume_snapshot(volume=self, name=name) def destroy(self): """ Destroy this storage volume. :return: ``True`` if destroy was successful, ``False`` otherwise. 
:rtype: ``bool`` """ return self.driver.destroy_volume(volume=self) def __repr__(self): return '' % ( self.id, self.size, self.driver.name) class VolumeSnapshot(object): """ A base VolumeSnapshot class to derive from. """ def __init__(self, id, driver, size=None, extra=None, created=None, state=None, name=None): """ VolumeSnapshot constructor. :param id: Snapshot ID. :type id: ``str`` :param driver: The driver that represents a connection to the provider :type driver: `NodeDriver` :param size: A snapshot size in GB. :type size: ``int`` :param extra: Provider depends parameters for snapshot. :type extra: ``dict`` :param created: A datetime object that represents when the snapshot was created :type created: ``datetime.datetime`` :param state: A string representing the state the snapshot is in. See `libcloud.compute.types.StorageVolumeState`. :type state: ``str`` :param name: A string representing the name of the snapshot :type name: ``str`` """ self.id = id self.driver = driver self.size = size self.extra = extra or {} self.created = created self.state = state self.name = name def destroy(self): """ Destroys this snapshot. :rtype: ``bool`` """ return self.driver.destroy_volume_snapshot(snapshot=self) def __repr__(self): return ('' % (self.name, self.id, self.size, self.driver.name, self.state)) class KeyPair(object): """ Represents a SSH key pair. """ def __init__(self, name, public_key, fingerprint, driver, private_key=None, extra=None): """ Constructor. :keyword name: Name of the key pair object. :type name: ``str`` :keyword fingerprint: Key fingerprint. :type fingerprint: ``str`` :keyword public_key: Public key in OpenSSH format. :type public_key: ``str`` :keyword private_key: Private key in PEM format. :type private_key: ``str`` :keyword extra: Provider specific attributes associated with this key pair. 
(optional) :type extra: ``dict`` """ self.name = name self.fingerprint = fingerprint self.public_key = public_key self.private_key = private_key self.driver = driver self.extra = extra or {} def __repr__(self): return ('' % (self.name, self.fingerprint, self.driver.name)) class NodeDriver(BaseDriver): """ A base NodeDriver class to derive from This class is always subclassed by a specific driver. For examples of base behavior of most functions (except deploy node) see the dummy driver. """ connectionCls = ConnectionKey name = None type = None port = None features = {'create_node': []} """ List of available features for a driver. - :meth:`libcloud.compute.base.NodeDriver.create_node` - ssh_key: Supports :class:`.NodeAuthSSHKey` as an authentication method for nodes. - password: Supports :class:`.NodeAuthPassword` as an authentication method for nodes. - generates_password: Returns a password attribute on the Node object returned from creation. """ NODE_STATE_MAP = {} def list_nodes(self): """ List all nodes. :return: list of node objects :rtype: ``list`` of :class:`.Node` """ raise NotImplementedError( 'list_nodes not implemented for this driver') def list_sizes(self, location=None): """ List sizes on a provider :param location: The location at which to list sizes :type location: :class:`.NodeLocation` :return: list of node size objects :rtype: ``list`` of :class:`.NodeSize` """ raise NotImplementedError( 'list_sizes not implemented for this driver') def list_locations(self): """ List data centers for a provider :return: list of node location objects :rtype: ``list`` of :class:`.NodeLocation` """ raise NotImplementedError( 'list_locations not implemented for this driver') def create_node(self, **kwargs): """ Create a new node instance. This instance will be started automatically. Not all hosting API's are created equal and to allow libcloud to support as many as possible there are some standard supported variations of ``create_node``. 
These are declared using a ``features`` API. You can inspect ``driver.features['create_node']`` to see what variation of the API you are dealing with: ``ssh_key`` You can inject a public key into a new node allows key based SSH authentication. ``password`` You can inject a password into a new node for SSH authentication. If no password is provided libcloud will generated a password. The password will be available as ``return_value.extra['password']``. ``generates_password`` The hosting provider will generate a password. It will be returned to you via ``return_value.extra['password']``. Some drivers allow you to set how you will authenticate with the instance that is created. You can inject this initial authentication information via the ``auth`` parameter. If a driver supports the ``ssh_key`` feature flag for ``created_node`` you can upload a public key into the new instance:: >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> auth = NodeAuthSSHKey('pubkey data here') >>> node = driver.create_node("test_node", auth=auth) If a driver supports the ``password`` feature flag for ``create_node`` you can set a password:: >>> driver = DummyNodeDriver(0) >>> auth = NodeAuthPassword('mysecretpassword') >>> node = driver.create_node("test_node", auth=auth) If a driver supports the ``password`` feature and you don't provide the ``auth`` argument libcloud will assign a password:: >>> driver = DummyNodeDriver(0) >>> node = driver.create_node("test_node") >>> password = node.extra['password'] A password will also be returned in this way for drivers that declare the ``generates_password`` feature, though in that case the password is actually provided to the driver API by the hosting provider rather than generated by libcloud. You can only pass a :class:`.NodeAuthPassword` or :class:`.NodeAuthSSHKey` to ``create_node`` via the auth parameter if has the corresponding feature flag. 
:param name: String with a name for this new node (required) :type name: ``str`` :param size: The size of resources allocated to this node. (required) :type size: :class:`.NodeSize` :param image: OS Image to boot on node. (required) :type image: :class:`.NodeImage` :param location: Which data center to create a node in. If empty, undefined behavior will be selected. (optional) :type location: :class:`.NodeLocation` :param auth: Initial authentication information for the node (optional) :type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword` :return: The newly created node. :rtype: :class:`.Node` """ raise NotImplementedError( 'create_node not implemented for this driver') def deploy_node(self, **kwargs): """ Create a new node, and start deployment. In order to be able to SSH into a created node access credentials are required. A user can pass either a :class:`.NodeAuthPassword` or :class:`.NodeAuthSSHKey` to the ``auth`` argument. If the ``create_node`` implementation supports that kind if credential (as declared in ``self.features['create_node']``) then it is passed on to ``create_node``. Otherwise it is not passed on to ``create_node`` and it is only used for authentication. If the ``auth`` parameter is not supplied but the driver declares it supports ``generates_password`` then the password returned by ``create_node`` will be used to SSH into the server. Finally, if the ``ssh_key_file`` is supplied that key will be used to SSH into the server. This function may raise a :class:`DeploymentException`, if a create_node call was successful, but there is a later error (like SSH failing or timing out). This exception includes a Node object which you may want to destroy if incomplete deployments are not desirable. 
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> from libcloud.compute.deployment import ScriptDeployment >>> from libcloud.compute.deployment import MultiStepDeployment >>> from libcloud.compute.base import NodeAuthSSHKey >>> driver = DummyNodeDriver(0) >>> key = NodeAuthSSHKey('...') # read from file >>> script = ScriptDeployment("yum -y install emacs strace tcpdump") >>> msd = MultiStepDeployment([key, script]) >>> def d(): ... try: ... driver.deploy_node(deploy=msd) ... except NotImplementedError: ... print ("not implemented for dummy driver") >>> d() not implemented for dummy driver Deploy node is typically not overridden in subclasses. The existing implementation should be able to handle most such. :param deploy: Deployment to run once machine is online and available to SSH. :type deploy: :class:`Deployment` :param ssh_username: Optional name of the account which is used when connecting to SSH server (default is root) :type ssh_username: ``str`` :param ssh_alternate_usernames: Optional list of ssh usernames to try to connect with if using the default one fails :type ssh_alternate_usernames: ``list`` :param ssh_port: Optional SSH server port (default is 22) :type ssh_port: ``int`` :param ssh_timeout: Optional SSH connection timeout in seconds (default is 10) :type ssh_timeout: ``float`` :param auth: Initial authentication information for the node (optional) :type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword` :param ssh_key: A path (or paths) to an SSH private key with which to attempt to authenticate. (optional) :type ssh_key: ``str`` or ``list`` of ``str`` :param timeout: How many seconds to wait before timing out. (default is 600) :type timeout: ``int`` :param max_tries: How many times to retry if a deployment fails before giving up (default is 3) :type max_tries: ``int`` :param ssh_interface: The interface to wait for. Default is 'public_ips', other option is 'private_ips'. 
:type ssh_interface: ``str`` """ if not libcloud.compute.ssh.have_paramiko: raise RuntimeError('paramiko is not installed. You can install ' + 'it using pip: pip install paramiko') if 'auth' in kwargs: auth = kwargs['auth'] if not isinstance(auth, (NodeAuthSSHKey, NodeAuthPassword)): raise NotImplementedError( 'If providing auth, only NodeAuthSSHKey or' 'NodeAuthPassword is supported') elif 'ssh_key' in kwargs: # If an ssh_key is provided we can try deploy_node pass elif 'create_node' in self.features: f = self.features['create_node'] if 'generates_password' not in f and "password" not in f: raise NotImplementedError( 'deploy_node not implemented for this driver') else: raise NotImplementedError( 'deploy_node not implemented for this driver') node = self.create_node(**kwargs) max_tries = kwargs.get('max_tries', 3) password = None if 'auth' in kwargs: if isinstance(kwargs['auth'], NodeAuthPassword): password = kwargs['auth'].password elif 'password' in node.extra: password = node.extra['password'] ssh_interface = kwargs.get('ssh_interface', 'public_ips') # Wait until node is up and running and has IP assigned try: node, ip_addresses = self.wait_until_running( nodes=[node], wait_period=3, timeout=kwargs.get('timeout', NODE_ONLINE_WAIT_TIMEOUT), ssh_interface=ssh_interface)[0] except Exception: e = sys.exc_info()[1] raise DeploymentError(node=node, original_exception=e, driver=self) ssh_username = kwargs.get('ssh_username', 'root') ssh_alternate_usernames = kwargs.get('ssh_alternate_usernames', []) ssh_port = kwargs.get('ssh_port', 22) ssh_timeout = kwargs.get('ssh_timeout', 10) ssh_key_file = kwargs.get('ssh_key', None) timeout = kwargs.get('timeout', SSH_CONNECT_TIMEOUT) deploy_error = None for username in ([ssh_username] + ssh_alternate_usernames): try: self._connect_and_run_deployment_script( task=kwargs['deploy'], node=node, ssh_hostname=ip_addresses[0], ssh_port=ssh_port, ssh_username=username, ssh_password=password, ssh_key_file=ssh_key_file, 
ssh_timeout=ssh_timeout, timeout=timeout, max_tries=max_tries) except Exception: # Try alternate username # Todo: Need to fix paramiko so we can catch a more specific # exception e = sys.exc_info()[1] deploy_error = e else: # Script successfully executed, don't try alternate username deploy_error = None break if deploy_error is not None: raise DeploymentError(node=node, original_exception=deploy_error, driver=self) return node def reboot_node(self, node): """ Reboot a node. :param node: The node to be rebooted :type node: :class:`.Node` :return: True if the reboot was successful, otherwise False :rtype: ``bool`` """ raise NotImplementedError( 'reboot_node not implemented for this driver') def destroy_node(self, node): """ Destroy a node. Depending upon the provider, this may destroy all data associated with the node, including backups. :param node: The node to be destroyed :type node: :class:`.Node` :return: True if the destroy was successful, False otherwise. :rtype: ``bool`` """ raise NotImplementedError( 'destroy_node not implemented for this driver') ## # Volume and snapshot management methods ## def list_volumes(self): """ List storage volumes. :rtype: ``list`` of :class:`.StorageVolume` """ raise NotImplementedError( 'list_volumes not implemented for this driver') def list_volume_snapshots(self, volume): """ List snapshots for a storage volume. :rtype: ``list`` of :class:`VolumeSnapshot` """ raise NotImplementedError( 'list_volume_snapshots not implemented for this driver') def create_volume(self, size, name, location=None, snapshot=None): """ Create a new volume. :param size: Size of volume in gigabytes (required) :type size: ``int`` :param name: Name of the volume to be created :type name: ``str`` :param location: Which data center to create a volume in. If empty, undefined behavior will be selected. (optional) :type location: :class:`.NodeLocation` :param snapshot: Snapshot from which to create the new volume. 
(optional) :type snapshot: :class:`.VolumeSnapshot` :return: The newly created volume. :rtype: :class:`StorageVolume` """ raise NotImplementedError( 'create_volume not implemented for this driver') def create_volume_snapshot(self, volume, name=None): """ Creates a snapshot of the storage volume. :param volume: The StorageVolume to create a VolumeSnapshot from :type volume: :class:`.VolumeSnapshot` :param name: Name of created snapshot (optional) :type name: `str` :rtype: :class:`VolumeSnapshot` """ raise NotImplementedError( 'create_volume_snapshot not implemented for this driver') def attach_volume(self, node, volume, device=None): """ Attaches volume to node. :param node: Node to attach volume to. :type node: :class:`.Node` :param volume: Volume to attach. :type volume: :class:`.StorageVolume` :param device: Where the device is exposed, e.g. '/dev/sdb' :type device: ``str`` :rytpe: ``bool`` """ raise NotImplementedError('attach not implemented for this driver') def detach_volume(self, volume): """ Detaches a volume from a node. :param volume: Volume to be detached :type volume: :class:`.StorageVolume` :rtype: ``bool`` """ raise NotImplementedError('detach not implemented for this driver') def destroy_volume(self, volume): """ Destroys a storage volume. :param volume: Volume to be destroyed :type volume: :class:`StorageVolume` :rtype: ``bool`` """ raise NotImplementedError( 'destroy_volume not implemented for this driver') def destroy_volume_snapshot(self, snapshot): """ Destroys a snapshot. :param snapshot: The snapshot to delete :type snapshot: :class:`VolumeSnapshot` :rtype: :class:`bool` """ raise NotImplementedError( 'destroy_volume_snapshot not implemented for this driver') ## # Image management methods ## def list_images(self, location=None): """ List images on a provider. :param location: The location at which to list images. :type location: :class:`.NodeLocation` :return: list of node image objects. 
:rtype: ``list`` of :class:`.NodeImage` """ raise NotImplementedError( 'list_images not implemented for this driver') def create_image(self, node, name, description=None): """ Creates an image from a node object. :param node: Node to run the task on. :type node: :class:`.Node` :param name: name for new image. :type name: ``str`` :param description: description for new image. :type name: ``description`` :rtype: :class:`.NodeImage`: :return: NodeImage instance on success. """ raise NotImplementedError( 'create_image not implemented for this driver') def delete_image(self, node_image): """ Deletes a node image from a provider. :param node_image: Node image object. :type node_image: :class:`.NodeImage` :return: ``True`` if delete_image was successful, ``False`` otherwise. :rtype: ``bool`` """ raise NotImplementedError( 'delete_image not implemented for this driver') def get_image(self, image_id): """ Returns a single node image from a provider. :param image_id: Node to run the task on. :type image_id: ``str`` :rtype :class:`.NodeImage`: :return: NodeImage instance on success. """ raise NotImplementedError( 'get_image not implemented for this driver') def copy_image(self, source_region, node_image, name, description=None): """ Copies an image from a source region to the current region. :param source_region: Region to copy the node from. :type source_region: ``str`` :param node_image: NodeImage to copy. :type node_image: :class:`.NodeImage`: :param name: name for new image. :type name: ``str`` :param description: description for new image. :type name: ``str`` :rtype: :class:`.NodeImage`: :return: NodeImage instance on success. """ raise NotImplementedError( 'copy_image not implemented for this driver') ## # SSH key pair management methods ## def list_key_pairs(self): """ List all the available key pair objects. 
:rtype: ``list`` of :class:`.KeyPair` objects """ raise NotImplementedError( 'list_key_pairs not implemented for this driver') def get_key_pair(self, name): """ Retrieve a single key pair. :param name: Name of the key pair to retrieve. :type name: ``str`` :rtype: :class:`.KeyPair` """ raise NotImplementedError( 'get_key_pair not implemented for this driver') def create_key_pair(self, name): """ Create a new key pair object. :param name: Key pair name. :type name: ``str`` """ raise NotImplementedError( 'create_key_pair not implemented for this driver') def import_key_pair_from_string(self, name, key_material): """ Import a new public key from string. :param name: Key pair name. :type name: ``str`` :param key_material: Public key material. :type key_material: ``str`` :rtype: :class:`.KeyPair` object """ raise NotImplementedError( 'import_key_pair_from_string not implemented for this driver') def import_key_pair_from_file(self, name, key_file_path): """ Import a new public key from string. :param name: Key pair name. :type name: ``str`` :param key_file_path: Path to the public key file. :type key_file_path: ``str`` :rtype: :class:`.KeyPair` object """ key_file_path = os.path.expanduser(key_file_path) with open(key_file_path, 'r') as fp: key_material = fp.read() return self.import_key_pair_from_string(name=name, key_material=key_material) def delete_key_pair(self, key_pair): """ Delete an existing key pair. :param key_pair: Key pair object. :type key_pair: :class:`.KeyPair` """ raise NotImplementedError( 'delete_key_pair not implemented for this driver') def wait_until_running(self, nodes, wait_period=3, timeout=600, ssh_interface='public_ips', force_ipv4=True, ex_list_nodes_kwargs=None): """ Block until the provided nodes are considered running. Node is considered running when it's state is "running" and when it has at least one IP address assigned. :param nodes: List of nodes to wait for. 
:type nodes: ``list`` of :class:`.Node` :param wait_period: How many seconds to wait between each loop iteration. (default is 3) :type wait_period: ``int`` :param timeout: How many seconds to wait before giving up. (default is 600) :type timeout: ``int`` :param ssh_interface: Which attribute on the node to use to obtain an IP address. Valid options: public_ips, private_ips. Default is public_ips. :type ssh_interface: ``str`` :param force_ipv4: Ignore IPv6 addresses (default is True). :type force_ipv4: ``bool`` :param ex_list_nodes_kwargs: Optional driver-specific keyword arguments which are passed to the ``list_nodes`` method. :type ex_list_nodes_kwargs: ``dict`` :return: ``[(Node, ip_addresses)]`` list of tuple of Node instance and list of ip_address on success. :rtype: ``list`` of ``tuple`` """ ex_list_nodes_kwargs = ex_list_nodes_kwargs or {} def is_supported(address): """ Return True for supported address. """ if force_ipv4 and not is_valid_ip_address(address=address, family=socket.AF_INET): return False return True def filter_addresses(addresses): """ Return list of supported addresses. """ return [address for address in addresses if is_supported(address)] if ssh_interface not in ['public_ips', 'private_ips']: raise ValueError('ssh_interface argument must either be' + 'public_ips or private_ips') start = time.time() end = start + timeout uuids = set([node.uuid for node in nodes]) while time.time() < end: all_nodes = self.list_nodes(**ex_list_nodes_kwargs) matching_nodes = list([node for node in all_nodes if node.uuid in uuids]) if len(matching_nodes) > len(uuids): found_uuids = [node.uuid for node in matching_nodes] msg = ('Unable to match specified uuids ' + '(%s) with existing nodes. 
Found ' % (uuids) + 'multiple nodes with same uuid: (%s)' % (found_uuids)) raise LibcloudError(value=msg, driver=self) running_nodes = [node for node in matching_nodes if node.state == NodeState.RUNNING] addresses = [filter_addresses(getattr(node, ssh_interface)) for node in running_nodes] if len(running_nodes) == len(uuids) == len(addresses): return list(zip(running_nodes, addresses)) else: time.sleep(wait_period) continue raise LibcloudError(value='Timed out after %s seconds' % (timeout), driver=self) def _get_and_check_auth(self, auth): """ Helper function for providers supporting :class:`.NodeAuthPassword` or :class:`.NodeAuthSSHKey` Validates that only a supported object type is passed to the auth parameter and raises an exception if it is not. If no :class:`.NodeAuthPassword` object is provided but one is expected then a password is automatically generated. """ if isinstance(auth, NodeAuthPassword): if 'password' in self.features['create_node']: return auth raise LibcloudError( 'Password provided as authentication information, but password' 'not supported', driver=self) if isinstance(auth, NodeAuthSSHKey): if 'ssh_key' in self.features['create_node']: return auth raise LibcloudError( 'SSH Key provided as authentication information, but SSH Key' 'not supported', driver=self) if 'password' in self.features['create_node']: value = os.urandom(16) value = binascii.hexlify(value).decode('ascii') # Some providers require password to also include uppercase # characters so convert some characters to uppercase password = '' for char in value: if not char.isdigit() and char.islower(): if random.randint(0, 1) == 1: char = char.upper() password += char return NodeAuthPassword(password, generated=True) if auth: raise LibcloudError( '"auth" argument provided, but it was not a NodeAuthPassword' 'or NodeAuthSSHKey object', driver=self) def _wait_until_running(self, node, wait_period=3, timeout=600, ssh_interface='public_ips', force_ipv4=True): # This is here for backward 
compatibility and will be removed in the # next major release return self.wait_until_running(nodes=[node], wait_period=wait_period, timeout=timeout, ssh_interface=ssh_interface, force_ipv4=force_ipv4) def _ssh_client_connect(self, ssh_client, wait_period=1.5, timeout=300): """ Try to connect to the remote SSH server. If a connection times out or is refused it is retried up to timeout number of seconds. :param ssh_client: A configured SSHClient instance :type ssh_client: ``SSHClient`` :param wait_period: How many seconds to wait between each loop iteration. (default is 1.5) :type wait_period: ``int`` :param timeout: How many seconds to wait before giving up. (default is 300) :type timeout: ``int`` :return: ``SSHClient`` on success """ start = time.time() end = start + timeout while time.time() < end: try: ssh_client.connect() except SSH_TIMEOUT_EXCEPTION_CLASSES: e = sys.exc_info()[1] message = str(e).lower() expected_msg = 'no such file or directory' if isinstance(e, IOError) and expected_msg in message: # Propagate (key) file doesn't exist errors raise e # Retry if a connection is refused, timeout occurred, # or the connection fails due to failed authentication. ssh_client.close() time.sleep(wait_period) continue else: return ssh_client raise LibcloudError(value='Could not connect to the remote SSH ' + 'server. Giving up.', driver=self) def _connect_and_run_deployment_script(self, task, node, ssh_hostname, ssh_port, ssh_username, ssh_password, ssh_key_file, ssh_timeout, timeout, max_tries): """ Establish an SSH connection to the node and run the provided deployment task. :rtype: :class:`.Node`: :return: Node instance on success. 
""" ssh_client = SSHClient(hostname=ssh_hostname, port=ssh_port, username=ssh_username, password=ssh_password, key_files=ssh_key_file, timeout=ssh_timeout) ssh_client = self._ssh_client_connect(ssh_client=ssh_client, timeout=timeout) # Execute the deployment task node = self._run_deployment_script(task=task, node=node, ssh_client=ssh_client, max_tries=max_tries) return node def _run_deployment_script(self, task, node, ssh_client, max_tries=3): """ Run the deployment script on the provided node. At this point it is assumed that SSH connection has already been established. :param task: Deployment task to run. :type task: :class:`Deployment` :param node: Node to run the task on. :type node: ``Node`` :param ssh_client: A configured and connected SSHClient instance. :type ssh_client: :class:`SSHClient` :param max_tries: How many times to retry if a deployment fails before giving up. (default is 3) :type max_tries: ``int`` :rtype: :class:`.Node` :return: ``Node`` Node instance on success. """ tries = 0 while tries < max_tries: try: node = task.run(node, ssh_client) except Exception: tries += 1 if tries >= max_tries: e = sys.exc_info()[1] raise LibcloudError(value='Failed after %d tries: %s' % (max_tries, str(e)), driver=self) else: # Deployment succeeded ssh_client.close() return node def _get_size_price(self, size_id): """ Return pricing information for the provided size id. """ return get_size_price(driver_type='compute', driver_name=self.api_name, size_id=size_id) if __name__ == '__main__': import doctest doctest.testmod() apache-libcloud-2.2.1/libcloud/compute/ssh.py0000664000175000017500000004320313155756605021055 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Wraps multiple ways to communicate over SSH. """ have_paramiko = False try: import paramiko have_paramiko = True except ImportError: pass # Depending on your version of Paramiko, it may cause a deprecation # warning on Python 2.6. # Ref: https://bugs.launchpad.net/paramiko/+bug/392973 import os import time import subprocess import logging import warnings from os.path import split as psplit from os.path import join as pjoin from libcloud.utils.logging import ExtraLogFormatter from libcloud.utils.py3 import StringIO from libcloud.utils.py3 import b __all__ = [ 'BaseSSHClient', 'ParamikoSSHClient', 'ShellOutSSHClient', 'SSHCommandTimeoutError' ] class SSHCommandTimeoutError(Exception): """ Exception which is raised when an SSH command times out. """ def __init__(self, cmd, timeout): self.cmd = cmd self.timeout = timeout message = 'Command didn\'t finish in %s seconds' % (timeout) super(SSHCommandTimeoutError, self).__init__(message) def __repr__(self): return ('' % (self.cmd, self.timeout)) def __str__(self): return self.message class BaseSSHClient(object): """ Base class representing a connection over SSH/SCP to a remote node. """ def __init__(self, hostname, port=22, username='root', password=None, key=None, key_files=None, timeout=None): """ :type hostname: ``str`` :keyword hostname: Hostname or IP address to connect to. :type port: ``int`` :keyword port: TCP port to communicate on, defaults to 22. 
:type username: ``str`` :keyword username: Username to use, defaults to root. :type password: ``str`` :keyword password: Password to authenticate with or a password used to unlock a private key if a password protected key is used. :param key: Deprecated in favor of ``key_files`` argument. :type key_files: ``str`` or ``list`` :keyword key_files: A list of paths to the private key files to use. """ if key is not None: message = ('You are using deprecated "key" argument which has ' 'been replaced with "key_files" argument') warnings.warn(message, DeprecationWarning) # key_files has precedent key_files = key if not key_files else key_files self.hostname = hostname self.port = port self.username = username self.password = password self.key_files = key_files self.timeout = timeout def connect(self): """ Connect to the remote node over SSH. :return: True if the connection has been successfully established, False otherwise. :rtype: ``bool`` """ raise NotImplementedError( 'connect not implemented for this ssh client') def put(self, path, contents=None, chmod=None, mode='w'): """ Upload a file to the remote node. :type path: ``str`` :keyword path: File path on the remote node. :type contents: ``str`` :keyword contents: File Contents. :type chmod: ``int`` :keyword chmod: chmod file to this after creation. :type mode: ``str`` :keyword mode: Mode in which the file is opened. :return: Full path to the location where a file has been saved. :rtype: ``str`` """ raise NotImplementedError( 'put not implemented for this ssh client') def delete(self, path): """ Delete/Unlink a file on the remote node. :type path: ``str`` :keyword path: File path on the remote node. :return: True if the file has been successfully deleted, False otherwise. :rtype: ``bool`` """ raise NotImplementedError( 'delete not implemented for this ssh client') def run(self, cmd): """ Run a command on a remote node. :type cmd: ``str`` :keyword cmd: Command to run. 
:return ``list`` of [stdout, stderr, exit_status] """ raise NotImplementedError( 'run not implemented for this ssh client') def close(self): """ Shutdown connection to the remote node. :return: True if the connection has been successfully closed, False otherwise. :rtype: ``bool`` """ raise NotImplementedError( 'close not implemented for this ssh client') def _get_and_setup_logger(self): logger = logging.getLogger('libcloud.compute.ssh') path = os.getenv('LIBCLOUD_DEBUG') if path: handler = logging.FileHandler(path) handler.setFormatter(ExtraLogFormatter()) logger.addHandler(handler) logger.setLevel(logging.DEBUG) return logger class ParamikoSSHClient(BaseSSHClient): """ A SSH Client powered by Paramiko. """ # Maximum number of bytes to read at once from a socket CHUNK_SIZE = 4096 # How long to sleep while waiting for command to finish (to prevent busy # waiting) SLEEP_DELAY = 0.2 def __init__(self, hostname, port=22, username='root', password=None, key=None, key_files=None, key_material=None, timeout=None): """ Authentication is always attempted in the following order: - The key passed in (if key is provided) - Any key we can find through an SSH agent (only if no password and key is provided) - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (only if no password and key is provided) - Plain username/password auth, if a password was given (if password is provided) """ if key_files and key_material: raise ValueError(('key_files and key_material arguments are ' 'mutually exclusive')) super(ParamikoSSHClient, self).__init__(hostname=hostname, port=port, username=username, password=password, key=key, key_files=key_files, timeout=timeout) self.key_material = key_material self.client = paramiko.SSHClient() self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) self.logger = self._get_and_setup_logger() def connect(self): conninfo = {'hostname': self.hostname, 'port': self.port, 'username': self.username, 'allow_agent': False, 'look_for_keys': False} if 
self.password: conninfo['password'] = self.password if self.key_files: conninfo['key_filename'] = self.key_files if self.key_material: conninfo['pkey'] = self._get_pkey_object(key=self.key_material) if not self.password and not (self.key_files or self.key_material): conninfo['allow_agent'] = True conninfo['look_for_keys'] = True if self.timeout: conninfo['timeout'] = self.timeout extra = {'_hostname': self.hostname, '_port': self.port, '_username': self.username, '_timeout': self.timeout} self.logger.debug('Connecting to server', extra=extra) self.client.connect(**conninfo) return True def put(self, path, contents=None, chmod=None, mode='w'): extra = {'_path': path, '_mode': mode, '_chmod': chmod} self.logger.debug('Uploading file', extra=extra) sftp = self.client.open_sftp() # less than ideal, but we need to mkdir stuff otherwise file() fails head, tail = psplit(path) if path[0] == "/": sftp.chdir("/") else: # Relative path - start from a home directory (~) sftp.chdir('.') for part in head.split("/"): if part != "": try: sftp.mkdir(part) except IOError: # so, there doesn't seem to be a way to # catch EEXIST consistently *sigh* pass sftp.chdir(part) cwd = sftp.getcwd() ak = sftp.file(tail, mode=mode) ak.write(contents) if chmod is not None: ak.chmod(chmod) ak.close() sftp.close() if path[0] == '/': file_path = path else: file_path = pjoin(cwd, path) return file_path def delete(self, path): extra = {'_path': path} self.logger.debug('Deleting file', extra=extra) sftp = self.client.open_sftp() sftp.unlink(path) sftp.close() return True def run(self, cmd, timeout=None): """ Note: This function is based on paramiko's exec_command() method. :param timeout: How long to wait (in seconds) for the command to finish (optional). 
:type timeout: ``float`` """ extra = {'_cmd': cmd} self.logger.debug('Executing command', extra=extra) # Use the system default buffer size bufsize = -1 transport = self.client.get_transport() chan = transport.open_session() start_time = time.time() chan.exec_command(cmd) stdout = StringIO() stderr = StringIO() # Create a stdin file and immediately close it to prevent any # interactive script from hanging the process. stdin = chan.makefile('wb', bufsize) stdin.close() # Receive all the output # Note #1: This is used instead of chan.makefile approach to prevent # buffering issues and hanging if the executed command produces a lot # of output. # # Note #2: If you are going to remove "ready" checks inside the loop # you are going to have a bad time. Trying to consume from a channel # which is not ready will block for indefinitely. exit_status_ready = chan.exit_status_ready() if exit_status_ready: # It's possible that some data is already available when exit # status is ready stdout.write(self._consume_stdout(chan).getvalue()) stderr.write(self._consume_stderr(chan).getvalue()) while not exit_status_ready: current_time = time.time() elapsed_time = (current_time - start_time) if timeout and (elapsed_time > timeout): # TODO: Is this the right way to clean up? chan.close() raise SSHCommandTimeoutError(cmd=cmd, timeout=timeout) stdout.write(self._consume_stdout(chan).getvalue()) stderr.write(self._consume_stderr(chan).getvalue()) # We need to check the exist status here, because the command could # print some output and exit during this sleep below. exit_status_ready = chan.exit_status_ready() if exit_status_ready: break # Short sleep to prevent busy waiting time.sleep(self.SLEEP_DELAY) # Receive the exit status code of the command we ran. 
status = chan.recv_exit_status() stdout = stdout.getvalue() stderr = stderr.getvalue() extra = {'_status': status, '_stdout': stdout, '_stderr': stderr} self.logger.debug('Command finished', extra=extra) return [stdout, stderr, status] def close(self): self.logger.debug('Closing server connection') self.client.close() return True def _consume_stdout(self, chan): """ Try to consume stdout data from chan if it's receive ready. """ stdout = self._consume_data_from_channel( chan=chan, recv_method=chan.recv, recv_ready_method=chan.recv_ready) return stdout def _consume_stderr(self, chan): """ Try to consume stderr data from chan if it's receive ready. """ stderr = self._consume_data_from_channel( chan=chan, recv_method=chan.recv_stderr, recv_ready_method=chan.recv_stderr_ready) return stderr def _consume_data_from_channel(self, chan, recv_method, recv_ready_method): """ Try to consume data from the provided channel. Keep in mind that data is only consumed if the channel is receive ready. """ result = StringIO() result_bytes = bytearray() if recv_ready_method(): data = recv_method(self.CHUNK_SIZE) result_bytes += b(data) while data: ready = recv_ready_method() if not ready: break data = recv_method(self.CHUNK_SIZE) result_bytes += b(data) # We only decode data at the end because a single chunk could contain # a part of multi byte UTF-8 character (whole multi bytes character # could be split over two chunks) result.write(result_bytes.decode('utf-8')) return result def _get_pkey_object(self, key): """ Try to detect private key type and return paramiko.PKey object. 
""" for cls in [paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey]: try: key = cls.from_private_key(StringIO(key)) except paramiko.ssh_exception.SSHException: # Invalid key, try other key type pass else: return key msg = ('Invalid or unsupported key type (only RSA, DSS and ECDSA keys' ' are supported)') raise paramiko.ssh_exception.SSHException(msg) class ShellOutSSHClient(BaseSSHClient): """ This client shells out to "ssh" binary to run commands on the remote server. Note: This client should not be used in production. """ def __init__(self, hostname, port=22, username='root', password=None, key=None, key_files=None, timeout=None): super(ShellOutSSHClient, self).__init__(hostname=hostname, port=port, username=username, password=password, key=key, key_files=key_files, timeout=timeout) if self.password: raise ValueError('ShellOutSSHClient only supports key auth') child = subprocess.Popen(['ssh'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) child.communicate() if child.returncode == 127: raise ValueError('ssh client is not available') self.logger = self._get_and_setup_logger() def connect(self): """ This client doesn't support persistent connections establish a new connection every time "run" method is called. 
""" return True def run(self, cmd): return self._run_remote_shell_command([cmd]) def put(self, path, contents=None, chmod=None, mode='w'): if mode == 'w': redirect = '>' elif mode == 'a': redirect = '>>' else: raise ValueError('Invalid mode: ' + mode) cmd = ['echo "%s" %s %s' % (contents, redirect, path)] self._run_remote_shell_command(cmd) return path def delete(self, path): cmd = ['rm', '-rf', path] self._run_remote_shell_command(cmd) return True def close(self): return True def _get_base_ssh_command(self): cmd = ['ssh'] if self.key_files: cmd += ['-i', self.key_files] if self.timeout: cmd += ['-oConnectTimeout=%s' % (self.timeout)] cmd += ['%s@%s' % (self.username, self.hostname)] return cmd def _run_remote_shell_command(self, cmd): """ Run a command on a remote server. :param cmd: Command to run. :type cmd: ``list`` of ``str`` :return: Command stdout, stderr and status code. :rtype: ``tuple`` """ base_cmd = self._get_base_ssh_command() full_cmd = base_cmd + [' '.join(cmd)] self.logger.debug('Executing command: "%s"' % (' '.join(full_cmd))) child = subprocess.Popen(full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = child.communicate() return (stdout, stderr, child.returncode) class MockSSHClient(BaseSSHClient): pass SSHClient = ParamikoSSHClient if not have_paramiko: SSHClient = MockSSHClient apache-libcloud-2.2.1/libcloud/http.py0000664000175000017500000002235013160264502017545 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Subclass for httplib.HTTPSConnection with optional certificate name verification, depending on libcloud.security settings. """ import os import warnings import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.poolmanager import PoolManager import libcloud.security from libcloud.utils.py3 import urlparse, PY3 __all__ = [ 'LibcloudBaseConnection', 'LibcloudConnection' ] ALLOW_REDIRECTS = 1 HTTP_PROXY_ENV_VARIABLE_NAME = 'http_proxy' class SignedHTTPSAdapter(HTTPAdapter): def __init__(self, cert_file, key_file): self.cert_file = cert_file self.key_file = key_file super(SignedHTTPSAdapter, self).__init__() def init_poolmanager(self, connections, maxsize, block=False): self.poolmanager = PoolManager( num_pools=connections, maxsize=maxsize, block=block, cert_file=self.cert_file, key_file=self.key_file) class LibcloudBaseConnection(object): """ Base connection class to inherit from. Note: This class should not be instantiated directly. """ session = None proxy_scheme = None proxy_host = None proxy_port = None proxy_username = None proxy_password = None http_proxy_used = False ca_cert = None def __init__(self): self.session = requests.Session() def set_http_proxy(self, proxy_url): """ Set a HTTP proxy which will be used with this connection. :param proxy_url: Proxy URL (e.g. http://: without authentication and http://:@: for basic auth authentication information. 
:type proxy_url: ``str`` """ result = self._parse_proxy_url(proxy_url=proxy_url) scheme = result[0] host = result[1] port = result[2] username = result[3] password = result[4] self.proxy_scheme = scheme self.proxy_host = host self.proxy_port = port self.proxy_username = username self.proxy_password = password self.http_proxy_used = True self.session.proxies = { self.proxy_scheme: proxy_url } def _parse_proxy_url(self, proxy_url): """ Parse and validate a proxy URL. :param proxy_url: Proxy URL (e.g. http://hostname:3128) :type proxy_url: ``str`` :rtype: ``tuple`` (``scheme``, ``hostname``, ``port``) """ parsed = urlparse.urlparse(proxy_url) if parsed.scheme != 'http': raise ValueError('Only http proxies are supported') if not parsed.hostname or not parsed.port: raise ValueError('proxy_url must be in the following format: ' 'http://:') proxy_scheme = parsed.scheme proxy_host, proxy_port = parsed.hostname, parsed.port netloc = parsed.netloc if '@' in netloc: username_password = netloc.split('@', 1)[0] split = username_password.split(':', 1) if len(split) < 2: raise ValueError('URL is in an invalid format') proxy_username, proxy_password = split[0], split[1] else: proxy_username = None proxy_password = None return (proxy_scheme, proxy_host, proxy_port, proxy_username, proxy_password) def _setup_verify(self): self.verify = libcloud.security.VERIFY_SSL_CERT def _setup_ca_cert(self): if self.verify is False: pass else: if isinstance(libcloud.security.CA_CERTS_PATH, list): if len(libcloud.security.CA_CERTS_PATH) > 1: warnings.warn('Only 1 certificate path is supported') self.ca_cert = libcloud.security.CA_CERTS_PATH[0] else: self.ca_cert = libcloud.security.CA_CERTS_PATH def _setup_signing(self, cert_file=None, key_file=None): """ Setup request signing by mounting a signing adapter to the session """ self.session.mount('https://', SignedHTTPSAdapter(cert_file, key_file)) class LibcloudConnection(LibcloudBaseConnection): timeout = None host = None response = None def 
__init__(self, host, port, secure=None, **kwargs): scheme = 'https' if secure is not None and secure else 'http' self.host = '{0}://{1}{2}'.format( 'https' if port == 443 else scheme, host, ":{0}".format(port) if port not in (80, 443) else "" ) # Support for HTTP proxy proxy_url_env = os.environ.get(HTTP_PROXY_ENV_VARIABLE_NAME, None) proxy_url = kwargs.pop('proxy_url', proxy_url_env) self._setup_verify() self._setup_ca_cert() LibcloudBaseConnection.__init__(self) if 'cert_file' in kwargs or 'key_file' in kwargs: self._setup_signing(**kwargs) if proxy_url: self.set_http_proxy(proxy_url=proxy_url) self.session.timeout = kwargs.get('timeout', 60) @property def verification(self): """ The option for SSL verification given to underlying requests """ return self.ca_cert if self.ca_cert is not None else self.verify def request(self, method, url, body=None, headers=None, raw=False, stream=False): url = urlparse.urljoin(self.host, url) headers = self._normalize_headers(headers=headers) self.response = self.session.request( method=method.lower(), url=url, data=body, headers=headers, allow_redirects=ALLOW_REDIRECTS, stream=stream, verify=self.verification ) def prepared_request(self, method, url, body=None, headers=None, raw=False, stream=False): headers = self._normalize_headers(headers=headers) req = requests.Request(method, ''.join([self.host, url]), data=body, headers=headers) prepped = self.session.prepare_request(req) prepped.body = body self.response = self.session.send( prepped, stream=raw, verify=self.ca_cert if self.ca_cert is not None else self.verify) def getresponse(self): return self.response def getheaders(self): # urlib decoded response body, libcloud has a bug # and will not check if content is gzipped, so let's # remove headers indicating compressed content. 
class HttpLibResponseProxy(object):
    """
    Wrap a :class:`requests.Response` so it can be consumed through the
    :class:`httplib.HTTPResponse` interface expected by older callers.
    """

    def __init__(self, response):
        self._response = response

    def read(self, amt=None):
        # ``amt`` is accepted for httplib compatibility, but the whole
        # decoded body text is always returned.
        return self._response.text

    def getheader(self, name, default=None):
        """
        Return the value of header ``name``, or ``default`` when no
        such header is present.
        """
        headers = self._response.headers
        if name in headers.keys():
            return headers[name]
        return default

    def getheaders(self):
        """
        Return the response headers as a list of (header, value) tuples.
        """
        header_items = self._response.headers.items()
        return list(header_items) if PY3 else header_items

    @property
    def status(self):
        return self._response.status_code

    @property
    def reason(self):
        return self._response.reason

    @property
    def version(self):
        # requests does not expose the HTTP version; report HTTP/1.1.
        return '11'
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.container.types import Provider from libcloud.common.providers import get_driver as _get_provider_driver from libcloud.common.providers import set_driver as _set_provider_driver DRIVERS = { Provider.DUMMY: ('libcloud.container.drivers.dummy', 'DummyContainerDriver'), Provider.DOCKER: ('libcloud.container.drivers.docker', 'DockerContainerDriver'), Provider.JOYENT: ('libcloud.container.drivers.joyent', 'JoyentContainerDriver'), Provider.ECS: ('libcloud.container.drivers.ecs', 'ElasticContainerDriver'), Provider.KUBERNETES: ('libcloud.container.drivers.kubernetes', 'KubernetesContainerDriver'), Provider.RANCHER: ('libcloud.container.drivers.rancher', 'RancherContainerDriver'), Provider.GKE: ('libcloud.container.drivers.gke', 'GKEContainerDriver') } def get_driver(provider): return _get_provider_driver(drivers=DRIVERS, provider=provider) def set_driver(provider, module, klass): return _set_provider_driver(drivers=DRIVERS, provider=provider, module=module, klass=klass) apache-libcloud-2.2.1/libcloud/container/__init__.py0000664000175000017500000000000012701023453022271 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/container/types.py0000664000175000017500000000533413153541406021722 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
class Type(object):
    @classmethod
    def tostring(cls, value):
        """Return the uppercase string form of a state value.

        :param str value: the state value to convert
        :return: the uppercase representation of ``value``
        :rtype: str
        """
        return value.upper()

    @classmethod
    def fromstring(cls, value):
        """Resolve a string to the matching class attribute.

        :param str value: the string to look up (case-insensitive)
        :return: the matching attribute value, or ``None`` if absent
        :rtype: str
        """
        attr_name = value.upper()
        return getattr(cls, attr_name, None)
Usually no operations can be performed on the container once it ends up in the error state. :cvar PAUSED: Container is paused. :cvar UNKNOWN: Container state is unknown. """ RUNNING = 'running' REBOOTING = 'rebooting' TERMINATED = 'terminated' PENDING = 'pending' UNKNOWN = 'unknown' STOPPED = 'stopped' SUSPENDED = 'suspended' ERROR = 'error' PAUSED = 'paused' apache-libcloud-2.2.1/libcloud/container/drivers/0000775000175000017500000000000013160535107021654 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/container/drivers/__init__.py0000664000175000017500000000000012701023453023747 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/container/drivers/ecs.py0000664000175000017500000005033612701023453023003 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
def __init__(self, access_id, secret, region):
    """
    :param access_id: AWS access key ID
    :type access_id: ``str``
    :param secret: AWS secret access key
    :type secret: ``str``
    :param region: AWS region the driver operates in (used to build
                   both the ECS and ECR endpoints)
    :type region: ``str``
    """
    super(ElasticContainerDriver, self).__init__(access_id, secret)
    self.region = region
    self.region_name = region
    # Point the default (ECS) connection at the regional endpoint.
    self.connection.host = ECS_HOST % (region)

    # Setup another connection class for ECR
    conn_kwargs = self._ex_connection_class_kwargs()
    self.ecr_connection = self.ecrConnectionClass(
        access_id, secret, **conn_kwargs)
    self.ecr_connection.host = ECR_HOST % (region)
    self.ecr_connection.driver = self
    self.ecr_connection.connect()
def list_clusters(self):
    """
    Get a list of potential locations to deploy clusters into

    :param location: The location to search in
    :type location: :class:`libcloud.container.base.ClusterLocation`

    :rtype: ``list`` of :class:`libcloud.container.base.ContainerCluster`
    """
    # First collect every cluster ARN, then describe them all in bulk.
    arns = self.connection.request(
        ROOT,
        method='POST',
        data=json.dumps({}),
        headers=self._get_headers('ListClusters')
    ).object['clusterArns']

    described = self.connection.request(
        ROOT,
        method='POST',
        data=json.dumps({'clusters': arns}),
        headers=self._get_headers('DescribeClusters')
    ).object
    return self._to_clusters(described)
def list_containers(self, image=None, cluster=None):
    """
    List the deployed container images

    :param image: Filter to containers with a certain image
    :type image: :class:`libcloud.container.base.ContainerImage`

    :param cluster: Filter to containers in a cluster
    :type cluster: :class:`libcloud.container.base.ContainerCluster`

    :rtype: ``list`` of :class:`libcloud.container.base.Container`
    """
    # Build the ListTasks filter; the 'default' cluster is assumed when
    # none is supplied.
    request = {
        'cluster': cluster.id if cluster is not None else 'default'
    }
    if image is not None:
        request['family'] = image.name

    task_arns = self.connection.request(
        ROOT,
        method='POST',
        data=json.dumps(request),
        headers=self._get_headers('ListTasks')
    ).object['taskArns']

    if not task_arns:
        return []
    return self.ex_list_containers_for_task(task_arns)
def get_container(self, id):
    """
    Get a container by ID

    :param id: The ID of the container to get
    :type id: ``str``

    :rtype: :class:`libcloud.container.base.Container`
    """
    # DescribeTasks takes a collection of ARNs; look up just this one
    # and return the single resulting container.
    return self.ex_list_containers_for_task([id])[0]
def ex_start_task(self, task_arn, count=1):
    """
    Run a task definition and get the containers

    :param task_arn: The task ARN to Run
    :type task_arn: ``str``

    :param count: The number of containers to start
    :type count: ``int``

    :rtype: ``list`` of :class:`libcloud.container.base.Container`
    """
    # (previously had a dead ``request = None`` that was immediately
    # overwritten -- removed)
    request = {'count': count, 'taskDefinition': task_arn}
    response = self.connection.request(
        ROOT,
        method='POST',
        data=json.dumps(request),
        headers=self._get_headers('RunTask')
    ).object

    containers = []
    for task in response['tasks']:
        containers.extend(self._to_containers(task, task_arn))
    return containers
def ex_create_service(self, name, cluster, task_definition,
                      desired_count=1):
    """
    Runs and maintains a desired number of tasks from a specified
    task definition. If the number of tasks running in a service
    drops below desired_count, Amazon ECS spawns another
    instantiation of the task in the specified cluster.

    :param name: the name of the service
    :type name: ``str``

    :param cluster: The cluster to run the service on
    :type cluster: :class:`libcloud.container.base.ContainerCluster`

    :param task_definition: The task definition name or ARN for the
        service
    :type task_definition: ``str``

    :param desired_count: The desired number of tasks to be running
        at any one time
    :type desired_count: ``int``

    :rtype: ``object`` The service object
    """
    payload = {
        'serviceName': name,
        'taskDefinition': task_definition,
        'desiredCount': desired_count,
        'cluster': cluster.id}

    created = self.connection.request(
        ROOT,
        method='POST',
        data=json.dumps(payload),
        headers=self._get_headers('CreateService')
    ).object
    return created['service']
def ex_destroy_service(self, service_arn):
    """
    Deletes a service

    :param service_arn: The service ARN to destroy
    :type service_arn: ``str``

    :return: The deleted service object
    :rtype: ``object``
    """
    # (docstring previously documented a nonexistent ``cluster``
    # parameter and omitted the return value -- corrected)
    request = {
        'service': service_arn}
    response = self.connection.request(
        ROOT,
        method='POST',
        data=json.dumps(request),
        headers=self._get_headers('DeleteService')
    ).object
    return response['service']
data=json.dumps(request), headers=self._get_ecr_headers('DescribeRepositories') ).object repository_id = list_response['repositories'][0]['registryId'] return repository_id def _get_ecr_host(self, repository_id): return self.ecr_repository_host % ( repository_id, self.region) def _get_headers(self, action): """ Get the default headers for a request to the ECS API """ return {'x-amz-target': '%s.%s' % (ECS_TARGET_BASE, action), 'Content-Type': 'application/x-amz-json-1.1' } def _get_ecr_headers(self, action): """ Get the default headers for a request to the ECR API """ return {'x-amz-target': '%s.%s' % (ECR_TARGET_BASE, action), 'Content-Type': 'application/x-amz-json-1.1' } def _to_clusters(self, data): clusters = [] for cluster in data['clusters']: clusters.append(self._to_cluster(cluster)) return clusters def _to_cluster(self, data): return ContainerCluster( id=data['clusterArn'], name=data['clusterName'], driver=self.connection.driver ) def _to_containers(self, data, task_definition_arn): clusters = [] for cluster in data['containers']: clusters.append(self._to_container(cluster, task_definition_arn)) return clusters def _to_container(self, data, task_definition_arn): return Container( id=data['containerArn'], name=data['name'], image=ContainerImage( id=None, name=data['name'], path=None, version=None, driver=self.connection.driver ), ip_addresses=None, state=self.status_map.get(data['lastStatus'], None), extra={ 'taskArn': data['taskArn'], 'taskDefinitionArn': task_definition_arn }, driver=self.connection.driver ) def _to_images(self, data, host, repository_name): images = [] for image in data: images.append(self._to_image(image, host, repository_name)) return images def _to_image(self, data, host, repository_name): path = '%s/%s:%s' % ( host, repository_name, data['imageTag'] ) return ContainerImage( id=None, name=path, path=path, version=data['imageTag'], driver=self.connection.driver ) 
class RancherResponse(JsonResponse):
    """Response class which formats Rancher API error payloads."""

    def parse_error(self):
        body = super(RancherResponse, self).parse_error()
        # Validation failures carry a fieldName; include it in the
        # formatted message when present.
        if 'fieldName' not in body:
            return "%s - %s" % (body['message'], body['detail'])
        return "Field %s is %s: %s - %s" % (body['fieldName'],
                                            body['code'],
                                            body['message'],
                                            body['detail'])

    def success(self):
        return self.status in VALID_RESPONSE_CODES
def add_default_headers(self, headers):
    """
    Add parameters that are necessary for every request
    If user and password are specified, include a base http auth header
    """
    headers['Content-Type'] = 'application/json'
    headers['Accept'] = 'application/json'
    if self.user_id and self.key:
        credentials = '%s:%s' % (self.user_id, self.key)
        encoded = base64.b64encode(b(credentials)).decode('utf-8')
        headers['Authorization'] = 'Basic %s' % (encoded)
    return headers
def ex_list_stacks(self):
    """
    List all Rancher Stacks

    http://docs.rancher.com/rancher/v1.2/en/api/api-resources/environment/

    :rtype: ``list`` of ``dict``
    """
    response = self.connection.request(
        "%s/environments" % self.baseuri)
    return response.object['data']
def ex_search_stacks(self, search_params):
    """
    Search for stacks matching certain filters

    i.e. ``{ "name": "awesomestack"}``

    :param search_params: A collection of search parameters to use.
    :type search_params: ``dict``

    :rtype: ``list``
    """
    # Build the query string directly from the filter mapping.
    query = '&'.join(field + '=' + value
                     for field, value in search_params.items())
    matches = self.connection.request(
        "%s/environments?%s" % (self.baseuri, query)).object
    return matches['data']
def ex_deploy_service(self, name, image, environment_id, start=True,
                      assign_service_ip_address=None,
                      service_description=None, external_id=None,
                      metadata=None, retain_ip=None, scale=None,
                      scale_policy=None, secondary_launch_configs=None,
                      selector_container=None, selector_link=None,
                      vip=None, **launch_conf):
    """
    Deploy a Rancher Service under a stack.

    http://docs.rancher.com/rancher/v1.2/en/api/api-resources/service/#create

    *Any further configuration passed applies to the ``launchConfig``*

    :param name: The desired name of the service. (required)
    :type name: ``str``

    :param image: The Image object to deploy. (required)
    :type image: :class:`libcloud.container.base.ContainerImage`

    :param environment_id: The stack ID this service is tied to.
        (required)
    :type environment_id: ``str``

    :param start: Whether to start the service on creation.
    :type start: ``bool``

    :param assign_service_ip_address: The IP address to assign the
        service.
    :type assign_service_ip_address: ``bool``

    :param service_description: The service description.
    :type service_description: ``str``

    :param external_id: The externalId for this service.
    :type external_id: ``str``

    :param metadata: K/V Metadata for this service.
    :type metadata: ``dict``

    :param retain_ip: Whether this service should retain its IP.
    :type retain_ip: ``bool``

    :param scale: The scale of containers in this service.
    :type scale: ``int``

    :param scale_policy: The scaling policy for this service.
    :type scale_policy: ``dict``

    :param secondary_launch_configs: Secondary container launch configs.
    :type secondary_launch_configs: ``list``

    :param selector_container: The selectorContainer for this service.
    :type selector_container: ``str``

    :param selector_link: The selectorLink for this service.
    :type selector_link: ``type``

    :param vip: The VIP to assign to this service.
    :type vip: ``str``

    :return: The newly created service.
    :rtype: ``dict``
    """
    # Bug fix: the image UUID was previously assigned with a trailing
    # comma, which wrapped the value in a 1-tuple and produced an
    # invalid ``imageUuid`` in the request payload.
    launch_conf['imageUuid'] = self._degen_image(image)
    service_payload = {
        "assignServiceIpAddress": assign_service_ip_address,
        "description": service_description,
        "environmentId": environment_id,
        "externalId": external_id,
        "launchConfig": launch_conf,
        "metadata": metadata,
        "name": name,
        "retainIp": retain_ip,
        "scale": scale,
        "scalePolicy": scale_policy,
        # NOTE(review): renamed from snake_case
        # "secondary_launch_configs" to camelCase to match every other
        # payload key the Rancher v1 API expects -- confirm against the
        # API reference.
        "secondaryLaunchConfigs": secondary_launch_configs,
        "selectorContainer": selector_container,
        "selectorLink": selector_link,
        "startOnCreate": start,
        "vip": vip
    }

    # Strip keys with a None value so they do not override server-side
    # defaults.
    data = json.dumps(dict((k, v) for (k, v) in service_payload.items()
                           if v is not None))
    result = self.connection.request('%s/services' % self.baseuri,
                                     data=data, method='POST').object
    return result
def ex_destroy_service(self, service_id):
    """
    Destroy a service by ID

    http://docs.rancher.com/rancher/v1.2/en/api/api-resources/service/#delete

    :param service_id: The service to be destroyed.
    :type service_id: ``str``

    :return: True if destroy was successful, False otherwise.
    :rtype: ``bool``
    """
    url = '%s/services/%s' % (self.baseuri, service_id)
    response = self.connection.request(url, method='DELETE')
    return response.status in VALID_RESPONSE_CODES


def ex_activate_service(self, service_id):
    """
    Activate a service.

    http://docs.rancher.com/rancher/v1.2/en/api/api-resources/service/#activate

    :param service_id: The service to activate services for.
    :type service_id: ``str``

    :return: True if activate was successful, False otherwise.
    :rtype: ``bool``
    """
    url = '%s/services/%s?action=activate' % (self.baseuri, service_id)
    response = self.connection.request(url, method='POST')
    return response.status in VALID_RESPONSE_CODES


def ex_deactivate_service(self, service_id):
    """
    Deactivate a service.

    http://docs.rancher.com/rancher/v1.2/en/api/api-resources/service/#deactivate

    :param service_id: The service to deactivate services for.
    :type service_id: ``str``

    :return: True if deactivate was successful, False otherwise.
    :rtype: ``bool``
    """
    url = '%s/services/%s?action=deactivate' % (self.baseuri, service_id)
    response = self.connection.request(url, method='POST')
    return response.status in VALID_RESPONSE_CODES
def list_containers(self):
    """
    List the deployed containers.

    http://docs.rancher.com/rancher/v1.2/en/api/api-resources/container/

    :rtype: ``list`` of :class:`libcloud.container.base.Container`
    """
    response = self.connection.request(
        "%s/containers" % self.baseuri).object
    return [self._to_container(record) for record in response['data']]


def deploy_container(self, name, image, parameters=None, start=True,
                     **config):
    """
    Deploy a new container.

    http://docs.rancher.com/rancher/v1.2/en/api/api-resources/container/#create

    **The following is the Image format used for ``ContainerImage``**

    *For a ``imageuuid``*:

    - ``docker:<hostname>:<port>/<namespace>/<imagename>:<version>``

    *The following applies*:

    - ``id`` = ``<imagename>``
    - ``name`` = ``<imagename>``
    - ``path`` = ``<hostname>:<port>/<namespace>/<imagename>``
    - ``version`` = ``<version>``

    *Any extra configuration can also be passed i.e. "environment"*

    :param name: The desired name of the container. (required)
    :type name: ``str``

    :param image: The Image object to deploy. (required)
    :type image: :class:`libcloud.container.base.ContainerImage`

    :param parameters: Container Image parameters (unused)
    :type parameters: ``str``

    :param start: Whether to start the container on creation(startOnCreate)
    :type start: ``bool``

    :rtype: :class:`Container`
    """
    # Required fields override any identically-named extra config.
    config.update({
        "name": name,
        "imageUuid": self._degen_image(image),
        "startOnCreate": start,
    })
    response = self.connection.request(
        '%s/containers' % self.baseuri,
        data=json.dumps(config), method='POST').object
    return self._to_container(response)


def get_container(self, con_id):
    """
    Get a container by ID

    :param con_id: The ID of the container to get
    :type con_id: ``str``

    :rtype: :class:`libcloud.container.base.Container`
    """
    response = self.connection.request(
        "%s/containers/%s" % (self.baseuri, con_id)).object
    return self._to_container(response)
def start_container(self, container):
    """
    Start a container

    :param container: The container to be started
    :type container: :class:`libcloud.container.base.Container`

    :return: The container refreshed with current data
    :rtype: :class:`libcloud.container.base.Container`
    """
    result = self.connection.request('%s/containers/%s?action=start' %
                                     (self.baseuri, container.id),
                                     method='POST').object
    return self._to_container(result)


def stop_container(self, container):
    """
    Stop a container

    :param container: The container to be stopped
    :type container: :class:`libcloud.container.base.Container`

    :return: The container refreshed with current data
    :rtype: :class:`libcloud.container.base.Container`
    """
    result = self.connection.request('%s/containers/%s?action=stop' %
                                     (self.baseuri, container.id),
                                     method='POST').object
    return self._to_container(result)


def ex_search_containers(self, search_params):
    """
    Search for containers matching certain filters

    i.e. ``{ "imageUuid": "docker:mysql", "state": "running"}``

    :param search_params: A collection of search parameters to use.
    :type search_params: ``dict``

    :rtype: ``list``
    """
    # Values are joined verbatim; they are expected to be plain strings
    # that need no URL quoting (matches ex_search_services).
    query = '&'.join('%s=%s' % (field, value)
                     for field, value in search_params.items())
    result = self.connection.request("%s/containers?%s" % (
        self.baseuri, query)).object
    return result['data']


def destroy_container(self, container):
    """
    Remove a container

    :param container: The container to be destroyed
    :type container: :class:`libcloud.container.base.Container`

    :return: True if the destroy was successful, False otherwise.
    :rtype: ``bool``
    """
    # BUG FIX: this method is documented here (and contracted by the base
    # ContainerDriver API) to return a bool, but previously returned
    # self._to_container(result) built from the DELETE response body.
    response = self.connection.request('%s/containers/%s' %
                                       (self.baseuri, container.id),
                                       method='DELETE')
    return response.status in VALID_RESPONSE_CODES
:rtype: :class:`libcloud.container.base.ContainerImage` """ # Obtain just the name(:version) for parsing if '/' not in imageuuid: # String looks like `docker:mysql:8.0` image_name_version = imageuuid.partition(':')[2] else: # String looks like `docker:oracle/mysql:8.0` image_name_version = imageuuid.rpartition("/")[2] # Parse based on ':' if ':' in image_name_version: version = image_name_version.partition(":")[2] id = image_name_version.partition(":")[0] name = id else: version = 'latest' id = image_name_version name = id # Get our path based on if there was a version if version != 'latest': path = imageuuid.partition(':')[2].rpartition(':')[0] else: path = imageuuid.partition(':')[2] return ContainerImage( id=id, name=name, path=path, version=version, driver=self.connection.driver, extra={ "imageUuid": imageuuid } ) def _degen_image(self, image): """ Take in an image object to break down into an ``imageUuid`` :param image: :return: """ # Only supporting docker atm image_type = "docker" if image.version is not None: return image_type + ':' + image.path + ':' + image.version else: return image_type + ':' + image.path def _to_container(self, data): """ Convert container in proper Container instance object ** Updating is NOT supported!! :param data: API data about container i.e. result.object :return: Proper Container object: see http://libcloud.readthedocs.io/en/latest/container/api.html """ rancher_state = data['state'] # A Removed container is purged after x amt of time. 
# Both of these render the container dead (can't be started later) terminate_condition = ["removed", "purged"] if 'running' in rancher_state: state = ContainerState.RUNNING elif 'stopped' in rancher_state: state = ContainerState.STOPPED elif 'restarting' in rancher_state: state = ContainerState.REBOOTING elif 'error' in rancher_state: state = ContainerState.ERROR elif any(x in rancher_state for x in terminate_condition): state = ContainerState.TERMINATED elif data['transitioning'] == 'yes': # Best we can do for current actions state = ContainerState.PENDING else: state = ContainerState.UNKNOWN # Everything contained in the json response is dumped in extra extra = data return Container( id=data['id'], name=data['name'], image=self._gen_image(data['imageUuid']), ip_addresses=[data['primaryIpAddress']], state=state, driver=self.connection.driver, extra=extra) apache-libcloud-2.2.1/libcloud/container/drivers/joyent.py0000664000175000017500000000566612701023453023547 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
class JoyentContainerDriver(DockerContainerDriver):
    """
    Joyent Triton container driver class.

    >>> from libcloud.container.providers import get_driver
    >>> driver = get_driver('joyent')
    >>> conn = driver(host='https://us-east-1.docker.joyent.com',
        port=2376, key_file='key.pem', cert_file='cert.pem')
    """

    type = Provider.JOYENT
    name = 'Joyent Triton'
    website = 'http://joyent.com'
    connectionCls = DockerConnection
    supports_clusters = False

    def __init__(self, key=None, secret=None, secure=False, host='localhost',
                 port=2376, key_file=None, cert_file=None):
        """
        :param key: API key or username to be used (optional)
        :param secret: Secret password to be used (optional)
        :param secure: Whether to use HTTPS (forced on when the host URL
            uses the https scheme or TLS files are given).
        :param host: Hostname (may carry an http:// or https:// scheme).
        :param port: API port.
        :param key_file: Path to private key for TLS connection (optional)
        :param cert_file: Path to public key for TLS connection (optional)
        """
        super(JoyentContainerDriver, self).__init__(
            key=key, secret=secret, secure=secure, host=host, port=port,
            key_file=key_file, cert_file=cert_file)

        if host.startswith('https://'):
            secure = True

        # Strip the URL scheme off the host, if present.
        # BUG FIX: the original used host.strip(prefix), which removes any
        # of the prefix's *characters* from both ends of the string and
        # can corrupt hostnames (e.g. 'https://shipper' -> 'ipper').
        # Slice the matched prefix off instead.
        prefixes = ['http://', 'https://']
        for prefix in prefixes:
            if host.startswith(prefix):
                host = host[len(prefix):]
                break

        if key_file or cert_file:
            # docker tls authentication-
            # https://docs.docker.com/articles/https/
            # We pass two files, a key_file with the
            # private key and cert_file with the certificate
            # libcloud will handle them through LibcloudHTTPSConnection
            if not (key_file and cert_file):
                raise Exception(
                    'Needs both private key file and '
                    'certificate file for tls authentication')
            self.connection.key_file = key_file
            self.connection.cert_file = cert_file
            self.connection.secure = True
        else:
            self.connection.secure = secure

        self.connection.host = host
        self.connection.port = port
class KubernetesException(Exception):
    """
    Error reported by the Kubernetes API.

    Carries the API's status/error ``code`` alongside a human readable
    ``message``; both are also exposed via ``args``.
    """

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return '{0} {1}'.format(self.code, self.message)

    def __repr__(self):
        return 'KubernetesException {0} {1}'.format(self.code, self.message)
class KubernetesPod(object):
    """
    A Kubernetes pod: a named group of containers inside a namespace.
    """

    def __init__(self, name, containers, namespace):
        # namespace (cluster) the pod is deployed into
        self.namespace = namespace
        # pod name, unique within its namespace
        self.name = name
        # list of Container objects running inside this pod
        self.containers = containers
def list_containers(self, image=None, all=True):
    """
    List the deployed container images

    :param image: Filter to containers with a certain image
    :type image: :class:`libcloud.container.base.ContainerImage`

    :param all: Show all container (including stopped ones)
    :type all: ``bool``

    :rtype: ``list`` of :class:`libcloud.container.base.Container`
    """
    try:
        result = self.connection.request(
            ROOT_URL + "v1/pods").object
    except Exception as exc:
        errno = getattr(exc, 'errno', None)
        if errno == 111:
            # Connection refused: the API server is unreachable.
            # BUG FIX: the two adjacent string literals previously
            # concatenated to "...accessibleand..."; a separating space
            # was added.
            raise KubernetesException(
                errno,
                'Make sure kube host is accessible '
                'and the API port is correct')
        raise
    pods = [self._to_pod(value) for value in result['items']]
    containers = []
    for pod in pods:
        containers.extend(pod.containers)
    if image is not None:
        # BUG FIX: the documented ``image`` filter was accepted but never
        # applied; restrict the result to containers running that image.
        containers = [container for container in containers
                      if container.image.name == image.name]
    # NOTE(review): the ``all`` flag is not honoured here -- every pod is
    # returned regardless.  Confirm whether stopped containers should be
    # filtered out when all=False.
    return containers


def get_container(self, id):
    """
    Get a container by ID

    :param id: The ID of the container to get
    :type id: ``str``

    :rtype: :class:`libcloud.container.base.Container`
    """
    # ``id`` shadows the builtin but is part of the public signature,
    # so the name is kept.  Raises IndexError when no container matches.
    containers = self.list_containers()
    match = [container for container in containers
             if container.id == id]
    return match[0]
def get_cluster(self, id):
    """
    Get a cluster by ID

    :param id: The ID of the cluster to get
    :type id: ``str``

    :rtype: :class:`libcloud.container.base.ContainerCluster`
    """
    response = self.connection.request(ROOT_URL + "v1/namespaces/%s" %
                                       id).object
    return self._to_cluster(response)


def destroy_cluster(self, cluster):
    """
    Delete a cluster (namespace)

    :return: ``True`` if the destroy was successful, otherwise ``False``.
    :rtype: ``bool``
    """
    self.connection.request(ROOT_URL + "v1/namespaces/%s" % cluster.id,
                            method='DELETE').object
    return True


def create_cluster(self, name, location=None):
    """
    Create a container cluster (a namespace)

    :param name: The name of the cluster
    :type name: ``str``

    :param location: The location to create the cluster in
    :type location: :class:`.ClusterLocation`

    :rtype: :class:`.ContainerCluster`
    """
    payload = {
        'metadata': {
            'name': name
        }
    }
    response = self.connection.request(ROOT_URL + "v1/namespaces",
                                       method='POST',
                                       data=json.dumps(payload)).object
    return self._to_cluster(response)


def deploy_container(self, name, image, cluster=None,
                     parameters=None, start=True):
    """
    Deploy an installed container image.
    In kubernetes this deploys a single container Pod.
    https://cloud.google.com/container-engine/docs/pods/single-container

    :param name: The name of the new container
    :type name: ``str``

    :param image: The container image to deploy
    :type image: :class:`.ContainerImage`

    :param cluster: The cluster to deploy to, None is default
    :type cluster: :class:`.ContainerCluster`

    :param parameters: Container Image parameters
    :type parameters: ``str``

    :param start: Start the container on deployment
    :type start: ``bool``

    :rtype: :class:`.Container`
    """
    namespace = 'default' if cluster is None else cluster.id
    payload = {
        "metadata": {
            "name": name
        },
        "spec": {
            "containers": [
                {
                    "name": name,
                    "image": image.name
                }
            ]
        }
    }
    response = self.connection.request(ROOT_URL + "v1/namespaces/%s/pods"
                                       % namespace,
                                       method='POST',
                                       data=json.dumps(payload)).object
    # NOTE(review): the created pod is converted with _to_cluster(), so a
    # ContainerCluster is returned even though the docstring advertises a
    # Container -- this looks like an upstream bug; confirm before relying
    # on the return value.
    return self._to_cluster(response)


def destroy_container(self, container):
    """
    Destroy a deployed container. Because the containers are single
    container pods, this will delete the pod.

    :param container: The container to destroy
    :type container: :class:`.Container`

    :rtype: ``bool``
    """
    return self.ex_destroy_pod(container.extra['namespace'],
                               container.extra['pod'])


def ex_list_pods(self):
    """
    List available Pods

    :rtype: ``list`` of :class:`.KubernetesPod`
    """
    response = self.connection.request(ROOT_URL + "v1/pods").object
    return [self._to_pod(item) for item in response['items']]
def ex_destroy_pod(self, namespace, pod_name):
    """
    Delete a pod and the containers within it.

    :param namespace: The namespace the pod lives in.
    :type namespace: ``str``

    :param pod_name: The name of the pod to delete.
    :type pod_name: ``str``

    :rtype: ``bool``
    """
    self.connection.request(
        ROOT_URL + "v1/namespaces/%s/pods/%s" % (
            namespace, pod_name),
        method='DELETE').object
    return True


def _to_pod(self, data):
    """
    Convert an API pod document into a KubernetesPod.
    """
    statuses = data['status']['containerStatuses']
    members = []
    # The pod spec declares the containers; their live status is reported
    # in a parallel list.  Match the two up by container name.
    for declared in data['spec']['containers']:
        status = [entry for entry in statuses
                  if entry['name'] == declared['name']][0]
        members.append(self._to_container(declared, status, data))
    return KubernetesPod(
        name=data['metadata']['name'],
        namespace=data['metadata']['namespace'],
        containers=members)


def _to_container(self, data, container_status, pod_data):
    """
    Build a Container from a pod's container spec (``data``) plus its
    matching status entry and the enclosing pod document.
    """
    image = ContainerImage(
        id=container_status['imageID'],
        name=data['image'],
        path=None,
        version=None,
        driver=self.connection.driver)
    return Container(
        id=container_status['containerID'],
        name=data['name'],
        image=image,
        ip_addresses=None,
        # NOTE(review): state is hard-coded to RUNNING; the status entry
        # is not inspected -- confirm before relying on Container.state.
        state=ContainerState.RUNNING,
        driver=self.connection.driver,
        extra={
            'pod': pod_data['metadata']['name'],
            'namespace': pod_data['metadata']['namespace']
        })


def _to_cluster(self, data):
    """
    Convert a namespace document to a ContainerCluster.
    """
    metadata = data['metadata']
    status = data['status']
    return ContainerCluster(
        id=metadata['name'],
        name=metadata['name'],
        driver=self.connection.driver,
        extra={'phase': status['phase']})
""" date = datetime.datetime.fromtimestamp(timestamp) date_string = date.strftime("%d/%m/%Y %H:%M %Z") return date_string apache-libcloud-2.2.1/libcloud/container/drivers/gke.py0000664000175000017500000001560213153541406023001 0ustar kamikami00000000000000from libcloud.common.google import GoogleOAuth2Credential from libcloud.container.providers import Provider from libcloud.container.drivers.kubernetes import KubernetesContainerDriver from libcloud.common.google import GoogleResponse from libcloud.common.google import GoogleBaseConnection API_VERSION = 'v1' class GKEResponse(GoogleResponse): pass class GKEConnection(GoogleBaseConnection): """ Connection class for the GKE driver. GKEConnection extends :class:`google.GoogleBaseConnection` for 3 reasons: 1. modify request_path for GKE URI. 2. Implement gce_params functionality described below. 3. Add request_aggregated_items method for making aggregated API calls. """ host = 'container.googleapis.com' responseCls = GKEResponse def __init__(self, user_id, key, secure, auth_type=None, credential_file=None, project=None, **kwargs): super(GKEConnection, self).__init__( user_id, key, secure=secure, auth_type=auth_type, credential_file=credential_file, **kwargs) self.request_path = '/%s/projects/%s' % (API_VERSION, project) self.gke_params = None def pre_connect_hook(self, params, headers): """ Update URL parameters with values from self.gke_params. @inherits: :class:`GoogleBaseConnection.pre_connect_hook` """ params, headers = super(GKEConnection, self).pre_connect_hook(params, headers) if self.gke_params: params.update(self.gke_params) return params, headers def request(self, *args, **kwargs): """ Perform request then do GKE-specific processing of URL params. @inherits: :class:`GoogleBaseConnection.request` """ response = super(GKEConnection, self).request(*args, **kwargs) # If gce_params has been set, then update the pageToken with the # nextPageToken so it can be used in the next request. 
class GKEContainerDriver(KubernetesContainerDriver):
    """
    GKE Container Driver class.

    This is the primary driver for interacting with Google Container
    Engine. It contains all of the standard libcloud methods,
    plus additional ex_* methods for more features.

    Note that many methods allow either objects or strings (or lists of
    objects/strings).  In most cases, passing strings instead of objects
    will result in additional GKE API calls.
    """
    connectionCls = GKEConnection
    api_name = 'google'
    name = "Google Container Engine"
    type = Provider.GKE
    website = 'https://container.googleapis.com'
    supports_clusters = True

    AUTH_URL = "https://container.googleapis.com/auth/"

    def __init__(self, user_id, key=None, datacenter=None, project=None,
                 auth_type=None, scopes=None, credential_file=None,
                 host=None, port=443, **kwargs):
        """
        :param  user_id: The email address (for service accounts) or Client
                         ID (for installed apps) to be used for
                         authentication.
        :type   user_id: ``str``

        :param  key: The RSA Key (for service accounts) or file path
                     containing key or Client Secret (for installed apps)
                     to be used for authentication.
        :type   key: ``str``

        :keyword  datacenter: The name of the datacenter (zone) used for
                              operations.
        :type     datacenter: ``str``

        :keyword  project: Your GKE project name. (required)
        :type     project: ``str``

        :keyword  auth_type: Accepted values are "SA" or "IA" or "GKE"
                             ("Service Account" or "Installed Application"
                             or "GKE" if libcloud is being used on a GKE
                             instance with service account enabled).
                             If not supplied, auth_type will be guessed
                             based on value of user_id or if the code is
                             being executed in a GKE instance.
        :type     auth_type: ``str``

        :keyword  scopes: List of authorization URLs. Default is empty and
                          grants read/write to Compute, Storage, DNS.
        :type     scopes: ``list``

        :keyword  credential_file: Path to file for caching authentication
                                   information used by GKEConnection.
        :type     credential_file: ``str``

        :raises ValueError: when ``project`` is not supplied.
        """
        if not project:
            raise ValueError('Project name must be specified using '
                             '"project" keyword.')
        if host is None:
            host = GKEContainerDriver.website
        self.auth_type = auth_type
        self.project = project
        self.scopes = scopes
        self.zone = None
        if datacenter is not None:
            self.zone = datacenter
        self.credential_file = credential_file or \
            GoogleOAuth2Credential.default_credential_file + '.' + \
            self.project

        # NOTE(review): host/port are deliberately not forwarded here --
        # GKEConnection supplies its own API host -- so the ``host`` and
        # ``port`` arguments above are effectively unused.  Confirm before
        # wiring them through.
        super(GKEContainerDriver, self).__init__(user_id, key,
                                                 secure=True, host=None,
                                                 port=None, **kwargs)

        self.base_path = '/%s/projects/%s' % (API_VERSION, self.project)
        self.website = GKEContainerDriver.website

    def _ex_connection_class_kwargs(self):
        # Extra kwargs the base driver hands to GKEConnection.
        return {'auth_type': self.auth_type,
                'project': self.project,
                'scopes': self.scopes,
                'credential_file': self.credential_file}

    def list_clusters(self, ex_zone=None):
        """
        Return a list of cluster information in the current zone or all
        zones.

        :keyword  ex_zone:  Optional zone name or None
        :type     ex_zone:  ``str`` or :class:`GCEZone` or
                            :class:`NodeLocation` or ``None``
        """
        if ex_zone is None:
            # NOTE(review): kept from the original implementation; the GKE
            # REST API normally uses '-' as the all-zones wildcard --
            # confirm this endpoint against the API version in use.
            request = "/zones/clusters"
        else:
            request = "/zones/%s/clusters" % (ex_zone)
        response = self.connection.request(request, method='GET').object
        return response

    def get_server_config(self, ex_zone=None):
        """
        Return configuration info about the Container Engine service.

        :keyword  ex_zone:  Optional zone name or None (falls back to the
                            zone the driver was constructed with).
        :type     ex_zone:  ``str`` or :class:`GCEZone` or
                            :class:`NodeLocation` or ``None``

        :raises ValueError: when no zone is available.
        """
        if ex_zone is None:
            ex_zone = self.zone
        if ex_zone is None:
            # BUG FIX: without this guard a missing zone produced a request
            # for the literal path '/zones/None/serverconfig'.
            raise ValueError('A zone must be provided via ex_zone or the '
                             'driver "datacenter" argument.')
        request = "/zones/%s/serverconfig" % (ex_zone)
        response = self.connection.request(request, method='GET').object
        return response
class DockerResponse(JsonResponse):
    """
    HTTP response wrapper for the Docker Remote API.
    """

    # Status codes the driver treats as success.
    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_body(self):
        """
        Decode the response body.

        Returns the parsed JSON when the response is JSON (a list of
        per-line documents for chunked image pulls), otherwise the raw
        body.  Raises ``Exception`` with the daemon's error text when a
        JSON body fails to parse.
        """
        # Empty body (and zero-length parsing disabled): nothing to decode.
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body
        try:
            # error responses are tricky in Docker. Eg response could be
            # an error, but response status could still be 200
            content_type = self.headers.get('content-type',
                                            'application/json')
            if content_type == 'application/json' or content_type == '':
                if self.headers.get('transfer-encoding') == 'chunked' and \
                        'fromImage' in self.request.url:
                    # Image pulls stream one JSON document per line;
                    # decode each line separately.
                    body = [json.loads(chunk) for chunk in
                            self.body.strip().replace('\r', '').split('\n')]
                else:
                    body = json.loads(self.body)
            else:
                body = self.body
        except ValueError:
            # Body claimed to be JSON but did not parse; try to surface
            # the daemon's own quoted error message.
            m = re.search('Error: (.+?)"', self.body)
            if m:
                error_msg = m.group(1)
                raise Exception(error_msg)
            else:
                raise Exception(
                    'ConnectionError: Failed to parse JSON response')
        return body

    def parse_error(self):
        """
        Handle an error response: raise on auth failure, otherwise return
        the raw body for the caller to inspect.
        """
        if self.status == 401:
            raise InvalidCredsError('Invalid credentials')
        return self.body

    def success(self):
        # Success is simple membership in the whitelist above.
        return self.status in self.valid_response_codes


class DockerException(Exception):
    """
    Exception carrying the Docker daemon's status code and message.
    """

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return "%s %s" % (self.code, self.message)

    def __repr__(self):
        return "DockerException %s %s" % (self.code, self.message)


class DockerConnection(ConnectionUserAndKey):
    """
    Plain (non-TLS) connection to the Docker Remote API.
    """

    responseCls = DockerResponse
    timeout = 60

    def add_default_headers(self, headers):
        """
        Add parameters that are necessary for every request
        If user and password are specified, include a base http auth
        header
        """
        headers['Content-Type'] = 'application/json'
        if self.user_id and self.key:
            user_b64 = base64.b64encode(b('%s:%s' % (self.user_id,
                                                     self.key)))
            headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8'))
        return headers
class DockerContainerDriver(ContainerDriver):
    """
    Docker container driver class.

    >>> from libcloud.container.providers import get_driver
    >>> driver = get_driver('docker')
    >>> conn = driver(host='198.61.239.128', port=4243)
    >>> conn.list_containers()

    or connecting to http basic auth protected https host:

    >>> conn = driver('user', 'pass', host='https://198.61.239.128',
    >>>               port=443)

    connect with tls authentication, by providing a hostname, port, a
    private key file (.pem) and certificate (.pem) file

    >>> conn = driver(host='https://198.61.239.128',
    >>> port=4243, key_file='key.pem', cert_file='cert.pem')
    """

    type = Provider.DOCKER
    name = 'Docker'
    website = 'http://docker.io'
    connectionCls = DockerConnection
    supports_clusters = False
    version = '1.24'

    def __init__(self, key='', secret='', secure=False, host='localhost',
                 port=4243, key_file=None, cert_file=None):
        """
        :param    key: API key or username to used (required)
        :type     key: ``str``

        :param    secret: Secret password to be used (required)
        :type     secret: ``str``

        :param    secure: Whether to use HTTPS or HTTP. Note: Some providers
                only support HTTPS, and it is on by default.
        :type     secure: ``bool``

        :param    host: Override hostname used for connections.
        :type     host: ``str``

        :param    port: Override port used for connections.
        :type     port: ``int``

        :param    key_file: Path to private key for TLS connection
            (optional)
        :type     key_file: ``str``

        :param    cert_file: Path to public key for TLS connection
            (optional)
        :type     cert_file: ``str``

        :return: ``None``
        """
        if key_file:
            # Presence of TLS material switches to the TLS connection class.
            self.connectionCls = DockertlsConnection
            self.key_file = key_file
            self.cert_file = cert_file
            secure = True

        if host.startswith('https://'):
            secure = True

        # Strip the URL scheme off the host, if present.
        # BUG FIX: the original used host.strip(prefix), which removes any
        # of the prefix's *characters* from both ends of the string and
        # can corrupt hostnames.  Slice the matched prefix off instead.
        prefixes = ['http://', 'https://']
        for prefix in prefixes:
            if host.startswith(prefix):
                host = host[len(prefix):]
                break

        super(DockerContainerDriver, self).__init__(key=key, secret=secret,
                                                    secure=secure, host=host,
                                                    port=port,
                                                    key_file=key_file,
                                                    cert_file=cert_file)
        if key_file or cert_file:
            # docker tls authentication-
            # https://docs.docker.com/articles/https/
            # We pass two files, a key_file with the
            # private key and cert_file with the certificate
            # libcloud will handle them through LibcloudHTTPSConnection
            if not (key_file and cert_file):
                raise Exception(
                    'Needs both private key file and '
                    'certificate file for tls authentication')

        self.connection.secure = secure
        self.connection.host = host
        self.connection.port = port
        # Ask the daemon which API version to talk.
        self.version = self._get_api_version()

    def _ex_connection_class_kwargs(self):
        # Forward any TLS material to the connection class constructor.
        kwargs = {}
        if hasattr(self, 'key_file'):
            kwargs['key_file'] = self.key_file
        if hasattr(self, 'cert_file'):
            kwargs['cert_file'] = self.cert_file
        return kwargs
both messages below indicate that the image # is available for use to the daemon if re.search(r'Downloaded newer image', result.body) or \ re.search(r'"Status: Image is up to date', result.body): if re.search(r'sha256:(?P[a-z0-9]{64})', result.body): image_id = re.findall(r'sha256:(?P[a-z0-9]{64})', result.body)[-1] # if there is a failure message or if there is not an image id in the # response then throw an exception. if image_id is None: raise DockerException(None, 'failed to install image') image = ContainerImage( id=image_id, name=path, path=path, version=None, driver=self.connection.driver, extra={}) return image def list_images(self): """ List the installed container images :rtype: ``list`` of :class:`libcloud.container.base.ContainerImage` """ result = self.connection.request('/v%s/images/json' % (self.version)).object images = [] for image in result: try: name = image.get('RepoTags')[0] except: name = image.get('Id') images.append(ContainerImage( id=image.get('Id'), name=name, path=name, version=None, driver=self.connection.driver, extra={ "created": image.get('Created'), "size": image.get('Size'), "virtual_size": image.get('VirtualSize'), }, )) return images def list_containers(self, image=None, all=True): """ List the deployed container images :param image: Filter to containers with a certain image :type image: :class:`libcloud.container.base.ContainerImage` :param all: Show all container (including stopped ones) :type all: ``bool`` :rtype: ``list`` of :class:`libcloud.container.base.Container` """ if all: ex = '?all=1' else: ex = '' try: result = self.connection.request( "/v%s/containers/json%s" % (self.version, ex)).object except Exception as exc: errno = getattr(exc, 'errno', None) if errno == 111: raise DockerException( errno, 'Make sure docker host is accessible' 'and the API port is correct') raise containers = [self._to_container(value) for value in result] return containers def deploy_container(self, name, image, parameters=None, start=True, 
command=None, hostname=None, user='', stdin_open=True, tty=True, mem_limit=0, ports=None, environment=None, dns=None, volumes=None, volumes_from=None, network_disabled=False, entrypoint=None, cpu_shares=None, working_dir='', domainname=None, memswap_limit=0, port_bindings=None, network_mode='bridge', labels=None): """ Deploy an installed container image For details on the additional parameters see : http://bit.ly/1PjMVKV :param name: The name of the new container :type name: ``str`` :param image: The container image to deploy :type image: :class:`libcloud.container.base.ContainerImage` :param parameters: Container Image parameters :type parameters: ``str`` :param start: Start the container on deployment :type start: ``bool`` :rtype: :class:`Container` """ command = shlex.split(str(command)) if port_bindings is None: port_bindings = {} params = { 'name': name } payload = { 'Hostname': hostname, 'Domainname': domainname, 'ExposedPorts': ports, 'User': user, 'Tty': tty, 'OpenStdin': stdin_open, 'StdinOnce': False, 'Memory': mem_limit, 'AttachStdin': True, 'AttachStdout': True, 'AttachStderr': True, 'Env': environment, 'Cmd': command, 'Dns': dns, 'Image': image.name, 'Volumes': volumes, 'VolumesFrom': volumes_from, 'NetworkDisabled': network_disabled, 'Entrypoint': entrypoint, 'CpuShares': cpu_shares, 'WorkingDir': working_dir, 'MemorySwap': memswap_limit, 'PublishAllPorts': True, 'PortBindings': port_bindings, 'NetworkMode': network_mode, 'Labels': labels, } data = json.dumps(payload) try: result = self.connection.request('/v%s/containers/create' % (self.version), data=data, params=params, method='POST') except Exception as e: message = e.message or str(e) if message.startswith('No such image:'): raise DockerException(None, 'No such image: %s' % image.name) else: raise DockerException(None, e) id_ = result.object['Id'] payload = { 'Binds': [], 'PublishAllPorts': True, 'PortBindings': port_bindings, } data = json.dumps(payload) if start: if 
float(self._get_api_version()) > 1.22: result = self.connection.request( '/v%s/containers/%s/start' % (self.version, id_), method='POST') else: result = self.connection.request( '/v%s/containers/%s/start' % (self.version, id_), data=data, method='POST') return self.get_container(id_) def get_container(self, id): """ Get a container by ID :param id: The ID of the container to get :type id: ``str`` :rtype: :class:`libcloud.container.base.Container` """ result = self.connection.request("/v%s/containers/%s/json" % (self.version, id)).object return self._to_container(result) def start_container(self, container): """ Start a container :param container: The container to be started :type container: :class:`libcloud.container.base.Container` :return: The container refreshed with current data :rtype: :class:`libcloud.container.base.Container` """ if float(self._get_api_version()) > 1.22: result = self.connection.request( '/v%s/containers/%s/start' % (self.version, container.id), method='POST') else: payload = { 'Binds': [], 'PublishAllPorts': True, } data = json.dumps(payload) result = self.connection.request( '/v%s/containers/%s/start' % (self.version, container.id), method='POST', data=data) if result.status in VALID_RESPONSE_CODES: return self.get_container(container.id) else: raise DockerException(result.status, 'failed to start container') def stop_container(self, container): """ Stop a container :param container: The container to be stopped :type container: :class:`libcloud.container.base.Container` :return: The container refreshed with current data :rtype: :class:`libcloud.container.base.Container` """ result = self.connection.request('/v%s/containers/%s/stop' % (self.version, container.id), method='POST') if result.status in VALID_RESPONSE_CODES: return self.get_container(container.id) else: raise DockerException(result.status, 'failed to stop container') def restart_container(self, container): """ Restart a container :param container: The container to be stopped 
:type container: :class:`libcloud.container.base.Container` :return: The container refreshed with current data :rtype: :class:`libcloud.container.base.Container` """ data = json.dumps({'t': 10}) # number of seconds to wait before killing the container result = self.connection.request('/v%s/containers/%s/restart' % (self.version, container.id), data=data, method='POST') if result.status in VALID_RESPONSE_CODES: return self.get_container(container.id) else: raise DockerException(result.status, 'failed to restart container') def destroy_container(self, container): """ Remove a container :param container: The container to be destroyed :type container: :class:`libcloud.container.base.Container` :return: True if the destroy was successful, False otherwise. :rtype: ``bool`` """ result = self.connection.request('/v%s/containers/%s' % (self.version, container.id), method='DELETE') return result.status in VALID_RESPONSE_CODES def ex_list_processes(self, container): """ List processes running inside a container :param container: The container to list processes for. 
:type container: :class:`libcloud.container.base.Container` :rtype: ``str`` """ result = self.connection.request("/v%s/containers/%s/top" % (self.version, container.id)).object return result def ex_rename_container(self, container, name): """ Rename a container :param container: The container to be renamed :type container: :class:`libcloud.container.base.Container` :param name: The new name :type name: ``str`` :rtype: :class:`libcloud.container.base.Container` """ result = self.connection.request('/v%s/containers/%s/rename?name=%s' % (self.version, container.id, name), method='POST') if result.status in VALID_RESPONSE_CODES: return self.get_container(container.id) def ex_get_logs(self, container, stream=False): """ Get container logs If stream == True, logs will be yielded as a stream From Api Version 1.11 and above we need a GET request to get the logs Logs are in different format of those of Version 1.10 and below :param container: The container to list logs for :type container: :class:`libcloud.container.base.Container` :param stream: Stream the output :type stream: ``bool`` :rtype: ``bool`` """ payload = {} data = json.dumps(payload) if float(self._get_api_version()) > 1.10: result = self.connection.request( "/v%s/containers/%s/logs?follow=%s&stdout=1&stderr=1" % (self.version, container.id, str(stream))).object logs = result else: result = self.connection.request( "/v%s/containers/%s/attach?logs=1&stream=%s&stdout=1&stderr=1" % (self.version, container.id, str(stream)), method='POST', data=data) logs = result.body return logs def ex_search_images(self, term): """Search for an image on Docker.io. 
Returns a list of ContainerImage objects >>> images = conn.ex_search_images(term='mistio') >>> images [, ] :param term: The search term :type term: ``str`` :rtype: ``list`` of :class:`libcloud.container.base.ContainerImage` """ term = term.replace(' ', '+') result = self.connection.request('/v%s/images/search?term=%s' % (self.version, term)).object images = [] for image in result: name = image.get('name') images.append( ContainerImage( id=name, path=name, version=None, name=name, driver=self.connection.driver, extra={ "description": image.get('description'), "is_official": image.get('is_official'), "is_trusted": image.get('is_trusted'), "star_count": image.get('star_count'), }, )) return images def ex_delete_image(self, image): """ Remove image from the filesystem :param image: The image to remove :type image: :class:`libcloud.container.base.ContainerImage` :rtype: ``bool`` """ result = self.connection.request('/v%s/images/%s' % (self.version, image.name), method='DELETE') return result.status in VALID_RESPONSE_CODES def _to_container(self, data): """ Convert container in Container instances """ try: name = data.get('Name').strip('/') except: try: name = data.get('Names')[0].strip('/') except: name = data.get('Id') state = data.get('State') if isinstance(state, dict): status = data.get( 'Status', state.get('Status') if state is not None else None) else: status = data.get('Status') if 'Exited' in status: state = ContainerState.STOPPED elif status.startswith('Up '): state = ContainerState.RUNNING elif 'running' in status: state = ContainerState.RUNNING else: state = ContainerState.STOPPED image = data.get('Image') ports = data.get('Ports', []) created = data.get('Created') if isinstance(created, float): created = ts_to_str(created) extra = { 'id': data.get('Id'), 'status': data.get('Status'), 'created': created, 'image': image, 'ports': ports, 'command': data.get('Command'), 'sizerw': data.get('SizeRw'), 'sizerootfs': data.get('SizeRootFs'), } ips = [] if ports is 
not None: for port in ports: if port.get('IP') is not None: ips.append(port.get('IP')) return Container( id=data['Id'], name=name, image=ContainerImage( id=data.get('ImageID', None), path=image, name=image, version=None, driver=self.connection.driver ), ip_addresses=ips, state=state, driver=self.connection.driver, extra=extra) def _get_api_version(self): """ Get the docker API version information """ result = self.connection.request('/version').object result = result or {} api_version = result.get('ApiVersion') return api_version def ts_to_str(timestamp): """ Return a timestamp as a nicely formated datetime string. """ date = datetime.datetime.fromtimestamp(timestamp) date_string = date.strftime("%d/%m/%Y %H:%M %Z") return date_string apache-libcloud-2.2.1/libcloud/container/drivers/dummy.py0000664000175000017500000000306512701023453023361 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.container.base import ContainerDriver class DummyContainerDriver(ContainerDriver): """ Dummy Container driver. 
class DummyContainerDriver(ContainerDriver):
    """
    Dummy Container driver.

    >>> from libcloud.container.drivers.dummy import DummyContainerDriver
    >>> driver = DummyContainerDriver('key', 'secret')
    >>> driver.name
    'Dummy Container Provider'
    """

    name = 'Dummy Container Provider'
    website = 'http://example.com'
    supports_clusters = False

    def __init__(self, api_key, api_secret):
        """
        :param api_key: API key or username to be used (required)
        :type api_key: ``str``

        :param api_secret: Secret password to be used (required)
        :type api_secret: ``str``

        :rtype: ``None``
        """
        # NOTE: the body is intentionally empty -- the credentials are
        # accepted but not stored, and super().__init__() is never called,
        # so no connection is set up.  This driver exists only so the
        # doctests above have something to instantiate.
class ContainerImage(object):
    """
    Container Image.
    """

    def __init__(self, id, name, path, version, driver, extra=None):
        """
        :param id: Container Image id.
        :type id: ``str``

        :param name: The name of the image.
        :type name: ``str``

        :param path: The path to the image
        :type path: ``str``

        :param version: The version of the image
        :type version: ``str``

        :param driver: ContainerDriver instance.
        :type driver: :class:`.ContainerDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        self.id = str(id) if id else None
        self.name = name
        self.path = path
        self.version = version
        self.driver = driver
        self.extra = extra or {}

    def deploy(self, name, parameters, *args, **kwargs):
        # Convenience passthrough to the driver's deploy_container().
        return self.driver.deploy_container(name=name, image=self,
                                            parameters=parameters,
                                            *args, **kwargs)

    def __repr__(self):
        # BUG FIX: the format string had been corrupted to '' (an empty
        # string with three % arguments raises TypeError at runtime);
        # restored to the upstream representation.
        return ('<ContainerImage: id=%s, name=%s, path=%s>' %
                (self.id, self.name, self.path))
class ClusterLocation(object):
    """
    A physical location where clusters can be.

    >>> from libcloud.container.drivers.dummy import DummyContainerDriver
    >>> driver = DummyContainerDriver(0)
    >>> location = driver.list_locations()[0]
    >>> location.country
    'US'
    """

    def __init__(self, id, name, country, driver):
        """
        :param id: Location ID.
        :type id: ``str``

        :param name: Location name.
        :type name: ``str``

        :param country: Location country.
        :type country: ``str``

        :param driver: Driver this location belongs to.
        :type driver: :class:`.ContainerDriver`
        """
        self.id = str(id)
        self.name = name
        self.country = country
        self.driver = driver

    def __repr__(self):
        # BUG FIX: the format string had been corrupted to '' (an empty
        # string with four % arguments raises TypeError at runtime);
        # restored to the conventional representation used by the sibling
        # classes in this module.
        return (('<ClusterLocation: id=%s, name=%s, country=%s, driver=%s>')
                % (self.id, self.name, self.country, self.driver.name))
    def list_containers(self, image=None, cluster=None):
        """
        List the deployed container images

        :param image: Filter to containers with a certain image
        :type image: :class:`.ContainerImage`

        :param cluster: Filter to containers in a cluster
        :type cluster: :class:`.ContainerCluster`

        :rtype: ``list`` of :class:`.Container`
        """
        # Abstract hook: concrete drivers override this; the base class
        # only signals that the operation is unsupported.
        raise NotImplementedError(
            'list_containers not implemented for this driver')
    def stop_container(self, container):
        """
        Stop a deployed container

        :param container: The container to stop
        :type container: :class:`.Container`

        :rtype: :class:`.Container`
        """
        # Abstract hook: concrete drivers override this; the base class
        # only signals that the operation is unsupported.
        raise NotImplementedError(
            'stop_container not implemented for this driver')
:rtype: ``bool`` """ raise NotImplementedError( 'destroy_cluster not implemented for this driver') def list_clusters(self, location=None): """ Get a list of potential locations to deploy clusters into :param location: The location to search in :type location: :class:`.ClusterLocation` :rtype: ``list`` of :class:`.ContainerCluster` """ raise NotImplementedError( 'list_clusters not implemented for this driver') def get_cluster(self, id): """ Get a cluster by ID :param id: The ID of the cluster to get :type id: ``str`` :rtype: :class:`.ContainerCluster` """ raise NotImplementedError( 'list_clusters not implemented for this driver') apache-libcloud-2.2.1/libcloud/container/utils/0000775000175000017500000000000013160535107021336 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/container/utils/__init__.py0000664000175000017500000000000012701023453023431 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/container/utils/docker.py0000664000175000017500000001412313153541406023161 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
class DockerHubConnection(Connection):
    """
    Connection to the Docker Hub / v2 registry API with optional
    HTTP basic authentication.
    """
    responseCls = JsonResponse

    def __init__(self, host, username=None, password=None, secure=True,
                 port=None, url=None, timeout=None,
                 proxy_url=None, backoff=None, retry_delay=None):
        super(DockerHubConnection, self).__init__(secure=secure, host=host,
                                                  port=port, url=url,
                                                  timeout=timeout,
                                                  proxy_url=proxy_url,
                                                  backoff=backoff,
                                                  retry_delay=retry_delay)
        self.username = username
        self.password = password

    def add_default_headers(self, headers):
        headers['Content-Type'] = 'application/json'
        if self.username is not None:
            # BUG FIX: b64encode() returns ``bytes``; wrapping the result
            # in str() under Python 3 yields the literal "b'...'" and
            # corrupts the Authorization header.  Decode explicitly
            # instead.
            creds = '%s:%s' % (self.username, self.password)
            authstr = 'Basic ' + b64encode(
                creds.encode('latin1')).strip().decode('latin1')
            headers['Authorization'] = authstr
        return headers
'ubuntu' :type repository_name: ``str`` :param namespace: (optional) The docker namespace :type namespace: ``str`` :param max_count: The maximum number of records to return :type max_count: ``int`` :return: A list of images :rtype: ``list`` of :class:`libcloud.container.base.ContainerImage` """ path = '/v2/repositories/%s/%s/tags/?page=1&page_size=%s' \ % (namespace, repository_name, max_count) response = self.connection.request(path) images = [] for image in response.object['results']: images.append(self._to_image(repository_name, image)) return images def get_repository(self, repository_name, namespace='library'): """ Get the information about a specific repository :param repository_name: The name of the repository e.g. 'ubuntu' :type repository_name: ``str`` :param namespace: (optional) The docker namespace :type namespace: ``str`` :return: The details of the repository :rtype: ``object`` """ path = '/v2/repositories/%s/%s/' % (namespace, repository_name) response = self.connection.request(path) return response.object def get_image(self, repository_name, tag='latest', namespace='library'): """ Get an image from a repository with a specific tag :param repository_name: The name of the repository, e.g. 
ubuntu :type repository_name: ``str`` :param tag: (optional) The image tag (defaults to latest) :type tag: ``str`` :param namespace: (optional) The docker namespace :type namespace: ``str`` :return: A container image :rtype: :class:`libcloud.container.base.ContainerImage` """ path = '/v2/repositories/%s/%s/tags/%s/' \ % (namespace, repository_name, tag) response = self.connection.request(path) return self._to_image(repository_name, response.object) def _to_image(self, repository_name, obj): path = '%s/%s:%s' % (self.connection.host, repository_name, obj['name']) return ContainerImage( id=obj['id'], path=path, name=path, version=obj['name'], extra={ 'full_size': obj['full_size'] }, driver=None ) class HubClient(RegistryClient): """ A client for the Docker Hub API The hub is based on the v2 registry API """ host = 'registry.hub.docker.com' def __init__(self, username=None, password=None, **kwargs): """ Construct a Docker hub client :param username: (optional) Your Hub account username :type username: ``str`` :param password: (optional) Your hub account password :type password: ``str`` """ super(HubClient, self).__init__(self.host, username, password, **kwargs) apache-libcloud-2.2.1/libcloud/loadbalancer/0000775000175000017500000000000013160535107020623 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/loadbalancer/providers.py0000664000175000017500000000471313153541406023220 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
def get_driver(provider):
    """
    Resolve a loadbalancer ``Provider`` constant to its driver class,
    honouring the deprecated Rackspace region constants.
    """
    return _get_provider_driver(
        drivers=DRIVERS, provider=provider,
        deprecated_constants=OLD_CONSTANT_TO_NEW_MAPPING)
apache-libcloud-2.2.1/libcloud/loadbalancer/__init__.py0000664000175000017500000000160412701023453022731 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Module for working with Load Balancers """ __all__ = [ 'base', 'providers', 'types', 'drivers' ] apache-libcloud-2.2.1/libcloud/loadbalancer/types.py0000664000175000017500000000441213153541406022343 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
__all__ = [ "Provider", "State", "LibcloudLBError", "LibcloudLBImmutableError", "OLD_CONSTANT_TO_NEW_MAPPING" ] from libcloud.common.types import LibcloudError class LibcloudLBError(LibcloudError): pass class LibcloudLBImmutableError(LibcloudLBError): pass class Provider(object): """ Defines for each of the supported providers Non-Dummy drivers are sorted in alphabetical order. Please preserve this ordering when adding new drivers. :cvar ALIYUN_SLB: Aliyun SLB loadbalancer driver """ ALB = 'alb' ALIYUN_SLB = 'aliyun_slb' BRIGHTBOX = 'brightbox' CLOUDSTACK = 'cloudstack' DIMENSIONDATA = 'dimensiondata' ELB = 'elb' GCE = 'gce' GOGRID = 'gogrid' NINEFOLD = 'ninefold' RACKSPACE = 'rackspace' SOFTLAYER = 'softlayer' # Deprecated RACKSPACE_US = 'rackspace_us' RACKSPACE_UK = 'rackspace_uk' OLD_CONSTANT_TO_NEW_MAPPING = { Provider.RACKSPACE_US: Provider.RACKSPACE, Provider.RACKSPACE_UK: Provider.RACKSPACE, } class State(object): """ Standard states for a loadbalancer :cvar RUNNING: loadbalancer is running and ready to use :cvar UNKNOWN: loabalancer state is unknown """ RUNNING = 0 PENDING = 1 UNKNOWN = 2 ERROR = 3 DELETED = 4 class MemberCondition(object): """ Each member of a load balancer can have an associated condition which determines its role within the load balancer. """ ENABLED = 0 DISABLED = 1 DRAINING = 2 apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/0000775000175000017500000000000013160535107022301 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/__init__.py0000664000175000017500000000147212701023453024412 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = [
    'rackspace',
    'gogrid'
]

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.loadbalancer.providers import Provider
from libcloud.loadbalancer.drivers.cloudstack import CloudStackLBDriver


class NinefoldLBDriver(CloudStackLBDriver):
    """Driver for load balancers on Ninefold's Compute platform."""

    type = Provider.NINEFOLD
    name = 'Ninefold LB'
    website = 'http://ninefold.com/'

    # Ninefold exposes the CloudStack API at a fixed endpoint; everything
    # else is inherited unchanged from CloudStackLBDriver.
    host = 'api.ninefold.com'
    path = '/compute/v1.0/'
See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = [ 'SLB_API_VERSION', 'SLBDriver' ] import sys try: import simplejson as json except ImportError: import json from libcloud.common.aliyun import AliyunXmlResponse, SignedAliyunConnection from libcloud.common.types import LibcloudError from libcloud.loadbalancer.types import State from libcloud.loadbalancer.base import Algorithm, Driver, LoadBalancer, Member from libcloud.utils.misc import ReprMixin from libcloud.utils.py3 import u from libcloud.utils.xml import findattr, findtext, findall SLB_API_VERSION = '2014-05-15' SLB_API_HOST = 'slb.aliyuncs.com' DEFAULT_SIGNATURE_VERSION = '1.0' STATE_MAPPINGS = { 'inactive': State.UNKNOWN, 'active': State.RUNNING, 'locked': State.PENDING } RESOURCE_EXTRA_ATTRIBUTES_MAP = { 'balancer': { 'create_timestamp': { 'xpath': 'CreateTimeStamp', 'transform_func': int }, 'address_type': { 'xpath': 'AddressType', 'transform_func': u }, 'region_id': { 'xpath': 'RegionId', 'transform_func': u }, 'region_id_alias': { 'xpath': 'RegionIdAlias', 'transform_func': u }, 'create_time': { 'xpath': 'CreateTime', 'transform_func': u }, 'master_zone_id': { 'xpath': 'MasterZoneId', 'transform_func': u }, 'slave_zone_id': { 'xpath': 'SlaveZoneId', 'transform_func': u }, 'network_type': { 'xpath': 'NetworkType', 'transform_func': u } } } SLB_SCHEDULER_TO_ALGORITHM = { 'wrr': 
Algorithm.WEIGHTED_ROUND_ROBIN, 'wlc': Algorithm.WEIGHTED_LEAST_CONNECTIONS } ALGORITHM_TO_SLB_SCHEDULER = { Algorithm.WEIGHTED_ROUND_ROBIN: 'wrr', Algorithm.WEIGHTED_LEAST_CONNECTIONS: 'wlc' } class SLBConnection(SignedAliyunConnection): api_version = SLB_API_VERSION host = SLB_API_HOST responseCls = AliyunXmlResponse service_name = 'slb' class SLBLoadBalancerAttribute(object): """ This class used to get listeners and backend servers related to a balancer listeners is a ``list`` of ``dict``, each element contains 'ListenerPort' and 'ListenerProtocol' keys. backend_servers is a ``list`` of ``dict``, each element contains 'ServerId' and 'Weight' keys. """ def __init__(self, balancer, listeners, backend_servers, extra=None): self.balancer = balancer self.listeners = listeners or [] self.backend_servers = backend_servers or [] self.extra = extra or {} def is_listening(self, port): for listener in self.listeners: if listener.get('ListenerPort') == port: return True return False def is_attached(self, member): for server in self.backend_servers: if server.get('Serverid') == member.id: return True return False def __repr__(self): return ('' % (self.balancer.id, self.listeners, self.backend_servers)) class SLBLoadBalancerListener(ReprMixin, object): """ Base SLB load balancer listener class """ _repr_attributes = ['port', 'backend_port', 'scheduler', 'bandwidth'] action = None option_keys = [] def __init__(self, port, backend_port, algorithm, bandwidth, extra=None): self.port = port self.backend_port = backend_port self.scheduler = ALGORITHM_TO_SLB_SCHEDULER.get(algorithm, 'wrr') self.bandwidth = bandwidth self.extra = extra or {} @classmethod def create(cls, port, backend_port, algorithm, bandwidth, extra=None): return cls(port, backend_port, algorithm, bandwidth, extra=extra) def get_create_params(self): params = self.get_required_params() options = self.get_optional_params() options.update(params) return options def get_required_params(self): params = {'Action': 
self.action, 'ListenerPort': self.port, 'BackendServerPort': self.backend_port, 'Scheduler': self.scheduler, 'Bandwidth': self.bandwidth} return params def get_optional_params(self): options = {} for option in self.option_keys: if self.extra and option in self.extra: options[option] = self.extra[option] return options class SLBLoadBalancerHttpListener(SLBLoadBalancerListener): """ This class represents a rule to route http request to the backends. """ action = 'CreateLoadBalancerHTTPListener' option_keys = ['XForwardedFor', 'StickySessionType', 'CookieTimeout', 'Cookie', 'HealthCheckDomain', 'HealthCheckURI', 'HealthCheckConnectPort', 'HealthyThreshold', 'UnhealthyThreshold', 'HealthCheckTimeout', 'HealthCheckInterval', 'HealthCheckHttpCode'] def __init__(self, port, backend_port, algorithm, bandwidth, sticky_session, health_check, extra=None): super(SLBLoadBalancerHttpListener, self).__init__( port, backend_port, algorithm, bandwidth, extra=extra) self.sticky_session = sticky_session self.health_check = health_check def get_required_params(self): params = super(SLBLoadBalancerHttpListener, self).get_required_params() params['StickySession'] = self.sticky_session params['HealthCheck'] = self.health_check return params @classmethod def create(cls, port, backend_port, algorithm, bandwidth, extra={}): if 'StickySession' not in extra: raise AttributeError('StickySession is required') if 'HealthCheck' not in extra: raise AttributeError('HealthCheck is required') sticky_session = extra['StickySession'] health_check = extra['HealthCheck'] return cls(port, backend_port, algorithm, bandwidth, sticky_session, health_check, extra=extra) class SLBLoadBalancerHttpsListener(SLBLoadBalancerListener): """ This class represents a rule to route https request to the backends. 
""" action = 'CreateLoadBalancerHTTPSListener' option_keys = ['XForwardedFor', 'StickySessionType', 'CookieTimeout', 'Cookie', 'HealthCheckDomain', 'HealthCheckURI', 'HealthCheckConnectPort', 'HealthyThreshold', 'UnhealthyThreshold', 'HealthCheckTimeout', 'HealthCheckInterval', 'HealthCheckHttpCode'] def __init__(self, port, backend_port, algorithm, bandwidth, sticky_session, health_check, certificate_id, extra=None): super(SLBLoadBalancerHttpsListener, self).__init__( port, backend_port, algorithm, bandwidth, extra=extra) self.sticky_session = sticky_session self.health_check = health_check self.certificate_id = certificate_id def get_required_params(self): params = super(SLBLoadBalancerHttpsListener, self).get_required_params() params['StickySession'] = self.sticky_session params['HealthCheck'] = self.health_check params['ServerCertificateId'] = self.certificate_id return params @classmethod def create(cls, port, backend_port, algorithm, bandwidth, extra={}): if 'StickySession' not in extra: raise AttributeError('StickySession is required') if 'HealthCheck' not in extra: raise AttributeError('HealthCheck is required') if 'ServerCertificateId' not in extra: raise AttributeError('ServerCertificateId is required') sticky_session = extra['StickySession'] health_check = extra['HealthCheck'] certificate_id = extra['ServerCertificateId'] return cls(port, backend_port, algorithm, bandwidth, sticky_session, health_check, certificate_id, extra=extra) class SLBLoadBalancerTcpListener(SLBLoadBalancerListener): """ This class represents a rule to route tcp request to the backends. 
""" action = 'CreateLoadBalancerTCPListener' option_keys = ['PersistenceTimeout', 'HealthCheckType', 'HealthCheckDomain', 'HealthCheckURI', 'HealthCheckConnectPort', 'HealthyThreshold', 'UnhealthyThreshold', 'HealthCheckConnectTimeout', 'HealthCheckInterval', 'HealthCheckHttpCode'] class SLBLoadBalancerUdpListener(SLBLoadBalancerTcpListener): """ This class represents a rule to route udp request to the backends. """ action = 'CreateLoadBalancerUDPListener' option_keys = ['PersistenceTimeout', 'HealthCheckConnectPort', 'HealthyThreshold', 'UnhealthyThreshold', 'HealthCheckConnectTimeout', 'HealthCheckInterval'] class SLBServerCertificate(ReprMixin, object): _repr_attributes = ['id', 'name', 'fingerprint'] def __init__(self, id, name, fingerprint): self.id = id self.name = name self.fingerprint = fingerprint PROTOCOL_TO_LISTENER_MAP = { 'http': SLBLoadBalancerHttpListener, 'https': SLBLoadBalancerHttpsListener, 'tcp': SLBLoadBalancerTcpListener, 'udp': SLBLoadBalancerUdpListener } class SLBDriver(Driver): """ Aliyun SLB load balancer driver. """ name = 'Aliyun Server Load Balancer' website = 'https://www.aliyun.com/product/slb' connectionCls = SLBConnection path = '/' namespace = None _VALUE_TO_ALGORITHM_MAP = SLB_SCHEDULER_TO_ALGORITHM _ALGORITHM_TO_VALUE_MAP = ALGORITHM_TO_SLB_SCHEDULER def __init__(self, access_id, secret, region): super(SLBDriver, self).__init__(access_id, secret) self.region = region def list_protocols(self): return list(PROTOCOL_TO_LISTENER_MAP.keys()) def list_balancers(self, ex_balancer_ids=None, ex_filters=None): """ List all loadbalancers @inherits :class:`Driver.list_balancers` :keyword ex_balancer_ids: a list of balancer ids to filter results Only balancers which's id in this list will be returned :type ex_balancer_ids: ``list`` of ``str`` :keyword ex_filters: attributes to filter results. 
Only balancers which have all the desired attributes and values will be returned :type ex_filters: ``dict`` """ params = {'Action': 'DescribeLoadBalancers', 'RegionId': self.region} if ex_balancer_ids and isinstance(ex_balancer_ids, list): params['LoadBalancerId'] = ','.join(ex_balancer_ids) if ex_filters and isinstance(ex_filters, dict): ex_filters.update(params) params = ex_filters resp_body = self.connection.request(self.path, params=params).object return self._to_balancers(resp_body) def create_balancer(self, name, port, protocol, algorithm, members, ex_bandwidth=None, ex_internet_charge_type=None, ex_address_type=None, ex_vswitch_id=None, ex_master_zone_id=None, ex_slave_zone_id=None, ex_client_token=None, **kwargs): """ Create a new load balancer instance @inherits: :class:`Driver.create_balancer` :keyword ex_bandwidth: The max bandwidth limit for `paybybandwidth` internet charge type, in Mbps unit :type ex_bandwidth: ``int`` in range [1, 1000] :keyword ex_internet_charge_type: The internet charge type :type ex_internet_charge_type: a ``str`` of `paybybandwidth` or `paybytraffic` :keyword ex_address_type: The listening IP address type :type ex_address_type: a ``str`` of `internet` or `intranet` :keyword ex_vswitch_id: The vswitch id in a VPC network :type ex_vswitch_id: ``str`` :keyword ex_master_zone_id: The id of the master availability zone :type ex_master_zone_id: ``str`` :keyword ex_slave_zone_id: The id of the slave availability zone :type ex_slave_zone_id: ``str`` :keyword ex_client_token: The token generated by client to identify requests :type ex_client_token: ``str`` """ # 1.Create load balancer params = {'Action': 'CreateLoadBalancer', 'RegionId': self.region} if name: params['LoadBalancerName'] = name if not port: raise AttributeError('port is required') if not protocol: # NOTE(samsong8610): Use http listener as default protocol = 'http' if protocol not in PROTOCOL_TO_LISTENER_MAP: raise AttributeError('unsupport protocol %s' % protocol) # 
Bandwidth in range [1, 1000] Mbps bandwidth = -1 if ex_bandwidth: try: bandwidth = int(ex_bandwidth) except ValueError: raise AttributeError('ex_bandwidth should be a integer in ' 'range [1, 1000].') params['Bandwidth'] = bandwidth if ex_internet_charge_type: if ex_internet_charge_type.lower() == 'paybybandwidth': if bandwidth == -1: raise AttributeError('PayByBandwidth internet charge type' ' need ex_bandwidth be set') params['InternetChargeType'] = ex_internet_charge_type if ex_address_type: if ex_address_type.lower() not in ('internet', 'intranet'): raise AttributeError('ex_address_type should be "internet" ' 'or "intranet"') params['AddressType'] = ex_address_type if ex_vswitch_id: params['VSwitchId'] = ex_vswitch_id if ex_master_zone_id: params['MasterZoneId'] = ex_master_zone_id if ex_slave_zone_id: params['SlaveZoneId'] = ex_slave_zone_id if ex_client_token: params['ClientToken'] = ex_client_token if members and isinstance(members, list): backend_ports = [member.port for member in members] if len(set(backend_ports)) != 1: raise AttributeError('the ports of members should be unique') # NOTE(samsong8610): If members do not provide backend port, # default to listening port backend_port = backend_ports[0] or port else: backend_port = port balancer = None try: resp_body = self.connection.request(self.path, params).object balancer = self._to_balancer(resp_body) balancer.port = port # 2.Add backend servers if members is None: members = [] for member in members: self.balancer_attach_member(balancer, member) # 3.Create listener # NOTE(samsong8610): Assume only create a listener which uses all # the bandwidth. 
self.ex_create_listener(balancer, backend_port, protocol, algorithm, bandwidth, **kwargs) self.ex_start_listener(balancer, port) return balancer except Exception: e = sys.exc_info()[1] if balancer is not None: try: self.destroy_balancer(balancer) except Exception: pass raise e def destroy_balancer(self, balancer): params = {'Action': 'DeleteLoadBalancer', 'LoadBalancerId': balancer.id} resp = self.connection.request(self.path, params) return resp.success() def get_balancer(self, balancer_id): balancers = self.list_balancers(ex_balancer_ids=[balancer_id]) if len(balancers) != 1: raise LibcloudError('could not find load balancer with id %s' % balancer_id) return balancers[0] def balancer_attach_compute_node(self, balancer, node): if len(node.public_ips) > 0: ip = node.public_ips[0] else: ip = node.private_ips[0] member = Member(id=node.id, ip=ip, port=balancer.port) return self.balancer_attach_member(balancer, member) def balancer_attach_member(self, balancer, member): params = {'Action': 'AddBackendServers', 'LoadBalancerId': balancer.id} if member and isinstance(member, Member): params['BackendServers'] = self._to_servers_json([member]) self.connection.request(self.path, params) return member def balancer_detach_member(self, balancer, member): params = {'Action': 'RemoveBackendServers', 'LoadBalancerId': balancer.id} if member and isinstance(member, Member): params['BackendServers'] = self._list_to_json([member.id]) self.connection.request(self.path, params) return member def balancer_list_members(self, balancer): attribute = self.ex_get_balancer_attribute(balancer) members = [Member(server['ServerId'], None, None, balancer=balancer, extra={'Weight': server['Weight']}) for server in attribute.backend_servers] return members def ex_get_balancer_attribute(self, balancer): """ Get balancer attribute :param balancer: the balancer to get attribute :type balancer: ``LoadBalancer`` :return: the balancer attribute :rtype: ``SLBLoadBalancerAttribute`` """ params = 
{'Action': 'DescribeLoadBalancerAttribute', 'LoadBalancerId': balancer.id} resp_body = self.connection.request(self.path, params).object attribute = self._to_balancer_attribute(resp_body) return attribute def ex_list_listeners(self, balancer): """ Get all listener related to the given balancer :param balancer: the balancer to list listeners :type balancer: ``LoadBalancer`` :return: a list of listeners :rtype: ``list`` of ``SLBLoadBalancerListener`` """ attribute = self.ex_get_balancer_attribute(balancer) listeners = [SLBLoadBalancerListener(each['ListenerPort'], None, None, None) for each in attribute.listeners] return listeners def ex_create_listener(self, balancer, backend_port, protocol, algorithm, bandwidth, **kwargs): """ Create load balancer listening rule. :param balancer: the balancer which the rule belongs to. The listener created will listen on the port of the the balancer as default. 'ListenerPort' in kwargs will *OVERRIDE* it. :type balancer: ``LoadBalancer`` :param backend_port: the backend server port :type backend_port: ``int`` :param protocol: the balancer protocol, default to http :type protocol: ``str`` :param algorithm: the balancer routing algorithm :type algorithm: ``Algorithm`` :param bandwidth: the listener bandwidth limits :type bandwidth: ``str`` :return: the created listener :rtype: ``SLBLoadBalancerListener`` """ cls = PROTOCOL_TO_LISTENER_MAP.get(protocol, SLBLoadBalancerHttpListener) if 'ListenerPort' in kwargs: port = kwargs['ListenerPort'] else: port = balancer.port listener = cls.create(port, backend_port, algorithm, bandwidth, extra=kwargs) params = listener.get_create_params() params['LoadBalancerId'] = balancer.id params['RegionId'] = self.region resp = self.connection.request(self.path, params) return resp.success() def ex_start_listener(self, balancer, port): """ Start balancer's listener listening the given port. 
:param balancer: a load balancer :type balancer: ``LoadBalancer`` :param port: listening port :type port: ``int`` :return: whether operation is success :rtype: ``bool`` """ params = {'Action': 'StartLoadBalancerListener', 'LoadBalancerId': balancer.id, 'ListenerPort': port} resp = self.connection.request(self.path, params) return resp.success() def ex_stop_listener(self, balancer, port): """ Stop balancer's listener listening the given port. :param balancer: a load balancer :type balancer: ``LoadBalancer`` :param port: listening port :type port: ``int`` :return: whether operation is success :rtype: ``bool`` """ params = {'Action': 'StopLoadBalancerListener', 'LoadBalancerId': balancer.id, 'ListenerPort': port} resp = self.connection.request(self.path, params) return resp.success() def ex_upload_certificate(self, name, server_certificate, private_key): """ Upload certificate and private key for https load balancer listener :param name: the certificate name :type name: ``str`` :param server_certificate: the content of the certificate to upload in PEM format :type server_certificate: ``str`` :param private_key: the content of the private key to upload in PEM format :type private_key: ``str`` :return: new created certificate info :rtype: ``SLBServerCertificate`` """ params = {'Action': 'UploadServerCertificate', 'RegionId': self.region, 'ServerCertificate': server_certificate, 'PrivateKey': private_key} if name: params['ServerCertificateName'] = name resp_body = self.connection.request(self.path, params).object return self._to_server_certificate(resp_body) def ex_list_certificates(self, certificate_ids=[]): """ List all server certificates :param certificate_ids: certificate ids to filter results :type certificate_ids: ``str`` :return: certificates :rtype: ``SLBServerCertificate`` """ params = {'Action': 'DescribeServerCertificates', 'RegionId': self.region} if certificate_ids and isinstance(certificate_ids, list): params['ServerCertificateId'] = 
','.join(certificate_ids) resp_body = self.connection.request(self.path, params).object cert_elements = findall(resp_body, 'ServerCertificates/ServerCertificate', namespace=self.namespace) certificates = [self._to_server_certificate(el) for el in cert_elements] return certificates def ex_delete_certificate(self, certificate_id): """ Delete the given server certificate :param certificate_id: the id of the certificate to delete :type certificate_id: ``str`` :return: whether process is success :rtype: ``bool`` """ params = {'Action': 'DeleteServerCertificate', 'RegionId': self.region, 'ServerCertificateId': certificate_id} resp = self.connection.request(self.path, params) return resp.success() def ex_set_certificate_name(self, certificate_id, name): """ Set server certificate name. :param certificate_id: the id of the server certificate to update :type certificate_id: ``str`` :param name: the new name :type name: ``str`` :return: whether updating is success :rtype: ``bool`` """ params = {'Action': 'SetServerCertificateName', 'RegionId': self.region, 'ServerCertificateId': certificate_id, 'ServerCertificateName': name} resp = self.connection.request(self.path, params) return resp.success() def _to_balancers(self, element): xpath = 'LoadBalancers/LoadBalancer' return [self._to_balancer(el) for el in findall(element=element, xpath=xpath, namespace=self.namespace)] def _to_balancer(self, el): _id = findtext(element=el, xpath='LoadBalancerId', namespace=self.namespace) name = findtext(element=el, xpath='LoadBalancerName', namespace=self.namespace) status = findtext(element=el, xpath='LoadBalancerStatus', namespace=self.namespace) state = STATE_MAPPINGS.get(status, State.UNKNOWN) address = findtext(element=el, xpath='Address', namespace=self.namespace) extra = self._get_extra_dict( el, RESOURCE_EXTRA_ATTRIBUTES_MAP['balancer']) balancer = LoadBalancer(id=_id, name=name, state=state, ip=address, port=None, driver=self, extra=extra) return balancer def 
_create_list_params(self, params, items, label): """ return parameter list """ if isinstance(items, str): items = [items] for index, item in enumerate(items): params[label % (index + 1)] = item return params def _get_extra_dict(self, element, mapping): """ Extract attributes from the element based on rules provided in the mapping dictionary. :param element: Element to parse the values from. :type element: xml.etree.ElementTree.Element. :param mapping: Dictionary with the extra layout :type node: :class:`Node` :rtype: ``dict`` """ extra = {} for attribute, values in mapping.items(): transform_func = values['transform_func'] value = findattr(element=element, xpath=values['xpath'], namespace=self.namespace) if value: try: extra[attribute] = transform_func(value) except Exception: extra[attribute] = None else: extra[attribute] = value return extra def _to_servers_json(self, members): servers = [] for each in members: server = {'ServerId': each.id, 'Weight': '100'} if 'Weight' in each.extra: server['Weight'] = each.extra['Weight'] servers.append(server) try: return json.dumps(servers) except Exception: raise AttributeError('could not convert member to backend server') def _to_balancer_attribute(self, element): balancer = self._to_balancer(element) port_proto_elements = findall( element, 'ListenerPortsAndProtocol/ListenerPortAndProtocol', namespace=self.namespace) if len(port_proto_elements) > 0: listeners = [self._to_port_and_protocol(el) for el in port_proto_elements] else: port_elements = findall(element, 'ListenerPorts/ListenerPort', namespace=self.namespace) listeners = [{'ListenerPort': el.text, 'ListenerProtocol': 'http'} for el in port_elements] server_elements = findall(element, 'BackendServers/BackendServer', namespace=self.namespace) backend_servers = [self._to_server_and_weight(el) for el in server_elements] return SLBLoadBalancerAttribute(balancer, listeners, backend_servers) def _to_port_and_protocol(self, el): port = findtext(el, 'ListenerPort', 
namespace=self.namespace) protocol = findtext(el, 'ListenerProtocol', namespace=self.namespace) return {'ListenerPort': port, 'ListenerProtocol': protocol} def _to_server_and_weight(self, el): server_id = findtext(el, 'ServerId', namespace=self.namespace) weight = findtext(el, 'Weight', namespace=self.namespace) return {'ServerId': server_id, 'Weight': weight} def _to_server_certificate(self, el): _id = findtext(el, 'ServerCertificateId', namespace=self.namespace) name = findtext(el, 'ServerCertificateName', namespace=self.namespace) fingerprint = findtext(el, 'Fingerprint', namespace=self.namespace) return SLBServerCertificate(id=_id, name=name, fingerprint=fingerprint) def _list_to_json(self, value): try: return json.dumps(value) except Exception: return '[]' apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/rackspace.py0000664000175000017500000015706313153541406024624 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
class RackspaceResponse(JsonResponse):
    """Response wrapper for the Rackspace load balancer API (JSON bodies)."""

    def parse_body(self):
        # Empty bodies (e.g. 202 Accepted on async operations) have nothing
        # to decode.
        if self.body:
            return super(RackspaceResponse, self).parse_body()
        return None

    def success(self):
        # Any 2xx status code counts as success.
        status_code = int(self.status)
        return 200 <= status_code <= 299
class RackspaceHTTPHealthMonitor(RackspaceHealthMonitor):
    """
    A HTTP health monitor adds extra features to a Rackspace health monitor.

    :param path: the HTTP path to monitor.
    :type path: ``str``

    :param body_regex: Regular expression used to evaluate the body of
                       the HTTP response.
    :type body_regex: ``str``

    :param status_regex: Regular expression used to evaluate the HTTP
                         status code of the response.
    :type status_regex: ``str``
    """

    def __init__(self, type, delay, timeout, attempts_before_deactivation,
                 path, body_regex, status_regex):
        super(RackspaceHTTPHealthMonitor, self).__init__(
            type, delay, timeout, attempts_before_deactivation)
        self.path = path
        self.body_regex = body_regex
        self.status_regex = status_regex

    def __repr__(self):
        # Fix: the format string was empty ('' % (...)), which raises
        # TypeError ("not all arguments converted") whenever repr() is
        # called. Restore a descriptive representation.
        return ('<RackspaceHTTPHealthMonitor: type=%s, delay=%s, timeout=%s, '
                'attempts_before_deactivation=%s, path=%s, body_regex=%s, '
                'status_regex=%s>' %
                (self.type, self.delay, self.timeout,
                 self.attempts_before_deactivation, self.path,
                 self.body_regex, self.status_regex))

    def _to_dict(self):
        """Return the wire-format dict, extending the base monitor fields."""
        super_dict = super(RackspaceHTTPHealthMonitor, self)._to_dict()
        super_dict['path'] = self.path
        super_dict['statusRegex'] = self.status_regex

        # bodyRegex is optional; only send it when non-empty.
        if self.body_regex:
            super_dict['bodyRegex'] = self.body_regex

        return super_dict
class RackspaceAccessRuleType(object):
    """Enumeration of access rule types: traffic is either allowed or denied."""

    ALLOW = 0
    DENY = 1

    # Maps the numeric constants onto the strings the API understands.
    _RULE_TYPE_STRING_MAP = {0: 'ALLOW', 1: 'DENY'}
class RackspaceConnection(RackspaceConnection, PollingConnection):
    """
    Connection to the Rackspace load balancer endpoint, with polling
    support for the API's asynchronous operations.
    """

    responseCls = RackspaceResponse
    auth_url = AUTH_URL
    poll_interval = 2
    timeout = 80
    cache_busting = True

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        headers = headers or {}
        params = params or {}

        # Write operations carry JSON-encoded bodies.
        if method == 'POST' or method == 'PUT':
            headers['Content-Type'] = 'application/json'

        return super(RackspaceConnection, self).request(
            action=action, params=params, data=data, method=method,
            headers=headers)

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        # Poll the same resource again with a plain GET.
        return {'action': request_kwargs['action'], 'method': 'GET'}

    def has_completed(self, response):
        state = response.object['loadBalancer']['status']

        if state == 'ERROR':
            raise LibcloudError("Load balancer entered an ERROR state.",
                                driver=self.driver)

        return state == 'ACTIVE'

    def encode_data(self, data):
        # Callers already pass pre-serialized JSON strings.
        return data
Algorithm.WEIGHTED_ROUND_ROBIN, 'WEIGHTED_LEAST_CONNECTIONS': Algorithm.WEIGHTED_LEAST_CONNECTIONS } _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) def __init__(self, key, secret=None, secure=True, host=None, port=None, region='ord', **kwargs): ex_force_region = kwargs.pop('ex_force_region', None) if ex_force_region: # For backward compatibility region = ex_force_region OpenStackDriverMixin.__init__(self, **kwargs) super(RackspaceLBDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, region=region) @classmethod def list_regions(cls): return ENDPOINT_ARGS_MAP.keys() def _ex_connection_class_kwargs(self): endpoint_args = ENDPOINT_ARGS_MAP[self.region] kwargs = self.openstack_connection_kwargs() kwargs['get_endpoint_args'] = endpoint_args return kwargs def list_protocols(self): return self._to_protocols( self.connection.request('/loadbalancers/protocols').object) def ex_list_protocols_with_default_ports(self): """ List protocols with default ports. :rtype: ``list`` of ``tuple`` :return: A list of protocols with default ports included. """ return self._to_protocols_with_default_ports( self.connection.request('/loadbalancers/protocols').object) def list_balancers(self, ex_member_address=None, ex_status=None, ex_changes_since=None, ex_params={}): """ @inherits: :class:`Driver.list_balancers` :param ex_member_address: Optional IP address of the attachment member. If provided, only the load balancers which have this member attached will be returned. :type ex_member_address: ``str`` :param ex_status: Optional. Filter balancers by status :type ex_status: ``str`` :param ex_changes_since: Optional. List all load balancers that have changed since the specified date/time :type ex_changes_since: ``str`` :param ex_params: Optional. 
Set parameters to be submitted to the API in the query string :type ex_params: ``dict`` """ params = {} if ex_member_address: params['nodeaddress'] = ex_member_address if ex_status: params['status'] = ex_status if ex_changes_since: params['changes-since'] = ex_changes_since for key, value in ex_params.items(): params[key] = value return self._to_balancers( self.connection.request('/loadbalancers', params=params).object) def create_balancer(self, name, members, protocol='http', port=80, algorithm=DEFAULT_ALGORITHM): return self.ex_create_balancer(name, members, protocol, port, algorithm) def ex_create_balancer(self, name, members, protocol='http', port=80, algorithm=DEFAULT_ALGORITHM, vip='PUBLIC'): """ Creates a new load balancer instance :param name: Name of the new load balancer (required) :type name: ``str`` :param members: ``list`` of:class:`Member`s to attach to balancer :type members: ``list`` of :class:`Member` :param protocol: Loadbalancer protocol, defaults to http. :type protocol: ``str`` :param port: Port the load balancer should listen on, defaults to 80 :type port: ``str`` :param algorithm: Load balancing algorithm, defaults to LBAlgorithm.ROUND_ROBIN :type algorithm: :class:`Algorithm` :param vip: Virtual ip type of PUBLIC, SERVICENET, or ID of a virtual ip :type vip: ``str`` :rtype: :class:`LoadBalancer` """ balancer_attrs = self._kwargs_to_mutable_attrs( name=name, protocol=protocol, port=port, algorithm=algorithm, vip=vip) balancer_attrs.update({ 'nodes': [self._member_attributes(member) for member in members], }) # balancer_attrs['nodes'] = ['fu'] balancer_object = {"loadBalancer": balancer_attrs} resp = self.connection.request('/loadbalancers', method='POST', data=json.dumps(balancer_object)) return self._to_balancer(resp.object['loadBalancer']) def _member_attributes(self, member): member_attributes = {'address': member.ip, 'port': member.port} member_attributes.update(self._kwargs_to_mutable_member_attrs( **member.extra)) # If the condition is 
not specified on the member, then it should be # set to ENABLED by default if 'condition' not in member_attributes: member_attributes['condition'] =\ self.CONDITION_LB_MEMBER_MAP[MemberCondition.ENABLED] return member_attributes def destroy_balancer(self, balancer): uri = '/loadbalancers/%s' % (balancer.id) resp = self.connection.request(uri, method='DELETE') return resp.status == httplib.ACCEPTED def ex_destroy_balancers(self, balancers): """ Destroys a list of Balancers (the API supports up to 10). :param balancers: A list of Balancers to destroy. :type balancers: ``list`` of :class:`LoadBalancer` :return: Returns whether the destroy request was accepted. :rtype: ``bool`` """ ids = [('id', balancer.id) for balancer in balancers] resp = self.connection.request('/loadbalancers', method='DELETE', params=ids) return resp.status == httplib.ACCEPTED def get_balancer(self, balancer_id): uri = '/loadbalancers/%s' % (balancer_id) resp = self.connection.request(uri) return self._to_balancer(resp.object["loadBalancer"]) def balancer_attach_member(self, balancer, member): member_object = {"nodes": [self._member_attributes(member)]} uri = '/loadbalancers/%s/nodes' % (balancer.id) resp = self.connection.request(uri, method='POST', data=json.dumps(member_object)) return self._to_members(resp.object, balancer)[0] def ex_balancer_attach_members(self, balancer, members): """ Attaches a list of members to a load balancer. :param balancer: The Balancer to which members will be attached. :type balancer: :class:`LoadBalancer` :param members: A list of Members to attach. 
:type members: ``list`` of :class:`Member` :rtype: ``list`` of :class:`Member` """ member_objects = {"nodes": [self._member_attributes(member) for member in members]} uri = '/loadbalancers/%s/nodes' % (balancer.id) resp = self.connection.request(uri, method='POST', data=json.dumps(member_objects)) return self._to_members(resp.object, balancer) def balancer_detach_member(self, balancer, member): # Loadbalancer always needs to have at least 1 member. # Last member cannot be detached. You can only disable it or destroy # the balancer. uri = '/loadbalancers/%s/nodes/%s' % (balancer.id, member.id) resp = self.connection.request(uri, method='DELETE') return resp.status == httplib.ACCEPTED def ex_balancer_detach_members(self, balancer, members): """ Detaches a list of members from a balancer (the API supports up to 10). This method blocks until the detach request has been processed and the balancer is in a RUNNING state again. :param balancer: The Balancer to detach members from. :type balancer: :class:`LoadBalancer` :param members: A list of Members to detach. :type members: ``list`` of :class:`Member` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ accepted = self.ex_balancer_detach_members_no_poll(balancer, members) if not accepted: msg = 'Detach members request was not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_balancer_detach_members_no_poll(self, balancer, members): """ Detaches a list of members from a balancer (the API supports up to 10). This method returns immediately. :param balancer: The Balancer to detach members from. :type balancer: :class:`LoadBalancer` :param members: A list of Members to detach. :type members: ``list`` of :class:`Member` :return: Returns whether the detach request was accepted. 
:rtype: ``bool`` """ uri = '/loadbalancers/%s/nodes' % (balancer.id) ids = [('id', member.id) for member in members] resp = self.connection.request(uri, method='DELETE', params=ids) return resp.status == httplib.ACCEPTED def balancer_list_members(self, balancer): uri = '/loadbalancers/%s/nodes' % (balancer.id) data = self.connection.request(uri).object return self._to_members(data, balancer) def update_balancer(self, balancer, **kwargs): attrs = self._kwargs_to_mutable_attrs(**kwargs) resp = self.connection.async_request( action='/loadbalancers/%s' % balancer.id, method='PUT', data=json.dumps(attrs)) return self._to_balancer(resp.object["loadBalancer"]) def ex_update_balancer_no_poll(self, balancer, **kwargs): """ Update balancer no poll. @inherits: :class:`Driver.update_balancer` """ attrs = self._kwargs_to_mutable_attrs(**kwargs) resp = self.connection.request( action='/loadbalancers/%s' % balancer.id, method='PUT', data=json.dumps(attrs)) return resp.status == httplib.ACCEPTED def ex_balancer_update_member(self, balancer, member, **kwargs): """ Updates a Member's extra attributes for a Balancer. The attributes can include 'weight' or 'condition'. This method blocks until the update request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to update the member on. :type balancer: :class:`LoadBalancer` :param member: Member which should be used :type member: :class:`Member` :keyword **kwargs: New attributes. Should contain either 'weight' or 'condition'. 'condition' can be set to 'ENABLED', 'DISABLED'. or 'DRAINING'. 'weight' can be set to a positive integer between 1 and 100, with a higher weight indicating that the node will receive more traffic (assuming the Balancer is using a weighted algorithm). :type **kwargs: ``dict`` :return: Updated Member. 
:rtype: :class:`Member` """ accepted = self.ex_balancer_update_member_no_poll( balancer, member, **kwargs) if not accepted: msg = 'Update member attributes was not accepted' raise LibcloudError(msg, driver=self) balancer = self._get_updated_balancer(balancer) members = balancer.extra['members'] updated_members = [m for m in members if m.id == member.id] if not updated_members: raise LibcloudError('Could not find updated member') return updated_members[0] def ex_balancer_update_member_no_poll(self, balancer, member, **kwargs): """ Updates a Member's extra attributes for a Balancer. The attribute can include 'weight' or 'condition'. This method returns immediately. :param balancer: Balancer to update the member on. :type balancer: :class:`LoadBalancer` :param member: Member which should be used :type member: :class:`Member` :keyword **kwargs: New attributes. Should contain either 'weight' or 'condition'. 'condition' can be set to 'ENABLED', 'DISABLED'. or 'DRAINING'. 'weight' can be set to a positive integer between 1 and 100, with a higher weight indicating that the node will receive more traffic (assuming the Balancer is using a weighted algorithm). :type **kwargs: ``dict`` :return: Returns whether the update request was accepted. :rtype: ``bool`` """ resp = self.connection.request( action='/loadbalancers/%s/nodes/%s' % (balancer.id, member.id), method='PUT', data=json.dumps(self._kwargs_to_mutable_member_attrs(**kwargs)) ) return resp.status == httplib.ACCEPTED def ex_list_algorithm_names(self): """ Lists algorithms supported by the API. Returned as strings because this list may change in the future. :rtype: ``list`` of ``str`` """ response = self.connection.request('/loadbalancers/algorithms') return [a["name"].upper() for a in response.object["algorithms"]] def ex_get_balancer_error_page(self, balancer): """ List error page configured for the specified load balancer. 
:param balancer: Balancer which should be used :type balancer: :class:`LoadBalancer` :rtype: ``str`` """ uri = '/loadbalancers/%s/errorpage' % (balancer.id) resp = self.connection.request(uri) return resp.object["errorpage"]["content"] def ex_balancer_access_list(self, balancer): """ List the access list. :param balancer: Balancer which should be used :type balancer: :class:`LoadBalancer` :rtype: ``list`` of :class:`RackspaceAccessRule` """ uri = '/loadbalancers/%s/accesslist' % (balancer.id) resp = self.connection.request(uri) return [self._to_access_rule(el) for el in resp.object["accessList"]] def _get_updated_balancer(self, balancer): """ Updating a balancer's attributes puts a balancer into 'PENDING_UPDATE' status. Wait until the balancer is back in 'ACTIVE' status and then return the individual balancer details call. """ resp = self.connection.async_request( action='/loadbalancers/%s' % balancer.id, method='GET') return self._to_balancer(resp.object['loadBalancer']) def ex_update_balancer_health_monitor(self, balancer, health_monitor): """ Sets a Balancer's health monitor. This method blocks until the update request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to update. :type balancer: :class:`LoadBalancer` :param health_monitor: Health Monitor for the balancer. :type health_monitor: :class:`RackspaceHealthMonitor` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ accepted = self.ex_update_balancer_health_monitor_no_poll( balancer, health_monitor) if not accepted: msg = 'Update health monitor request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_update_balancer_health_monitor_no_poll(self, balancer, health_monitor): """ Sets a Balancer's health monitor. This method returns immediately. :param balancer: Balancer to update health monitor on. :type balancer: :class:`LoadBalancer` :param health_monitor: Health Monitor for the balancer. 
:type health_monitor: :class:`RackspaceHealthMonitor` :return: Returns whether the update request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/healthmonitor' % (balancer.id) resp = self.connection.request( uri, method='PUT', data=json.dumps(health_monitor._to_dict())) return resp.status == httplib.ACCEPTED def ex_disable_balancer_health_monitor(self, balancer): """ Disables a Balancer's health monitor. This method blocks until the disable request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to disable health monitor on. :type balancer: :class:`LoadBalancer` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ if not self.ex_disable_balancer_health_monitor_no_poll(balancer): msg = 'Disable health monitor request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_disable_balancer_health_monitor_no_poll(self, balancer): """ Disables a Balancer's health monitor. This method returns immediately. :param balancer: Balancer to disable health monitor on. :type balancer: :class:`LoadBalancer` :return: Returns whether the disable request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/healthmonitor' % (balancer.id) resp = self.connection.request(uri, method='DELETE') return resp.status == httplib.ACCEPTED def ex_update_balancer_connection_throttle(self, balancer, connection_throttle): """ Updates a Balancer's connection throttle. This method blocks until the update request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to update connection throttle on. :type balancer: :class:`LoadBalancer` :param connection_throttle: Connection Throttle for the balancer. :type connection_throttle: :class:`RackspaceConnectionThrottle` :return: Updated Balancer. 
:rtype: :class:`LoadBalancer` """ accepted = self.ex_update_balancer_connection_throttle_no_poll( balancer, connection_throttle) if not accepted: msg = 'Update connection throttle request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_update_balancer_connection_throttle_no_poll(self, balancer, connection_throttle): """ Sets a Balancer's connection throttle. This method returns immediately. :param balancer: Balancer to update connection throttle on. :type balancer: :class:`LoadBalancer` :param connection_throttle: Connection Throttle for the balancer. :type connection_throttle: :class:`RackspaceConnectionThrottle` :return: Returns whether the update request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/connectionthrottle' % (balancer.id) resp = self.connection.request( uri, method='PUT', data=json.dumps(connection_throttle._to_dict())) return resp.status == httplib.ACCEPTED def ex_disable_balancer_connection_throttle(self, balancer): """ Disables a Balancer's connection throttle. This method blocks until the disable request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to disable connection throttle on. :type balancer: :class:`LoadBalancer` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ if not self.ex_disable_balancer_connection_throttle_no_poll(balancer): msg = 'Disable connection throttle request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_disable_balancer_connection_throttle_no_poll(self, balancer): """ Disables a Balancer's connection throttle. This method returns immediately. :param balancer: Balancer to disable connection throttle on. :type balancer: :class:`LoadBalancer` :return: Returns whether the disable request was accepted. 
:rtype: ``bool`` """ uri = '/loadbalancers/%s/connectionthrottle' % (balancer.id) resp = self.connection.request(uri, method='DELETE') return resp.status == httplib.ACCEPTED def ex_enable_balancer_connection_logging(self, balancer): """ Enables connection logging for a Balancer. This method blocks until the enable request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to enable connection logging on. :type balancer: :class:`LoadBalancer` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ if not self.ex_enable_balancer_connection_logging_no_poll(balancer): msg = 'Enable connection logging request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_enable_balancer_connection_logging_no_poll(self, balancer): """ Enables connection logging for a Balancer. This method returns immediately. :param balancer: Balancer to enable connection logging on. :type balancer: :class:`LoadBalancer` :return: Returns whether the enable request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/connectionlogging' % (balancer.id) resp = self.connection.request( uri, method='PUT', data=json.dumps({'connectionLogging': {'enabled': True}}) ) return resp.status == httplib.ACCEPTED def ex_disable_balancer_connection_logging(self, balancer): """ Disables connection logging for a Balancer. This method blocks until the enable request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to disable connection logging on. :type balancer: :class:`LoadBalancer` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ if not self.ex_disable_balancer_connection_logging_no_poll(balancer): msg = 'Disable connection logging request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_disable_balancer_connection_logging_no_poll(self, balancer): """ Disables connection logging for a Balancer. 
This method returns immediately. :param balancer: Balancer to disable connection logging on. :type balancer: :class:`LoadBalancer` :return: Returns whether the disable request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/connectionlogging' % (balancer.id) resp = self.connection.request( uri, method='PUT', data=json.dumps({'connectionLogging': {'enabled': False}}) ) return resp.status == httplib.ACCEPTED def ex_enable_balancer_session_persistence(self, balancer): """ Enables session persistence for a Balancer by setting the persistence type to 'HTTP_COOKIE'. This method blocks until the enable request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to enable session persistence on. :type balancer: :class:`LoadBalancer` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ if not self.ex_enable_balancer_session_persistence_no_poll(balancer): msg = 'Enable session persistence request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_enable_balancer_session_persistence_no_poll(self, balancer): """ Enables session persistence for a Balancer by setting the persistence type to 'HTTP_COOKIE'. This method returns immediately. :param balancer: Balancer to enable session persistence on. :type balancer: :class:`LoadBalancer` :return: Returns whether the enable request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/sessionpersistence' % (balancer.id) resp = self.connection.request( uri, method='PUT', data=json.dumps( {'sessionPersistence': {'persistenceType': 'HTTP_COOKIE'}}) ) return resp.status == httplib.ACCEPTED def ex_disable_balancer_session_persistence(self, balancer): """ Disables session persistence for a Balancer. This method blocks until the disable request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to disable session persistence on. 
:type balancer: :class:`LoadBalancer` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ if not self.ex_disable_balancer_session_persistence_no_poll(balancer): msg = 'Disable session persistence request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_disable_balancer_session_persistence_no_poll(self, balancer): """ Disables session persistence for a Balancer. This method returns immediately. :param balancer: Balancer to disable session persistence for. :type balancer: :class:`LoadBalancer` :return: Returns whether the disable request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/sessionpersistence' % (balancer.id) resp = self.connection.request(uri, method='DELETE') return resp.status == httplib.ACCEPTED def ex_update_balancer_error_page(self, balancer, page_content): """ Updates a Balancer's custom error page. This method blocks until the update request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to update the custom error page for. :type balancer: :class:`LoadBalancer` :param page_content: HTML content for the custom error page. :type page_content: ``str`` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ accepted = self.ex_update_balancer_error_page_no_poll(balancer, page_content) if not accepted: msg = 'Update error page request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_update_balancer_error_page_no_poll(self, balancer, page_content): """ Updates a Balancer's custom error page. This method returns immediately. :param balancer: Balancer to update the custom error page for. :type balancer: :class:`LoadBalancer` :param page_content: HTML content for the custom error page. :type page_content: ``str`` :return: Returns whether the update request was accepted. 
:rtype: ``bool`` """ uri = '/loadbalancers/%s/errorpage' % (balancer.id) resp = self.connection.request( uri, method='PUT', data=json.dumps({'errorpage': {'content': page_content}}) ) return resp.status == httplib.ACCEPTED def ex_disable_balancer_custom_error_page(self, balancer): """ Disables a Balancer's custom error page, returning its error page to the Rackspace-provided default. This method blocks until the disable request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to disable the custom error page for. :type balancer: :class:`LoadBalancer` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ if not self.ex_disable_balancer_custom_error_page_no_poll(balancer): msg = 'Disable custom error page request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_disable_balancer_custom_error_page_no_poll(self, balancer): """ Disables a Balancer's custom error page, returning its error page to the Rackspace-provided default. This method returns immediately. :param balancer: Balancer to disable the custom error page for. :type balancer: :class:`LoadBalancer` :return: Returns whether the disable request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/errorpage' % (balancer.id) resp = self.connection.request(uri, method='DELETE') # Load Balancer API currently returns 200 OK on custom error page # delete. return resp.status == httplib.OK or resp.status == httplib.ACCEPTED def ex_create_balancer_access_rule(self, balancer, rule): """ Adds an access rule to a Balancer's access list. This method blocks until the update request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to create the access rule for. :type balancer: :class:`LoadBalancer` :param rule: Access Rule to add to the balancer. :type rule: :class:`RackspaceAccessRule` :return: The created access rule. 
:rtype: :class:`RackspaceAccessRule` """ accepted = self.ex_create_balancer_access_rule_no_poll(balancer, rule) if not accepted: msg = 'Create access rule not accepted' raise LibcloudError(msg, driver=self) balancer = self._get_updated_balancer(balancer) access_list = balancer.extra['accessList'] created_rule = self._find_matching_rule(rule, access_list) if not created_rule: raise LibcloudError('Could not find created rule') return created_rule def ex_create_balancer_access_rule_no_poll(self, balancer, rule): """ Adds an access rule to a Balancer's access list. This method returns immediately. :param balancer: Balancer to create the access rule for. :type balancer: :class:`LoadBalancer` :param rule: Access Rule to add to the balancer. :type rule: :class:`RackspaceAccessRule` :return: Returns whether the create request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/accesslist' % (balancer.id) resp = self.connection.request( uri, method='POST', data=json.dumps({'networkItem': rule._to_dict()}) ) return resp.status == httplib.ACCEPTED def ex_create_balancer_access_rules(self, balancer, rules): """ Adds a list of access rules to a Balancer's access list. This method blocks until the update request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to create the access rule for. :type balancer: :class:`LoadBalancer` :param rules: List of :class:`RackspaceAccessRule` to add to the balancer. :type rules: ``list`` of :class:`RackspaceAccessRule` :return: The created access rules. 
:rtype: :class:`RackspaceAccessRule` """ accepted = self.ex_create_balancer_access_rules_no_poll(balancer, rules) if not accepted: msg = 'Create access rules not accepted' raise LibcloudError(msg, driver=self) balancer = self._get_updated_balancer(balancer) access_list = balancer.extra['accessList'] created_rules = [] for r in rules: matched_rule = self._find_matching_rule(r, access_list) if matched_rule: created_rules.append(matched_rule) if len(created_rules) != len(rules): raise LibcloudError('Could not find all created rules') return created_rules def _find_matching_rule(self, rule_to_find, access_list): """ LB API does not return the ID for the newly created rules, so we have to search the list to find the rule with a matching rule type and address to return an object with the right identifier.it. The API enforces rule type and address uniqueness. """ for r in access_list: if rule_to_find.rule_type == r.rule_type and\ rule_to_find.address == r.address: return r return None def ex_create_balancer_access_rules_no_poll(self, balancer, rules): """ Adds a list of access rules to a Balancer's access list. This method returns immediately. :param balancer: Balancer to create the access rule for. :type balancer: :class:`LoadBalancer` :param rules: List of :class:`RackspaceAccessRule` to add to the balancer. :type rules: ``list`` of :class:`RackspaceAccessRule` :return: Returns whether the create request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/accesslist' % (balancer.id) resp = self.connection.request( uri, method='POST', data=json.dumps({'accessList': [rule._to_dict() for rule in rules]}) ) return resp.status == httplib.ACCEPTED def ex_destroy_balancer_access_rule(self, balancer, rule): """ Removes an access rule from a Balancer's access list. This method blocks until the update request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to remove the access rule from. 
:type balancer: :class:`LoadBalancer` :param rule: Access Rule to remove from the balancer. :type rule: :class:`RackspaceAccessRule` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ accepted = self.ex_destroy_balancer_access_rule_no_poll(balancer, rule) if not accepted: msg = 'Delete access rule not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_destroy_balancer_access_rule_no_poll(self, balancer, rule): """ Removes an access rule from a Balancer's access list. This method returns immediately. :param balancer: Balancer to remove the access rule from. :type balancer: :class:`LoadBalancer` :param rule: Access Rule to remove from the balancer. :type rule: :class:`RackspaceAccessRule` :return: Returns whether the destroy request was accepted. :rtype: ``bool`` """ uri = '/loadbalancers/%s/accesslist/%s' % (balancer.id, rule.id) resp = self.connection.request(uri, method='DELETE') return resp.status == httplib.ACCEPTED def ex_destroy_balancer_access_rules(self, balancer, rules): """ Removes a list of access rules from a Balancer's access list. This method blocks until the update request has been processed and the balancer is in a RUNNING state again. :param balancer: Balancer to remove the access rules from. :type balancer: :class:`LoadBalancer` :param rules: List of :class:`RackspaceAccessRule` objects to remove from the balancer. :type rules: ``list`` of :class:`RackspaceAccessRule` :return: Updated Balancer. :rtype: :class:`LoadBalancer` """ accepted = self.ex_destroy_balancer_access_rules_no_poll( balancer, rules) if not accepted: msg = 'Destroy access rules request not accepted' raise LibcloudError(msg, driver=self) return self._get_updated_balancer(balancer) def ex_destroy_balancer_access_rules_no_poll(self, balancer, rules): """ Removes a list of access rules from a Balancer's access list. This method returns immediately. :param balancer: Balancer to remove the access rules from. 
        :type balancer: :class:`LoadBalancer`

        :param rules: List of :class:`RackspaceAccessRule` objects to
                      remove from the balancer.
        :type rules: ``list`` of :class:`RackspaceAccessRule`

        :return: Returns whether the destroy request was accepted.
        :rtype: ``bool``
        """
        # A single DELETE with repeated "id" query parameters removes all
        # requested rules in one API call.
        ids = [('id', rule.id) for rule in rules]
        uri = '/loadbalancers/%s/accesslist' % balancer.id

        resp = self.connection.request(uri,
                                       method='DELETE',
                                       params=ids)

        return resp.status == httplib.ACCEPTED

    def ex_list_current_usage(self, balancer):
        """
        Return current load balancer usage report.

        :param balancer: Balancer to return the current usage report for.
        :type balancer: :class:`LoadBalancer`

        :return: Raw load balancer usage object.
        :rtype: ``dict``
        """
        uri = '/loadbalancers/%s/usage/current' % (balancer.id)
        resp = self.connection.request(uri, method='GET')

        return resp.object

    # NOTE: the "object" parameter name below shadows the builtin; kept for
    # backwards compatibility with the existing driver API.
    def _to_protocols(self, object):
        protocols = []
        for item in object["protocols"]:
            protocols.append(item['name'].lower())
        return protocols

    def _to_protocols_with_default_ports(self, object):
        # Returns (protocol_name, default_port) tuples.
        protocols = []
        for item in object["protocols"]:
            name = item['name'].lower()
            port = int(item['port'])
            protocols.append((name, port))

        return protocols

    def _to_balancers(self, object):
        return [self._to_balancer(el) for el in object["loadBalancers"]]

    def _to_balancer(self, el):
        # Convert a raw API load balancer dict into a LoadBalancer object.
        # Every optional attribute is copied into "extra" only when present
        # in the response, so partial representations are handled too.
        ip = None
        port = None
        sourceAddresses = {}

        if 'port' in el:
            port = el["port"]

        if 'sourceAddresses' in el:
            sourceAddresses = el['sourceAddresses']

        extra = {
            "ipv6PublicSource": sourceAddresses.get("ipv6Public"),
            "ipv4PublicSource": sourceAddresses.get("ipv4Public"),
            "ipv4PrivateSource": sourceAddresses.get("ipv4Servicenet"),
            "service_name": self.connection.get_service_name(),
            "uri": "https://%s%s/loadbalancers/%s" % (
                self.connection.host, self.connection.request_path, el["id"]),
        }

        if 'virtualIps' in el:
            # The first virtual IP is exposed as the balancer's primary IP.
            ip = el['virtualIps'][0]['address']
            extra['virtualIps'] = el['virtualIps']

        if 'protocol' in el:
            extra['protocol'] = el['protocol']

        if 'algorithm' in el and \
           el["algorithm"] in self._VALUE_TO_ALGORITHM_MAP:
            extra["algorithm"] = self._value_to_algorithm(el["algorithm"])

        if 'healthMonitor' in el:
            health_monitor = self._to_health_monitor(el)
            if health_monitor:
                extra["healthMonitor"] = health_monitor

        if 'connectionThrottle' in el:
            extra["connectionThrottle"] = self._to_connection_throttle(el)

        if 'sessionPersistence' in el:
            persistence = el["sessionPersistence"]
            extra["sessionPersistenceType"] =\
                persistence.get("persistenceType")

        if 'connectionLogging' in el:
            logging = el["connectionLogging"]
            extra["connectionLoggingEnabled"] = logging.get("enabled")

        if 'nodes' in el:
            extra['members'] = self._to_members(el)

        if 'created' in el:
            extra['created'] = self._iso_to_datetime(el['created']['time'])

        if 'updated' in el:
            extra['updated'] = self._iso_to_datetime(el['updated']['time'])

        if 'accessList' in el:
            extra['accessList'] = [self._to_access_rule(rule)
                                   for rule in el['accessList']]

        return LoadBalancer(id=el["id"], name=el["name"],
                            state=self.LB_STATE_MAP.get(
                                el["status"], State.UNKNOWN),
                            ip=ip, port=port,
                            driver=self.connection.driver,
                            extra=extra)

    def _to_members(self, object, balancer=None):
        return [self._to_member(el, balancer) for el in object["nodes"]]

    def _to_member(self, el, balancer=None):
        extra = {}

        if 'weight' in el:
            extra['weight'] = el["weight"]

        if 'condition' in el and\
           el['condition'] in self.LB_MEMBER_CONDITION_MAP:
            extra['condition'] =\
                self.LB_MEMBER_CONDITION_MAP.get(el["condition"])

        if 'status' in el:
            extra['status'] = el["status"]

        lbmember = Member(id=el["id"],
                          ip=el["address"],
                          port=el["port"],
                          balancer=balancer,
                          extra=extra)

        return lbmember

    def _protocol_to_value(self, protocol):
        # Most protocol names simply upper-case, but the IMAP variants use
        # mixed case in the API (e.g. "IMAPv4"), so map those explicitly.
        non_standard_protocols = {'imapv2': 'IMAPv2', 'imapv3': 'IMAPv3',
                                  'imapv4': 'IMAPv4'}
        protocol_name = protocol.lower()

        if protocol_name in non_standard_protocols:
            protocol_value = non_standard_protocols[protocol_name]
        else:
            protocol_value = protocol.upper()

        return protocol_value

    def _kwargs_to_mutable_attrs(self, **attrs):
        # Translate driver-level keyword arguments into the attribute dict
        # the Load Balancer update API expects.
        update_attrs = {}
        if "name" in attrs:
            update_attrs['name'] = attrs['name']

        if "algorithm" in attrs:
            algorithm_value = self._algorithm_to_value(attrs['algorithm'])
            update_attrs['algorithm'] = algorithm_value

        if "protocol" in attrs:
            update_attrs['protocol'] =\
                self._protocol_to_value(attrs['protocol'])

        if "port" in attrs:
            update_attrs['port'] = int(attrs['port'])

        if "vip" in attrs:
            # "PUBLIC"/"SERVICENET" request a new virtual IP of that type;
            # any other value is treated as an existing virtual IP id.
            if attrs['vip'] == 'PUBLIC' or attrs['vip'] == 'SERVICENET':
                update_attrs['virtualIps'] = [{'type': attrs['vip']}]
            else:
                update_attrs['virtualIps'] = [{'id': attrs['vip']}]

        return update_attrs

    def _kwargs_to_mutable_member_attrs(self, **attrs):
        update_attrs = {}
        if 'condition' in attrs:
            update_attrs['condition'] =\
                self.CONDITION_LB_MEMBER_MAP.get(attrs['condition'])

        if 'weight' in attrs:
            update_attrs['weight'] = attrs['weight']

        return update_attrs

    def _to_health_monitor(self, el):
        health_monitor_data = el["healthMonitor"]

        type = health_monitor_data.get("type")
        delay = health_monitor_data.get("delay")
        timeout = health_monitor_data.get("timeout")
        attempts_before_deactivation =\
            health_monitor_data.get("attemptsBeforeDeactivation")

        if type == "CONNECT":
            return RackspaceHealthMonitor(
                type=type, delay=delay, timeout=timeout,
                attempts_before_deactivation=attempts_before_deactivation)

        if type == "HTTP" or type == "HTTPS":
            return RackspaceHTTPHealthMonitor(
                type=type, delay=delay, timeout=timeout,
                attempts_before_deactivation=attempts_before_deactivation,
                path=health_monitor_data.get("path"),
                status_regex=health_monitor_data.get("statusRegex"),
                body_regex=health_monitor_data.get("bodyRegex", ''))

        # Unknown health monitor type - ignore it.
        return None

    def _to_connection_throttle(self, el):
        connection_throttle_data = el["connectionThrottle"]

        min_connections = connection_throttle_data.get("minConnections")
        max_connections = connection_throttle_data.get("maxConnections")
        max_connection_rate = connection_throttle_data.get("maxConnectionRate")
        rate_interval = connection_throttle_data.get("rateInterval")

        return RackspaceConnectionThrottle(
            min_connections=min_connections,
            max_connections=max_connections,
            max_connection_rate=max_connection_rate,
            rate_interval_seconds=rate_interval)

    def _to_access_rule(self, el):
        return RackspaceAccessRule(
            id=el.get("id"),
            rule_type=self._to_access_rule_type(el.get("type")),
            address=el.get("address"))

    def _to_access_rule_type(self, type):
        if type == "ALLOW":
            return RackspaceAccessRuleType.ALLOW
        elif type == "DENY":
            return RackspaceAccessRuleType.DENY

    def _iso_to_datetime(self, isodate):
        # The API emits timestamps both with a literal "Z" suffix and with a
        # numeric UTC offset; try each format and keep the first that parses.
        date_formats = ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S%z')
        date = None

        for date_format in date_formats:
            try:
                date = datetime.strptime(isodate, date_format)
            except ValueError:
                pass

            if date:
                break

        return date
apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/elb.py0000664000175000017500000003365613153541406023423 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [ 'ElasticLBDriver' ] from libcloud.utils.py3 import httplib from libcloud.utils.xml import findtext, findall from libcloud.loadbalancer.types import State from libcloud.loadbalancer.base import Driver, LoadBalancer, Member from libcloud.common.aws import AWSGenericResponse, SignedAWSConnection VERSION = '2012-06-01' HOST = 'elasticloadbalancing.%s.amazonaws.com' ROOT = '/%s/' % (VERSION) NS = 'http://elasticloadbalancing.amazonaws.com/doc/%s/' % (VERSION, ) class ELBResponse(AWSGenericResponse): """ Amazon ELB response class. """ namespace = NS exceptions = {} xpath = 'Error' class ELBConnection(SignedAWSConnection): version = VERSION host = HOST responseCls = ELBResponse service_name = 'elasticloadbalancing' class ElasticLBDriver(Driver): name = 'Amazon Elastic Load Balancing' website = 'http://aws.amazon.com/elasticloadbalancing/' connectionCls = ELBConnection signature_version = '4' def __init__(self, access_id, secret, region, token=None): self.token = token self.region = region self.region_name = region super(ElasticLBDriver, self).__init__( access_id, secret, token=token, host=HOST % region, region=region ) def list_protocols(self): return ['tcp', 'ssl', 'http', 'https'] def list_balancers(self, ex_fetch_tags=False): params = {'Action': 'DescribeLoadBalancers'} data = self.connection.request(ROOT, params=params).object balancers = self._to_balancers(data) if ex_fetch_tags: for balancer in balancers: self._ex_populate_balancer_tags(balancer) return balancers def create_balancer(self, name, port, protocol, algorithm, members, ex_members_availability_zones=None): if ex_members_availability_zones is None: ex_members_availability_zones = ['a'] params = { 'Action': 'CreateLoadBalancer', 'LoadBalancerName': name, 'Listeners.member.1.InstancePort': str(port), 'Listeners.member.1.InstanceProtocol': protocol.upper(), 'Listeners.member.1.LoadBalancerPort': str(port), 'Listeners.member.1.Protocol': protocol.upper(), } for i, z in 
enumerate(ex_members_availability_zones): zone = ''.join((self.region, z)) params['AvailabilityZones.member.%d' % (i + 1)] = zone data = self.connection.request(ROOT, params=params).object balancer = LoadBalancer( id=name, name=name, state=State.PENDING, ip=findtext(element=data, xpath='DNSName', namespace=NS), port=port, driver=self.connection.driver ) balancer._members = [] return balancer def destroy_balancer(self, balancer): params = { 'Action': 'DeleteLoadBalancer', 'LoadBalancerName': balancer.id } self.connection.request(ROOT, params=params) return True def get_balancer(self, balancer_id, ex_fetch_tags=False): params = { 'Action': 'DescribeLoadBalancers', 'LoadBalancerNames.member.1': balancer_id } data = self.connection.request(ROOT, params=params).object balancer = self._to_balancers(data)[0] if ex_fetch_tags: balancer = self._ex_populate_balancer_tags(balancer) return balancer def balancer_attach_compute_node(self, balancer, node): params = { 'Action': 'RegisterInstancesWithLoadBalancer', 'LoadBalancerName': balancer.id, 'Instances.member.1.InstanceId': node.id } self.connection.request(ROOT, params=params) balancer._members.append(Member(node.id, None, None, balancer=self)) def balancer_detach_member(self, balancer, member): params = { 'Action': 'DeregisterInstancesFromLoadBalancer', 'LoadBalancerName': balancer.id, 'Instances.member.1.InstanceId': member.id } self.connection.request(ROOT, params=params) balancer._members = [m for m in balancer._members if m.id != member.id] return True def balancer_list_members(self, balancer): return balancer._members def ex_list_balancer_policies(self, balancer): """ Return a list of policy description string. :rtype: ``list`` of ``str`` """ params = { 'Action': 'DescribeLoadBalancerPolicies', 'LoadBalancerName': balancer.id } data = self.connection.request(ROOT, params=params).object return self._to_policies(data) def ex_list_balancer_policy_types(self): """ Return a list of policy type description string. 
:rtype: ``list`` of ``str`` """ params = {'Action': 'DescribeLoadBalancerPolicyTypes'} data = self.connection.request(ROOT, params=params).object return self._to_policy_types(data) def ex_create_balancer_policy(self, name, policy_name, policy_type, policy_attributes=None): """ Create a new load balancer policy :param name: Balancer name to create the policy for :type name: ``str`` :param policy_name: policy to be created :type policy_name: ``str`` :param policy_type: policy type being used to create policy. :type policy_type: ``str`` :param policy_attributes: Each list contain values, ['AttributeName', 'value'] :type policy_attributes: ``PolicyAttribute list`` """ params = { 'Action': 'CreateLoadBalancerPolicy', 'LoadBalancerName': name, 'PolicyName': policy_name, 'PolicyTypeName': policy_type } if policy_attributes is not None: for index, (name, value) in enumerate( policy_attributes.iteritems(), 1): params['PolicyAttributes.member.%d. \ AttributeName' % (index)] = name params['PolicyAttributes.member.%d. 
\ AttributeValue' % (index)] = value response = self.connection.request(ROOT, params=params) return response.status == httplib.OK def ex_delete_balancer_policy(self, name, policy_name): """ Delete a load balancer policy :param name: balancer name for which policy will be deleted :type name: ``str`` :param policy_name: The Mnemonic name for the policy being deleted :type policy_name: ``str`` """ params = { 'Action': 'DeleteLoadBalancerPolicy', 'LoadBalancerName': name, 'PolicyName': policy_name } response = self.connection.request(ROOT, params=params) return response.status == httplib.OK def ex_set_balancer_policies_listener(self, name, port, policies): """ Associates, updates, or disables a policy with a listener on the load balancer :param name: balancer name to set policies for listerner :type name: ``str`` :param port: port to use :type port: ``str`` :param policies: List of policies to be associated with the balancer :type policies: ``string list`` """ params = { 'Action': 'SetLoadBalancerPoliciesOfListener', 'LoadBalancerName': name, 'LoadBalancerPort': str(port) } if policies: params = self._create_list_params(params, policies, 'PolicyNames.member.%d') response = self.connection.request(ROOT, params=params) return response.status == httplib.OK def ex_set_balancer_policies_backend_server(self, name, instance_port, policies): """ Replaces the current set of policies associated with a port on which the back-end server is listening with a new set of policies :param name: balancer name to set policies of backend server :type name: ``str`` :param instance_port: Instance Port :type instance_port: ``int`` :param policies: List of policies to be associated with the balancer :type policies: ``string list` """ params = { 'Action': 'SetLoadBalancerPoliciesForBackendServer', 'LoadBalancerName': name, 'InstancePort': str(instance_port) } if policies: params = self._create_list_params(params, policies, 'PolicyNames.member.%d') response = self.connection.request(ROOT, 
params=params) return response.status == httplib.OK def ex_create_balancer_listeners(self, name, listeners=None): """ Creates one or more listeners on a load balancer for the specified port :param name: The mnemonic name associated with the load balancer :type name: ``str`` :param listeners: Each tuple contain values, (LoadBalancerPortNumber, InstancePortNumber, Protocol,[SSLCertificateId]) :type listeners: ``list of tuple` """ params = { 'Action': 'CreateLoadBalancerListeners', 'LoadBalancerName': name } for index, listener in enumerate(listeners): i = index + 1 protocol = listener[2].upper() params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0] params['Listeners.member.%d.InstancePort' % i] = listener[1] params['Listeners.member.%d.Protocol' % i] = listener[2] if protocol == 'HTTPS' or protocol == 'SSL': params['Listeners.member.%d. \ SSLCertificateId' % i] = listener[3] else: return False response = self.connection.request(ROOT, params=params) return response.status == httplib.OK def _to_policies(self, data): xpath = 'DescribeLoadBalancerPoliciesResult/PolicyDescriptions/member' return [findtext(element=el, xpath='PolicyName', namespace=NS) for el in findall(element=data, xpath=xpath, namespace=NS)] def _to_policy_types(self, data): xpath = 'DescribeLoadBalancerPolicyTypesResult/' xpath += 'PolicyTypeDescriptions/member' return [findtext(element=el, xpath='PolicyTypeName', namespace=NS) for el in findall(element=data, xpath=xpath, namespace=NS)] def _to_balancers(self, data): xpath = 'DescribeLoadBalancersResult/LoadBalancerDescriptions/member' return [self._to_balancer(el) for el in findall(element=data, xpath=xpath, namespace=NS)] def _to_balancer(self, el): name = findtext(element=el, xpath='LoadBalancerName', namespace=NS) dns_name = findtext(el, xpath='DNSName', namespace=NS) port = findtext(el, xpath='LoadBalancerPort', namespace=NS) balancer = LoadBalancer( id=name, name=name, state=State.UNKNOWN, ip=dns_name, port=port, 
            driver=self.connection.driver
        )

        xpath = 'Instances/member/InstanceId'
        members = findall(element=el, xpath=xpath, namespace=NS)
        balancer._members = []
        for m in members:
            balancer._members.append(Member(m.text, None, None,
                                            balancer=balancer))

        return balancer

    def _to_tags(self, data):
        """
        return tags dict
        """
        tags = {}
        xpath = 'DescribeTagsResult/TagDescriptions/member/Tags/member'
        for el in findall(element=data, xpath=xpath, namespace=NS):
            key = findtext(element=el, xpath='Key', namespace=NS)
            value = findtext(element=el, xpath='Value', namespace=NS)
            # Entries without a key are ignored.
            if key:
                tags[key] = value

        return tags

    def _create_list_params(self, params, items, label):
        """
        return parameter list
        """
        # A bare string is treated as a single-item list so callers can pass
        # either form.  Member numbering is 1-based, per the AWS query API.
        if isinstance(items, str):
            items = [items]
        for index, item in enumerate(items):
            params[label % (index + 1)] = item
        return params

    def _ex_connection_class_kwargs(self):
        kwargs = super(ElasticLBDriver, self)._ex_connection_class_kwargs()
        # Temporary security token credentials require signature version 4.
        if hasattr(self, 'token') and self.token is not None:
            kwargs['token'] = self.token
            kwargs['signature_version'] = '4'
        else:
            kwargs['signature_version'] = self.signature_version

        return kwargs

    def _ex_list_balancer_tags(self, balancer_id):
        params = {
            'Action': 'DescribeTags',
            'LoadBalancerNames.member.1': balancer_id
        }
        data = self.connection.request(ROOT, params=params).object
        return self._to_tags(data)

    def _ex_populate_balancer_tags(self, balancer):
        # Merge freshly fetched tags into any tags already present in extra.
        tags = balancer.extra.get('tags', {})
        tags.update(self._ex_list_balancer_tags(balancer.id))

        if tags:
            balancer.extra['tags'] = tags

        return balancer
apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/gogrid.py0000664000175000017500000002035412701023453024123 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import time from libcloud.utils.py3 import httplib try: import simplejson as json except ImportError: import json from libcloud.utils.misc import reverse_dict from libcloud.common.types import LibcloudError from libcloud.common.gogrid import GoGridConnection, GoGridResponse,\ BaseGoGridDriver from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm from libcloud.loadbalancer.base import DEFAULT_ALGORITHM from libcloud.loadbalancer.types import State, LibcloudLBImmutableError class GoGridLBResponse(GoGridResponse): def success(self): if self.status == httplib.INTERNAL_SERVER_ERROR: # Hack, but at least this error message is more useful than # "unexpected server error" body = json.loads(self.body) if body['method'] == '/grid/loadbalancer/add' and \ len(body['list']) >= 1 and \ body['list'][0]['message'].find( 'unexpected server error') != -1: raise LibcloudError( value='You mostly likely tried to add a member with an IP' ' address not assigned to your account', driver=self) return super(GoGridLBResponse, self).success() class GoGridLBConnection(GoGridConnection): """ Connection class for the GoGrid load-balancer driver. 
""" responseCls = GoGridLBResponse class GoGridLBDriver(BaseGoGridDriver, Driver): connectionCls = GoGridLBConnection api_name = 'gogrid_lb' name = 'GoGrid LB' website = 'http://www.gogrid.com/' LB_STATE_MAP = {'On': State.RUNNING, 'Unknown': State.UNKNOWN} _VALUE_TO_ALGORITHM_MAP = { 'round robin': Algorithm.ROUND_ROBIN, 'least connect': Algorithm.LEAST_CONNECTIONS } _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) def __init__(self, *args, **kwargs): """ @inherits: :class:`Driver.__init__` """ super(GoGridLBDriver, self).__init__(*args, **kwargs) def list_protocols(self): # GoGrid only supports http return ['http'] def list_balancers(self): return self._to_balancers( self.connection.request('/api/grid/loadbalancer/list').object) def ex_create_balancer_nowait(self, name, members, protocol='http', port=80, algorithm=DEFAULT_ALGORITHM): """ @inherits: :class:`Driver.create_balancer` """ algorithm = self._algorithm_to_value(algorithm) params = {'name': name, 'loadbalancer.type': algorithm, 'virtualip.ip': self._get_first_ip(), 'virtualip.port': port} params.update(self._members_to_params(members)) resp = self.connection.request('/api/grid/loadbalancer/add', method='GET', params=params) return self._to_balancers(resp.object)[0] def create_balancer(self, name, members, protocol='http', port=80, algorithm=DEFAULT_ALGORITHM): balancer = self.ex_create_balancer_nowait(name, members, protocol, port, algorithm) timeout = 60 * 20 waittime = 0 interval = 2 * 15 if balancer.id is not None: return balancer else: while waittime < timeout: balancers = self.list_balancers() for i in balancers: if i.name == balancer.name and i.id is not None: return i waittime += interval time.sleep(interval) raise Exception('Failed to get id') def destroy_balancer(self, balancer): try: resp = self.connection.request( '/api/grid/loadbalancer/delete', method='POST', params={'id': balancer.id}) except Exception: e = sys.exc_info()[1] if "Update request for LoadBalancer" in str(e): 
raise LibcloudLBImmutableError( "Cannot delete immutable object", GoGridLBDriver) else: raise return resp.status == 200 def get_balancer(self, **kwargs): params = {} try: params['name'] = kwargs['ex_balancer_name'] except KeyError: balancer_id = kwargs['balancer_id'] params['id'] = balancer_id resp = self.connection.request('/api/grid/loadbalancer/get', params=params) return self._to_balancers(resp.object)[0] def balancer_attach_member(self, balancer, member): members = self.balancer_list_members(balancer) members.append(member) params = {"id": balancer.id} params.update(self._members_to_params(members)) resp = self._update_balancer(params) return [m for m in self._to_members(resp.object["list"][0]["realiplist"], balancer) if m.ip == member.ip][0] def balancer_detach_member(self, balancer, member): members = self.balancer_list_members(balancer) remaining_members = [n for n in members if n.id != member.id] params = {"id": balancer.id} params.update(self._members_to_params(remaining_members)) resp = self._update_balancer(params) return resp.status == 200 def balancer_list_members(self, balancer): resp = self.connection.request('/api/grid/loadbalancer/get', params={'id': balancer.id}) return self._to_members(resp.object["list"][0]["realiplist"], balancer) def _update_balancer(self, params): try: return self.connection.request('/api/grid/loadbalancer/edit', method='POST', params=params) except Exception: e = sys.exc_info()[1] if "Update already pending" in str(e): raise LibcloudLBImmutableError( "Balancer is immutable", GoGridLBDriver) raise LibcloudError(value='Exception: %s' % str(e), driver=self) def _members_to_params(self, members): """ Helper method to convert list of :class:`Member` objects to GET params. 
""" params = {} i = 0 for member in members: params["realiplist.%s.ip" % i] = member.ip params["realiplist.%s.port" % i] = member.port i += 1 return params def _to_balancers(self, object): return [self._to_balancer(el) for el in object["list"]] def _to_balancer(self, el): lb = LoadBalancer(id=el.get("id"), name=el["name"], state=self.LB_STATE_MAP.get( el["state"]["name"], State.UNKNOWN), ip=el["virtualip"]["ip"]["ip"], port=el["virtualip"]["port"], driver=self.connection.driver) return lb def _to_members(self, object, balancer=None): return [self._to_member(el, balancer) for el in object] def _to_member(self, el, balancer=None): member = Member(id=el["ip"]["id"], ip=el["ip"]["ip"], port=el["port"], balancer=balancer) return member apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/alb.py0000664000175000017500000002514213153541406023416 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
__all__ = [
    'ApplicationLBDriver'
]

from libcloud.utils.xml import findtext, findall
from libcloud.loadbalancer.types import State
from libcloud.loadbalancer.base import Driver, LoadBalancer, Member
from libcloud.common.aws import AWSGenericResponse, SignedAWSConnection

VERSION = '2015-12-01'
HOST = 'elasticloadbalancing.%s.amazonaws.com'
ROOT = '/%s/' % (VERSION)
NS = 'http://elasticloadbalancing.amazonaws.com/doc/%s/' % (VERSION, )


class ALBResponse(AWSGenericResponse):
    """
    Amazon ALB response class.
    """
    namespace = NS
    exceptions = {}
    xpath = 'Error'


class ALBConnection(SignedAWSConnection):
    version = VERSION
    host = HOST
    responseCls = ALBResponse
    service_name = 'elasticloadbalancing'


class ApplicationLBDriver(Driver):
    name = 'Amazon Application Load Balancing'
    website = 'http://aws.amazon.com/elasticloadbalancing/'
    connectionCls = ALBConnection
    signature_version = '4'

    def __init__(self, access_id, secret, region, token=None):
        self.token = token
        self.region = region
        self.region_name = region
        super(ApplicationLBDriver, self).__init__(
            access_id, secret, token=token, host=HOST % region, region=region
        )

    def list_protocols(self):
        return ['http', 'https']

    def list_balancers(self):
        params = {'Action': 'DescribeLoadBalancers'}
        data = self.connection.request(ROOT, params=params).object
        return self._to_balancers(data)

    def balancer_list_members(self, balancer):
        return balancer._members

    def get_balancer(self, balancer_id):
        params = {
            'Action': 'DescribeLoadBalancers',
            'LoadBalancerNames.member.1': balancer_id
        }
        data = self.connection.request(ROOT, params=params).object
        return self._to_balancers(data)[0]

    def ex_balancer_list_listeners(self, balancer):
        return balancer.extra.get('listeners', [])

    def _to_listeners(self, data):
        xpath = 'DescribeListenersResult/Listeners/member'
        return [self._to_listener(el) for el in findall(
            element=data, xpath=xpath, namespace=NS
        )]

    def _to_listener(self, el):
        # Convert a DescribeListeners member element into a plain dict;
        # rules are fetched with an extra API call per listener.
        listener_arn = findtext(element=el, xpath='ListenerArn', namespace=NS)
        listener = {
            'id': listener_arn,
            'protocol': findtext(element=el, xpath='Protocol', namespace=NS),
            'port': findtext(element=el, xpath='Port', namespace=NS),
            'rules': self._ex_get_rules_for_listener(listener_arn)
        }
        return listener

    def _to_targets(self, data):
        xpath = 'DefaultActions/member'
        return [self._to_target(el) for el in findall(
            element=data, xpath=xpath, namespace=NS
        )]

    def _to_target(self, el):
        return findtext(
            element=el,
            xpath='DefaultActions/member/TargetGroupArn',
            namespace=NS
        )

    def _to_balancer(self, el):
        # NOTE: populating "extra" triggers several additional API calls
        # (listeners, target groups, tags) per balancer.
        name = findtext(element=el, xpath='LoadBalancerName', namespace=NS)
        id = findtext(element=el, xpath='LoadBalancerArn', namespace=NS)
        dns_name = findtext(el, xpath='DNSName', namespace=NS)

        balancer = LoadBalancer(
            id=id,
            name=name,
            state=State.UNKNOWN,
            ip=dns_name,
            port=None,
            driver=self.connection.driver
        )

        extra = {
            'listeners': self._ex_get_balancer_listeners(balancer),
            'target_groups': self._ex_get_balancer_target_groups(balancer),
            'tags': self._ex_get_balancer_tags(balancer)
        }
        balancer.extra = extra
        # The first listener's port is exposed as the balancer's port.
        if len(extra['listeners']) > 0:
            balancer.port = extra['listeners'][0]['port']
        else:
            balancer.port = None
        balancer._members = self._ex_get_balancer_memebers(balancer)

        return balancer

    def _to_balancers(self, data):
        xpath = 'DescribeLoadBalancersResult/LoadBalancers/member'
        return [self._to_balancer(el)
                for el in findall(element=data, xpath=xpath, namespace=NS)]

    def _to_tags(self, data):
        """
        return tags dict
        """
        tags = {}
        xpath = 'DescribeTagsResult/TagDescriptions/member/Tags/member'
        for el in findall(element=data, xpath=xpath, namespace=NS):
            key = findtext(element=el, xpath='Key', namespace=NS)
            value = findtext(element=el, xpath='Value', namespace=NS)
            # Entries without a key are ignored.
            if key:
                tags[key] = value
        return tags

    def _to_rule(self, el):
        # Truthy string values the API may return for boolean fields.
        def __to_bool__(val):
            return val.lower() in ("yes", "true", "t", "1")

        id = findtext(element=el, xpath='RuleArn', namespace=NS)
        is_default = findtext(element=el, xpath='IsDefault', namespace=NS)
        priority = findtext(element=el, xpath='Priority', namespace=NS)
        target_group = findtext(
            element=el,
            xpath='Actions/member/TargetGroupArn',
            namespace=NS
        )
        conditions = {}
        cond_members = findall(
            element=el, xpath='Conditions/member', namespace=NS
        )
        # Conditions map a field name to the list of its matching values.
        for cond_member in cond_members:
            field = findtext(element=cond_member, xpath='Field', namespace=NS)
            conditions[field] = []
            value_members = findall(
                element=cond_member, xpath='Values/member', namespace=NS
            )
            for value_member in value_members:
                conditions[field].append(value_member.text)

        rule = {
            'id': id,
            'is_default': __to_bool__(is_default),
            'priority': priority,
            'target_group': target_group,
            'conditions': conditions
        }
        return rule

    def _to_rules(self, data):
        xpath = 'DescribeRulesResult/Rules/member'
        return [self._to_rule(el)
                for el in findall(element=data, xpath=xpath, namespace=NS)]

    def _to_target_groups(self, data):
        xpath = 'DescribeTargetGroupsResult/TargetGroups/member'
        return [self._to_target_group(el)
                for el in findall(element=data, xpath=xpath, namespace=NS)]

    def _to_target_group(self, el):
        target_group_arn = findtext(
            element=el, xpath='TargetGroupArn', namespace=NS
        )
        name = findtext(element=el, xpath='TargetGroupName', namespace=NS)
        # Member lookup requires an extra DescribeTargetHealth API call.
        members = self._ex_get_target_group_members(target_group_arn)

        return {'id': target_group_arn, 'name': name, 'members': members}

    def _to_target_group_members(self, data):
        xpath = 'DescribeTargetHealthResult/TargetHealthDescriptions/member'
        return [self._to_target_group_member(el)
                for el in findall(element=data, xpath=xpath, namespace=NS)]

    def _to_target_group_member(self, el):
        id = findtext(element=el, xpath='Target/Id', namespace=NS)
        port = findtext(element=el, xpath='Target/Port', namespace=NS)
        health = findtext(
            element=el, xpath='TargetHealth/State', namespace=NS
        )

        return {'id': id, 'port': port, 'health': health}

    # NOTE(review): method name contains a typo ("memebers"); renaming would
    # break any external callers, so it is left as-is here.
    def _ex_get_balancer_memebers(self, balancer):
        balancer_members = []
        for tg in balancer.extra['target_groups']:
            for tg_member in tg['members']:
                new_member = Member(
                    tg_member['id'],
                    None,
                    tg_member['port'],
                    balancer=balancer,
                    extra={
'health': tg_member['health'], 'target_group': tg['name'] } ) balancer_members.append(new_member) return balancer_members def _ex_get_target_group_members(self, target_group_arn): """ Return a list of target group member dicts. :rtype: ``list`` of ``dict`` """ params = { 'Action': 'DescribeTargetHealth', 'TargetGroupArn': target_group_arn } data = self.connection.request(ROOT, params=params).object return self._to_target_group_members(data) def _ex_get_balancer_target_groups(self, balancer): """ Return a list of load balancer target groups with members. :rtype: ``list`` of ``dict`` """ params = { 'Action': 'DescribeTargetGroups', 'LoadBalancerArn': balancer.id } data = self.connection.request(ROOT, params=params).object return self._to_target_groups(data) def _ex_get_balancer_listeners(self, balancer): """ Return a list of load balancer listeners dicts. :rtype: ``list`` of ``dict`` """ params = { 'Action': 'DescribeListeners', 'LoadBalancerArn': balancer.id } data = self.connection.request(ROOT, params=params).object return self._to_listeners(data) def _ex_get_rules_for_listener(self, listener_arn): """ Return a list of listeners rule dicts. 
:rtype: ``list`` of ``dict`` """ params = { 'Action': 'DescribeRules', 'ListenerArn': listener_arn } data = self.connection.request(ROOT, params=params).object return self._to_rules(data) def _ex_connection_class_kwargs(self): pdriver = super(ApplicationLBDriver, self) kwargs = pdriver._ex_connection_class_kwargs() if hasattr(self, 'token') and self.token is not None: kwargs['token'] = self.token kwargs['signature_version'] = '4' else: kwargs['signature_version'] = self.signature_version return kwargs def _ex_get_balancer_tags(self, balancer): params = { 'Action': 'DescribeTags', 'ResourceArns.member.1': balancer.id } data = self.connection.request(ROOT, params=params).object return self._to_tags(data) apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/cloudstack.py0000664000175000017500000001706712701023453025016 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from libcloud.common.cloudstack import CloudStackDriverMixIn
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
from libcloud.loadbalancer.types import Provider
from libcloud.loadbalancer.types import State
from libcloud.utils.misc import reverse_dict


class CloudStackLBDriver(CloudStackDriverMixIn, Driver):
    """Driver for CloudStack load balancers."""

    api_name = 'cloudstack_lb'
    name = 'CloudStack'
    website = 'http://cloudstack.org/'
    type = Provider.CLOUDSTACK

    _VALUE_TO_ALGORITHM_MAP = {
        'roundrobin': Algorithm.ROUND_ROBIN,
        'leastconn': Algorithm.LEAST_CONNECTIONS
    }
    _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)

    LB_STATE_MAP = {
        'Active': State.RUNNING,
    }

    def __init__(self, key, secret=None, secure=True, host=None,
                 path=None, port=None, *args, **kwargs):
        """
        @inherits: :class:`Driver.__init__`
        """
        # Fall back to class-level host/path when not given explicitly.
        host = host if host else self.host
        path = path if path else self.path

        if path is not None:
            self.path = path

        if host is not None:
            self.host = host

        # When this driver is instantiated directly (not via a provider
        # subclass) the endpoint must be supplied by the caller.
        if (self.type == Provider.CLOUDSTACK) and (not host or not path):
            raise Exception('When instantiating CloudStack driver directly ' +
                            'you also need to provide host and path argument')

        super(CloudStackLBDriver, self).__init__(key=key, secret=secret,
                                                 secure=secure,
                                                 host=host, port=port)

    def list_protocols(self):
        """
        We don't actually have any protocol awareness beyond TCP.

        :rtype: ``list`` of ``str``
        """
        return ['tcp']

    def list_balancers(self):
        balancers = self._sync_request(command='listLoadBalancerRules',
                                       method='GET')
        balancers = balancers.get('loadbalancerrule', [])
        return [self._to_balancer(balancer) for balancer in balancers]

    def get_balancer(self, balancer_id):
        balancer = self._sync_request(command='listLoadBalancerRules',
                                      params={'id': balancer_id},
                                      method='GET')
        balancer = balancer.get('loadbalancerrule', [])
        if not balancer:
            raise Exception("no such load balancer: " + str(balancer_id))
        return self._to_balancer(balancer[0])

    def create_balancer(self, name, members, protocol='http', port=80,
                        algorithm=DEFAULT_ALGORITHM, location=None,
                        private_port=None, network_id=None, vpc_id=None):
        """
        @inherits: :class:`Driver.create_balancer`

        :param location: Location
        :type location: :class:`NodeLocation`

        :param private_port: Private port
        :type private_port: ``int``

        :param network_id: The guest network this rule will be created for.
        :type network_id: ``str``
        """
        args = {}
        ip_args = {}

        if location is None:
            # Default to the first zone the account can see.
            locations = self._sync_request(command='listZones', method='GET')
            location = locations['zone'][0]['id']
        else:
            location = location.id

        if private_port is None:
            private_port = port

        if network_id is not None:
            args['networkid'] = network_id
            ip_args['networkid'] = network_id

        if vpc_id is not None:
            ip_args['vpcid'] = vpc_id

        # NOTE(review): this update overwrites 'networkid' (possibly with
        # None) and uses the key 'vpc_id' where the CloudStack API elsewhere
        # in this method uses 'vpcid' -- verify against the API reference.
        ip_args.update({'zoneid': location,
                        'networkid': network_id,
                        'vpc_id': vpc_id})

        # Acquire a public IP first; the LB rule is then bound to it.
        result = self._async_request(command='associateIpAddress',
                                     params=ip_args,
                                     method='GET')
        public_ip = result['ipaddress']

        args.update({'algorithm': self._ALGORITHM_TO_VALUE_MAP[algorithm],
                     'name': name,
                     'privateport': private_port,
                     'publicport': port,
                     'publicipid': public_ip['id']})

        result = self._sync_request(
            command='createLoadBalancerRule',
            params=args,
            method='GET')

        listbalancers = self._sync_request(
            command='listLoadBalancerRules',
            params=args,
            method='GET')

        listbalancers = [rule for rule in listbalancers['loadbalancerrule']
                         if rule['id'] == result['id']]
        if len(listbalancers) != 1:
            return None

        balancer = self._to_balancer(listbalancers[0])

        for member in members:
            balancer.attach_member(member)

        return balancer

    def destroy_balancer(self, balancer):
        # NOTE(review): implicitly returns None even on success; callers
        # that truth-test the result will see success as falsy.
        self._async_request(command='deleteLoadBalancerRule',
                            params={'id': balancer.id},
                            method='GET')

        self._async_request(command='disassociateIpAddress',
                            params={'id': balancer.ex_public_ip_id},
                            method='GET')

    def balancer_attach_member(self, balancer, member):
        # The member is reachable on the balancer's private port, not the
        # port the Member object was created with.
        member.port = balancer.ex_private_port
        self._async_request(command='assignToLoadBalancerRule',
                            params={'id': balancer.id,
                                    'virtualmachineids': member.id},
                            method='GET')
        return True

    def balancer_detach_member(self, balancer, member):
        self._async_request(command='removeFromLoadBalancerRule',
                            params={'id': balancer.id,
                                    'virtualmachineids': member.id},
                            method='GET')
        return True

    def balancer_list_members(self, balancer):
        members = self._sync_request(command='listLoadBalancerRuleInstances',
                                     params={'id': balancer.id},
                                     method='GET')
        members = members['loadbalancerruleinstance']
        return [self._to_member(m, balancer.ex_private_port, balancer)
                for m in members]

    def _to_balancer(self, obj):
        balancer = LoadBalancer(
            id=obj['id'],
            name=obj['name'],
            state=self.LB_STATE_MAP.get(obj['state'], State.UNKNOWN),
            ip=obj['publicip'],
            port=obj['publicport'],
            driver=self.connection.driver
        )
        # Extra attributes needed by attach/destroy operations above.
        balancer.ex_private_port = obj['privateport']
        balancer.ex_public_ip_id = obj['publicipid']
        return balancer

    def _to_member(self, obj, port, balancer):
        return Member(
            id=obj['id'],
            ip=obj['nic'][0]['ipaddress'],
            port=port,
            balancer=balancer
        )
apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/softlayer.py0000664000175000017500000003541012701023453024662 0ustar  kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance withv # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = [ 'SoftlayerLBDriver' ] from libcloud.common.types import LibcloudError from libcloud.common.softlayer import SoftLayerConnection from libcloud.utils.misc import find, reverse_dict from libcloud.loadbalancer.types import State from libcloud.loadbalancer.base import Algorithm, Driver, LoadBalancer from libcloud.loadbalancer.base import DEFAULT_ALGORITHM, Member lb_service = 'SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_'\ 'VirtualIpAddress' class LBPackage(object): """ Defines a single Softlayer package to be used when placing orders ( e.g. via ex_place_balancer_order method). :param id: Package id. :type id: ``int`` :param name: Package name. :type name: ``str`` :param description: Package short description. :type description: ``str`` :param price_id: Id of the price for this package. :type price_id: ``int`` :param capacity: Provides a numerical representation of the capacity given in the description of this package. 
:type capacity: ``int`` """ def __init__(self, id, name, description, price_id, capacity): self.id = id self.name = name self.description = description self.price_id = price_id self.capacity = capacity def __repr__(self): return ( '' % (self.id, self.name, self.description, self.price_id, self.capacity)) class SoftlayerLBDriver(Driver): name = 'Softlayer Load Balancing' website = 'http://www.softlayer.com/' connectionCls = SoftLayerConnection _VALUE_TO_ALGORITHM_MAP = { 'ROUND_ROBIN': Algorithm.ROUND_ROBIN, 'LEAST_CONNECTIONS': Algorithm.LEAST_CONNECTIONS, 'SHORTEST_RESPONSE': Algorithm.SHORTEST_RESPONSE, 'PERSISTENT_IP': Algorithm.PERSISTENT_IP } _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) def list_balancers(self): mask = { 'adcLoadBalancers': { 'ipAddress': '', 'loadBalancerHardware': { 'datacenter': '' }, 'virtualServers': { 'serviceGroups': { 'routingMethod': '', 'routingType': '', 'services': { 'ipAddress': '' } } } } } res = self.connection.request( 'SoftLayer_Account', 'getAdcLoadBalancers', object_mask=mask).object return [self._to_balancer(lb) for lb in res] def get_balancer(self, balancer_id): balancers = self.list_balancers() balancer = find(balancers, lambda b: b.id == balancer_id) if not balancer: raise LibcloudError(value='No balancer found for id: %s' % balancer_id, driver=self) return balancer def list_protocols(self): """ Return a list of supported protocols. 
:rtype: ``list`` of ``str`` """ return ['dns', 'ftp', 'http', 'https', 'tcp', 'udp'] def balancer_list_members(self, balancer): lb = self._get_balancer_model(balancer.id) members = [] vs = self._locate_service_group(lb, balancer.port) if vs: if vs['serviceGroups']: srvgrp = vs['serviceGroups'][0] members = [self._to_member(srv, balancer) for srv in srvgrp['services']] return members def balancer_attach_member(self, balancer, member): lb = self._get_balancer_model(balancer.id) vs = self._locate_service_group(lb, balancer.port) if not vs: raise LibcloudError(value='No service_group found for balancer ' 'port: %s' % balancer.port, driver=self) if vs['serviceGroups']: services = vs['serviceGroups'][0]['services'] services.append(self._to_service_template(member.ip, member.port)) self.connection.request(lb_service, 'editObject', lb, id=balancer.id) return [m for m in balancer.list_members() if m.ip == member.ip][0] def balancer_detach_member(self, balancer, member): svc_lbsrv = 'SoftLayer_Network_Application_Delivery_Controller_'\ 'LoadBalancer_Service' self.connection.request(svc_lbsrv, 'deleteObject', id=member.id) return True def destroy_balancer(self, balancer): res_billing = self.connection.request(lb_service, 'getBillingItem', id=balancer.id).object self.connection.request('SoftLayer_Billing_Item', 'cancelService', id=res_billing['id']) return True def ex_list_balancer_packages(self): """ Retrieves the available local load balancer packages. :rtype: ``list`` of :class:`LBPackage` """ mask = { 'prices': '' } res = self.connection.request('SoftLayer_Product_Package', 'getItems', id=0, object_mask=mask).object res_lb_pkgs = [r for r in res if r['description'].find ('Load Balancer') != -1] res_lb_pkgs = [r for r in res_lb_pkgs if not r['description']. startswith('Global')] return [self._to_lb_package(r) for r in res_lb_pkgs] def ex_place_balancer_order(self, package, location): """ Places an order for a local loadbalancer in the specified location. 
:param package: The package to create the loadbalancer from. :type package: :class:`LBPackage` :param string location: The location (datacenter) to create the loadbalancer. :type location: :class:`NodeLocation` :return: ``True`` if ex_place_balancer_order was successful. :rtype: ``bool`` """ data = { 'complexType': 'SoftLayer_Container_Product_Order_Network_' 'LoadBalancer', 'quantity': 1, 'packageId': 0, 'location': self._get_location(location.id), 'prices': [{'id': package.price_id}] } self.connection.request('SoftLayer_Product_Order', 'placeOrder', data) return True def ex_configure_load_balancer(self, balancer, port=80, protocol='http', algorithm=DEFAULT_ALGORITHM, ex_allocation=100): """ Configure the loadbalancer by adding it with a front-end port (aka a service group in the Softlayer loadbalancer model). Softlayer loadbalancer may be defined with multiple service groups (front-end ports) each defined with a unique port number. :param balancer: The loadbalancer. :type balancer: :class:`LoadBalancer` :param port: Port of the service group, defaults to 80. :type port: ``int`` :param protocol: Loadbalancer protocol, defaults to http. :type protocol: ``str`` :param algorithm: Load balancing algorithm, defaults to Algorithm.ROUND_ROBIN :type algorithm: :class:`Algorithm` :param ex_allocation: The percentage of the total connection allocations to allocate for this group. :type ex_allocation: ``int`` :return: ``True`` if ex_add_service_group was successful. 
:rtype: ``bool`` """ _types = self._get_routing_types() _methods = self._get_routing_methods() rt = find(_types, lambda t: t['keyname'] == protocol.upper()) if not rt: raise LibcloudError(value='Invalid protocol %s' % protocol, driver=self) value = self._algorithm_to_value(algorithm) meth = find(_methods, lambda m: m['keyname'] == value) if not meth: raise LibcloudError(value='Invalid algorithm %s' % algorithm, driver=self) service_group_template = { 'port': port, 'allocation': ex_allocation, 'serviceGroups': [{ 'routingTypeId': rt['id'], 'routingMethodId': meth['id'] }] } lb = self._get_balancer_model(balancer.id) if len(lb['virtualServers']) > 0: port = lb['virtualServers'][0]['port'] raise LibcloudError(value='Loadbalancer already configured with ' 'a service group (front-end port)' % port, driver=self) lb['virtualServers'].append(service_group_template) self.connection.request(lb_service, 'editObject', lb, id=balancer.id) return True def _get_balancer_model(self, balancer_id): """ Retrieve Softlayer loadbalancer model. """ lb_mask = { 'virtualServers': { 'serviceGroups': { 'services': { 'ipAddress': '', 'groupReferences': '', } } } } lb_res = self.connection.request(lb_service, 'getObject', object_mask=lb_mask, id=balancer_id).\ object return lb_res def _locate_service_group(self, lb, port): """ Locate service group with given port. Return virtualServers (vs) entry whose port matches the supplied parameter port. For a negative port, just return the first vs entry. None is returned if no match found. :param lb: Softlayer loadbalancer model. :type lb: ``dict`` :param port: loadbalancer front-end port. :type port: ``int`` :return: Matched entry in the virtualServers array of the supplied model. 
:rtype: ``dict`` """ vs = None if port < 0: vs = lb['virtualServers'][0] if lb['virtualServers']\ else None else: vs = find(lb['virtualServers'], lambda v: v['port'] == port) return vs def _get_routing_types(self): svc_rtype = 'SoftLayer_Network_Application_Delivery_Controller_'\ 'LoadBalancer_Routing_Type' return self.connection.request(svc_rtype, 'getAllObjects').object def _get_routing_methods(self): svc_rmeth = 'SoftLayer_Network_Application_Delivery_Controller_'\ 'LoadBalancer_Routing_Method' return self.connection.request(svc_rmeth, 'getAllObjects').object def _get_location(self, location_id): res = self.connection.request('SoftLayer_Location_Datacenter', 'getDatacenters').object dcenter = find(res, lambda d: d['name'] == location_id) if not dcenter: raise LibcloudError(value='Invalid value %s' % location_id, driver=self) return dcenter['id'] def _get_ipaddress(self, ip): svc_ipaddress = 'SoftLayer_Network_Subnet_IpAddress' return self.connection.request(svc_ipaddress, 'getByIpAddress', ip).object def _to_lb_package(self, pkg): try: price_id = pkg['prices'][0]['id'] except: price_id = -1 capacity = int(pkg.get('capacity', 0)) return LBPackage(id=pkg['id'], name=pkg['keyName'], description=pkg['description'], price_id=price_id, capacity=capacity) def _to_service_template(self, ip, port): """ Builds single member entry in Softlayer loadbalancer model """ template = { 'enabled': 1, # enable the service 'port': port, # back-end port 'ipAddressId': self._get_ipaddress(ip)['id'], 'healthChecks': [{ 'healthCheckTypeId': 21 # default health check }], 'groupReferences': [{ 'weight': 1 }] } return template def _to_balancer(self, lb): ipaddress = lb['ipAddress']['ipAddress'] extra = {} extra['connection_limit'] = lb['connectionLimit'] extra['ssl_active'] = lb['sslActiveFlag'] extra['ssl_enabled'] = lb['sslEnabledFlag'] extra['ha'] = lb['highAvailabilityFlag'] extra['datacenter'] = \ lb['loadBalancerHardware'][0]['datacenter']['name'] # In Softlayer, there could be 
multiple group of members (aka service # groups), so retrieve the first one vs = self._locate_service_group(lb, -1) if vs: port = vs['port'] if vs['serviceGroups']: srvgrp = vs['serviceGroups'][0] routing_method = srvgrp['routingMethod']['keyname'] routing_type = srvgrp['routingType']['keyname'] try: extra['algorithm'] = self.\ _value_to_algorithm(routing_method) except: pass extra['protocol'] = routing_type.lower() if not vs: port = -1 balancer = LoadBalancer( id=lb['id'], name='', state=State.UNKNOWN, ip=ipaddress, port=port, driver=self.connection.driver, extra=extra ) return balancer def _to_member(self, srv, balancer=None): svc_id = srv['id'] ip = srv['ipAddress']['ipAddress'] port = srv['port'] extra = {} extra['status'] = srv['status'] extra['enabled'] = srv['enabled'] return Member(id=svc_id, ip=ip, port=port, balancer=balancer, extra=extra) apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/brightbox.py0000664000175000017500000001133712701023453024644 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from libcloud.utils.py3 import httplib from libcloud.common.brightbox import BrightboxConnection from libcloud.loadbalancer.base import Driver, Algorithm, Member from libcloud.loadbalancer.base import LoadBalancer from libcloud.loadbalancer.types import State from libcloud.utils.misc import reverse_dict API_VERSION = '1.0' class BrightboxLBDriver(Driver): connectionCls = BrightboxConnection name = 'Brightbox' website = 'http://www.brightbox.co.uk/' LB_STATE_MAP = { 'creating': State.PENDING, 'active': State.RUNNING, 'deleting': State.UNKNOWN, 'deleted': State.UNKNOWN, 'failing': State.UNKNOWN, 'failed': State.UNKNOWN, } _VALUE_TO_ALGORITHM_MAP = { 'round-robin': Algorithm.ROUND_ROBIN, 'least-connections': Algorithm.LEAST_CONNECTIONS } _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) def list_protocols(self): return ['tcp', 'http'] def list_balancers(self): data = self.connection.request('/%s/load_balancers' % API_VERSION) \ .object return list(map(self._to_balancer, data)) def create_balancer(self, name, port, protocol, algorithm, members): response = self._post( '/%s/load_balancers' % API_VERSION, {'name': name, 'nodes': list(map(self._member_to_node, members)), 'policy': self._algorithm_to_value(algorithm), 'listeners': [{'in': port, 'out': port, 'protocol': protocol}], 'healthcheck': {'type': protocol, 'port': port}} ) return self._to_balancer(response.object) def destroy_balancer(self, balancer): response = self.connection.request('/%s/load_balancers/%s' % (API_VERSION, balancer.id), method='DELETE') return response.status == httplib.ACCEPTED def get_balancer(self, balancer_id): data = self.connection.request( '/%s/load_balancers/%s' % (API_VERSION, balancer_id)).object return self._to_balancer(data) def balancer_attach_compute_node(self, balancer, node): return self.balancer_attach_member(balancer, node) def balancer_attach_member(self, balancer, member): path = '/%s/load_balancers/%s/add_nodes' % (API_VERSION, balancer.id) self._post(path, 
{'nodes': [self._member_to_node(member)]}) return member def balancer_detach_member(self, balancer, member): path = '/%s/load_balancers/%s/remove_nodes' % (API_VERSION, balancer.id) response = self._post(path, {'nodes': [self._member_to_node(member)]}) return response.status == httplib.ACCEPTED def balancer_list_members(self, balancer): path = '/%s/load_balancers/%s' % (API_VERSION, balancer.id) data = self.connection.request(path).object def func(data): return self._node_to_member(data, balancer) return list(map(func, data['nodes'])) def _post(self, path, data={}): headers = {'Content-Type': 'application/json'} return self.connection.request(path, data=data, headers=headers, method='POST') def _to_balancer(self, data): return LoadBalancer( id=data['id'], name=data['name'], state=self.LB_STATE_MAP.get(data['status'], State.UNKNOWN), ip=self._public_ip(data), port=data['listeners'][0]['in'], driver=self.connection.driver ) def _member_to_node(self, member): return {'node': member.id} def _node_to_member(self, data, balancer): return Member(id=data['id'], ip=None, port=None, balancer=balancer) def _public_ip(self, data): if len(data['cloud_ips']) > 0: ip = data['cloud_ips'][0]['public_ip'] else: ip = None return ip apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/gce.py0000664000175000017500000003253613153541406023423 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
    import simplejson as json
except ImportError:
    import json  # NOQA

from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
from libcloud.compute.drivers.gce import GCEConnection, GCENodeDriver

# GCE doesn't actually give you an algorithm choice, but this is here simply as
# the closest match. The actual algorithm is described here:
# https://developers.google.com/compute/docs/load-balancing/#overview
DEFAULT_ALGORITHM = Algorithm.RANDOM


class GCELBDriver(Driver):
    connectionCls = GCEConnection
    apiname = 'googleapis'
    name = 'Google Compute Engine Load Balancer'
    website = 'https://cloud.google.com/'

    _VALUE_TO_ALGORITHM_MAP = {
        'RANDOM': Algorithm.RANDOM
    }

    def __init__(self, *args, **kwargs):
        # Reuse an existing GCENodeDriver when supplied so both drivers
        # share one authenticated connection.
        if kwargs.get('gce_driver'):
            self.gce = kwargs['gce_driver']
        else:
            self.gce = GCENodeDriver(*args, **kwargs)

        self.connection = self.gce.connection

    def _get_node_from_ip(self, ip):
        """
        Return the node object that matches a given public IP address.

        :param  ip: Public IP address to search for
        :type   ip: ``str``

        :return:  Node object that has the given IP, or None if not found.
        :rtype:   :class:`Node` or None
        """
        all_nodes = self.gce.list_nodes(ex_zone='all')
        for node in all_nodes:
            if ip in node.public_ips:
                return node
        return None

    def list_protocols(self):
        """
        Return a list of supported protocols.

        For GCE, this is simply a hardcoded list.

        :rtype: ``list`` of ``str``
        """
        return ['TCP', 'UDP']

    def list_balancers(self, ex_region=None):
        """
        List all loadbalancers

        :keyword  ex_region: The region to return balancers from.
                             If None, will default to self.region.  If 'all',
                             will return all balancers.
        :type     ex_region: ``str`` or :class:`GCERegion` or ``None``

        :rtype: ``list`` of :class:`LoadBalancer`
        """
        balancers = []
        for fwr in self.gce.ex_list_forwarding_rules(region=ex_region):
            balancers.append(self._forwarding_rule_to_loadbalancer(fwr))
        return balancers

    def create_balancer(self, name, port, protocol, algorithm, members,
                        ex_region=None, ex_healthchecks=None, ex_address=None,
                        ex_session_affinity=None):
        """
        Create a new load balancer instance.

        For GCE, this means creating a forwarding rule and a matching target
        pool, then adding the members to the target pool.

        :param  name: Name of the new load balancer (required)
        :type   name: ``str``

        :param  port: Port or range of ports the load balancer should listen
                      on, defaults to all ports.  Examples: '80', '5000-5999'
        :type   port: ``str``

        :param  protocol: Load balancer protocol.  Should be 'tcp' or 'udp',
                          defaults to 'tcp'.
        :type   protocol: ``str``

        :param  members: List of Members to attach to balancer.  Can be Member
                         objects or Node objects.  Node objects are preferred
                         for GCE, but Member objects are accepted to comply
                         with the established libcloud API.  Note that the
                         'port' attribute of the members is ignored.
        :type   members: ``list`` of :class:`Member` or :class:`Node`

        :param  algorithm: Load balancing algorithm.  Ignored for GCE which
                           uses a hashing-based algorithm.
        :type   algorithm: :class:`Algorithm` or ``None``

        :keyword  ex_region: Optional region to create the load balancer in.
                             Defaults to the default region of the GCE Node
                             Driver.
        :type     ex_region: C{GCERegion} or ``str``

        :keyword  ex_healthchecks: Optional list of healthcheck objects or
                                   names to add to the load balancer.
        :type     ex_healthchecks: ``list`` of :class:`GCEHealthCheck` or
                                   ``list`` of ``str``

        :keyword  ex_address: Optional static address object to be assigned to
                              the load balancer.
        :type     ex_address: C{GCEAddress}

        :keyword  ex_session_affinity: Optional algorithm to use for session
                                       affinity.  This will modify the hashing
                                       algorithm such that a client will tend
                                       to stick to a particular Member.
        :type     ex_session_affinity: ``str``

        :return:  LoadBalancer object
        :rtype:   :class:`LoadBalancer`
        """
        node_list = []
        for member in members:
            # Member object
            if hasattr(member, 'ip'):
                if member.extra.get('node'):
                    node_list.append(member.extra['node'])
                else:
                    node_list.append(self._get_node_from_ip(member.ip))
            # Node object
            elif hasattr(member, 'name'):
                node_list.append(member)
            # Assume it's a node name otherwise
            else:
                node_list.append(self.gce.ex_get_node(member, 'all'))

        # Create Target Pool
        tp_name = '%s-tp' % name
        targetpool = self.gce.ex_create_targetpool(
            tp_name, region=ex_region, healthchecks=ex_healthchecks,
            nodes=node_list, session_affinity=ex_session_affinity)

        # Create the Forwarding rule, but if it fails, delete the target pool.
        try:
            forwarding_rule = self.gce.ex_create_forwarding_rule(
                name, targetpool, region=ex_region, protocol=protocol,
                port_range=port, address=ex_address)
        # NOTE(review): bare 'except:' (re-raised below) also intercepts
        # KeyboardInterrupt/SystemExit; 'except Exception' would usually
        # be preferred, at the cost of skipping cleanup on interrupts.
        except:
            targetpool.destroy()
            raise

        # Reformat forwarding rule to LoadBalancer object
        return self._forwarding_rule_to_loadbalancer(forwarding_rule)

    def destroy_balancer(self, balancer):
        """
        Destroy a load balancer.

        For GCE, this means destroying the associated forwarding rule, then
        destroying the target pool that was attached to the forwarding rule.

        :param  balancer: LoadBalancer which should be used
        :type   balancer: :class:`LoadBalancer`

        :return:  True if successful
        :rtype:   ``bool``
        """
        destroy = balancer.extra['forwarding_rule'].destroy()
        if destroy:
            tp_destroy = balancer.extra['targetpool'].destroy()
            return tp_destroy
        else:
            return destroy

    def get_balancer(self, balancer_id):
        """
        Return a :class:`LoadBalancer` object.

        :param  balancer_id: Name of load balancer you wish to fetch.  For
                             GCE, this is the name of the associated
                             forwarding rule.
        :param  balancer_id: ``str``

        :rtype: :class:`LoadBalancer`
        """
        fwr = self.gce.ex_get_forwarding_rule(balancer_id)
        return self._forwarding_rule_to_loadbalancer(fwr)

    def balancer_attach_compute_node(self, balancer, node):
        """
        Attach a compute node as a member to the load balancer.

        :param  balancer: LoadBalancer which should be used
        :type   balancer: :class:`LoadBalancer`

        :param  node: Node to join to the balancer
        :type   node: :class:`Node`

        :return:  Member after joining the balancer.
        :rtype:   :class:`Member`
        """
        add_node = balancer.extra['targetpool'].add_node(node)
        if add_node:
            return self._node_to_member(node, balancer)

    def balancer_attach_member(self, balancer, member):
        """
        Attach a member to balancer

        :param  balancer: LoadBalancer which should be used
        :type   balancer: :class:`LoadBalancer`

        :param  member: Member to join to the balancer
        :type   member: :class:`Member`

        :return:  Member after joining the balancer.
        :rtype:   :class:`Member`
        """
        node = member.extra.get('node') or self._get_node_from_ip(member.ip)
        add_node = balancer.extra['targetpool'].add_node(node)
        if add_node:
            return self._node_to_member(node, balancer)

    def balancer_detach_member(self, balancer, member):
        """
        Detach member from balancer

        :param  balancer: LoadBalancer which should be used
        :type   balancer: :class:`LoadBalancer`

        :param  member: Member which should be used
        :type   member: :class:`Member`

        :return:  True if member detach was successful, otherwise False
        :rtype:   ``bool``
        """
        node = member.extra.get('node') or self._get_node_from_ip(member.ip)
        remove_node = balancer.extra['targetpool'].remove_node(node)
        return remove_node

    def balancer_list_members(self, balancer):
        """
        Return list of members attached to balancer

        :param  balancer: LoadBalancer which should be used
        :type   balancer: :class:`LoadBalancer`

        :rtype: ``list`` of :class:`Member`
        """
        return [self._node_to_member(n, balancer)
                for n in balancer.extra['targetpool'].nodes]

    def ex_create_healthcheck(self, *args, **kwargs):
        # Thin pass-through to the compute driver's healthcheck creation.
        return self.gce.ex_create_healthcheck(*args, **kwargs)

    def ex_list_healthchecks(self):
        return self.gce.ex_list_healthchecks()

    def ex_balancer_attach_healthcheck(self, balancer, healthcheck):
        """
        Attach a healthcheck to balancer

        :param  balancer: LoadBalancer which should be used
        :type   balancer: :class:`LoadBalancer`

        :param  healthcheck: Healthcheck to add
        :type   healthcheck: :class:`GCEHealthCheck`

        :return:  True if successful
        :rtype:   ``bool``
        """
        return balancer.extra['targetpool'].add_healthcheck(healthcheck)

    def ex_balancer_detach_healthcheck(self, balancer, healthcheck):
        """
        Detach healtcheck from balancer

        :param  balancer: LoadBalancer which should be used
        :type   balancer: :class:`LoadBalancer`

        :param  healthcheck: Healthcheck to remove
        :type   healthcheck: :class:`GCEHealthCheck`

        :return:  True if successful
        :rtype:   ``bool``
        """
        return balancer.extra['targetpool'].remove_healthcheck(healthcheck)

    def ex_balancer_list_healthchecks(self, balancer):
        """
        Return list of healthchecks attached to balancer

        :param  balancer: LoadBalancer which should be used
        :type   balancer: :class:`LoadBalancer`

        :rtype: ``list`` of :class:`HealthChecks`
        """
        return balancer.extra['healthchecks']

    def _node_to_member(self, node, balancer):
        """
        Return a Member object based on a Node.

        :param  node: Node object
        :type   node: :class:`Node`

        :keyword  balancer: The balancer the member is attached to.
        :type     balancer: :class:`LoadBalancer`

        :return:  Member object
        :rtype:   :class:`Member`
        """
        # A balancer can have a node as a member, even if the node doesn't
        # exist.  In this case, 'node' is simply a string to where the
        # resource would be found if it was there.
        if hasattr(node, 'name'):
            member_id = node.name
        else:
            member_id = node

        if hasattr(node, 'public_ips') and len(node.public_ips) > 0:
            member_ip = node.public_ips[0]
        else:
            member_ip = None

        extra = {'node': node}
        return Member(id=member_id, ip=member_ip, port=balancer.port,
                      balancer=balancer, extra=extra)

    def _forwarding_rule_to_loadbalancer(self, forwarding_rule):
        """
        Return a Load Balancer object based on a GCEForwardingRule object.

        :param  forwarding_rule: ForwardingRule object
        :type   forwarding_rule: :class:`GCEForwardingRule`

        :return:  LoadBalancer object
        :rtype:   :class:`LoadBalancer`
        """
        extra = {}
        extra['forwarding_rule'] = forwarding_rule
        extra['targetpool'] = forwarding_rule.targetpool
        extra['healthchecks'] = forwarding_rule.targetpool.healthchecks

        return LoadBalancer(id=forwarding_rule.id,
                            name=forwarding_rule.name, state=None,
                            ip=forwarding_rule.address,
                            port=forwarding_rule.extra['portRange'],
                            driver=self, extra=extra)
apache-libcloud-2.2.1/libcloud/loadbalancer/drivers/dimensiondata.py0000664000175000017500000012457413153541406025500 0ustar  kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.utils.py3 import ET from libcloud.common.dimensiondata import DimensionDataConnection from libcloud.common.dimensiondata import DimensionDataPool from libcloud.common.dimensiondata import DimensionDataPoolMember from libcloud.common.dimensiondata import DimensionDataVirtualListener from libcloud.common.dimensiondata import DimensionDataVIPNode from libcloud.common.dimensiondata import DimensionDataDefaultHealthMonitor from libcloud.common.dimensiondata import DimensionDataPersistenceProfile from libcloud.common.dimensiondata import \ DimensionDataVirtualListenerCompatibility from libcloud.common.dimensiondata import DimensionDataDefaultiRule from libcloud.common.dimensiondata import API_ENDPOINTS from libcloud.common.dimensiondata import DEFAULT_REGION from libcloud.common.dimensiondata import TYPES_URN from libcloud.utils.misc import reverse_dict from libcloud.utils.xml import fixxpath, findtext, findall from libcloud.loadbalancer.types import State from libcloud.loadbalancer.base import Algorithm, Driver, LoadBalancer from libcloud.loadbalancer.base import Member from libcloud.loadbalancer.types import Provider class DimensionDataLBDriver(Driver): """ DimensionData node driver. 
""" selected_region = None connectionCls = DimensionDataConnection name = 'Dimension Data Load Balancer' website = 'https://cloud.dimensiondata.com/' type = Provider.DIMENSIONDATA api_version = 1.0 network_domain_id = None _VALUE_TO_ALGORITHM_MAP = { 'ROUND_ROBIN': Algorithm.ROUND_ROBIN, 'LEAST_CONNECTIONS': Algorithm.LEAST_CONNECTIONS, 'SHORTEST_RESPONSE': Algorithm.SHORTEST_RESPONSE, 'PERSISTENT_IP': Algorithm.PERSISTENT_IP } _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) _VALUE_TO_STATE_MAP = { 'NORMAL': State.RUNNING, 'PENDING_ADD': State.PENDING, 'PENDING_CHANGE': State.PENDING, 'PENDING_DELETE': State.PENDING, 'FAILED_ADD': State.ERROR, 'FAILED_CHANGE': State.ERROR, 'FAILED_DELETE': State.ERROR, 'REQUIRES_SUPPORT': State.ERROR } def __init__(self, key, secret=None, secure=True, host=None, port=None, api_version=None, region=DEFAULT_REGION, **kwargs): if region not in API_ENDPOINTS and host is None: raise ValueError( 'Invalid region: %s, no host specified' % (region)) if region is not None: self.selected_region = API_ENDPOINTS[region] super(DimensionDataLBDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, api_version=api_version, region=region, **kwargs) def _ex_connection_class_kwargs(self): """ Add the region to the kwargs before the connection is instantiated """ kwargs = super(DimensionDataLBDriver, self)._ex_connection_class_kwargs() kwargs['region'] = self.selected_region return kwargs def create_balancer(self, name, port=None, protocol=None, algorithm=None, members=None, ex_listener_ip_address=None): """ Create a new load balancer instance :param name: Name of the new load balancer (required) :type name: ``str`` :param port: An integer in the range of 1-65535. If not supplied, it will be taken to mean 'Any Port' :type port: ``int`` :param protocol: Loadbalancer protocol, defaults to http. 
:type protocol: ``str`` :param members: list of Members to attach to balancer (optional) :type members: ``list`` of :class:`Member` :param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN. :type algorithm: :class:`.Algorithm` :param ex_listener_ip_address: Must be a valid IPv4 in dot-decimal notation (x.x.x.x). :type ex_listener_ip_address: ``str`` :rtype: :class:`LoadBalancer` """ network_domain_id = self.network_domain_id if protocol is None: protocol = 'http' if algorithm is None: algorithm = Algorithm.ROUND_ROBIN # Create a pool first pool = self.ex_create_pool( network_domain_id=network_domain_id, name=name, ex_description=None, balancer_method=self._ALGORITHM_TO_VALUE_MAP[algorithm]) # Attach the members to the pool as nodes if members is not None: for member in members: node = self.ex_create_node( network_domain_id=network_domain_id, name=member.ip, ip=member.ip, ex_description=None) self.ex_create_pool_member( pool=pool, node=node, port=port) # Create the virtual listener (balancer) listener = self.ex_create_virtual_listener( network_domain_id=network_domain_id, name=name, ex_description=name, port=port, pool=pool, listener_ip_address=ex_listener_ip_address) return LoadBalancer( id=listener.id, name=listener.name, state=State.RUNNING, ip=listener.ip, port=port, driver=self, extra={'pool_id': pool.id, 'network_domain_id': network_domain_id, 'listener_ip_address': ex_listener_ip_address} ) def list_balancers(self, ex_network_domain_id=None): """ List all loadbalancers inside a geography or in given network. 
In Dimension Data terminology these are known as virtual listeners :param ex_network_domain_id: UUID of Network Domain if not None returns only balancers in the given network if None then returns all pools for the organization :type ex_network_domain_id: ``str`` :rtype: ``list`` of :class:`LoadBalancer` """ params = None if ex_network_domain_id is not None: params = {"networkDomainId": ex_network_domain_id} return self._to_balancers( self.connection .request_with_orgId_api_2('networkDomainVip/virtualListener', params=params).object) def get_balancer(self, balancer_id): """ Return a :class:`LoadBalancer` object. :param balancer_id: id of a load balancer you want to fetch :type balancer_id: ``str`` :rtype: :class:`LoadBalancer` """ bal = self.connection \ .request_with_orgId_api_2('networkDomainVip/virtualListener/%s' % balancer_id).object return self._to_balancer(bal) def list_protocols(self): """ Return a list of supported protocols. Since all protocols are support by Dimension Data, this is a list of common protocols. :rtype: ``list`` of ``str`` """ return ['http', 'https', 'tcp', 'udp', 'ftp', 'smtp'] def balancer_list_members(self, balancer): """ Return list of members attached to balancer. In Dimension Data terminology these are the members of the pools within a virtual listener. :param balancer: LoadBalancer which should be used :type balancer: :class:`LoadBalancer` :rtype: ``list`` of :class:`Member` """ pool_members = self.ex_get_pool_members(balancer.extra['pool_id']) members = [] for pool_member in pool_members: members.append(Member( id=pool_member.id, ip=pool_member.ip, port=pool_member.port, balancer=balancer, extra=None )) return members def balancer_attach_member(self, balancer, member): """ Attach a member to balancer :param balancer: LoadBalancer which should be used :type balancer: :class:`LoadBalancer` :param member: Member to join to the balancer :type member: :class:`Member` :return: Member after joining the balancer. 
:rtype: :class:`Member` """ node = self.ex_create_node( network_domain_id=balancer.extra['network_domain_id'], name='Member.' + member.ip, ip=member.ip, ex_description='' ) if node is False: return False pool = self.ex_get_pool(balancer.extra['pool_id']) pool_member = self.ex_create_pool_member( pool=pool, node=node, port=member.port) member.id = pool_member.id return member def balancer_detach_member(self, balancer, member): """ Detach member from balancer :param balancer: LoadBalancer which should be used :type balancer: :class:`LoadBalancer` :param member: Member which should be used :type member: :class:`Member` :return: ``True`` if member detach was successful, otherwise ``False``. :rtype: ``bool`` """ create_pool_m = ET.Element('removePoolMember', {'xmlns': TYPES_URN, 'id': member.id}) result = self.connection.request_with_orgId_api_2( 'networkDomainVip/removePoolMember', method='POST', data=ET.tostring(create_pool_m)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def destroy_balancer(self, balancer): """ Destroy a load balancer (virtual listener) :param balancer: LoadBalancer which should be used :type balancer: :class:`LoadBalancer` :return: ``True`` if the destroy was successful, otherwise ``False``. 
:rtype: ``bool`` """ delete_listener = ET.Element('deleteVirtualListener', {'xmlns': TYPES_URN, 'id': balancer.id}) result = self.connection.request_with_orgId_api_2( 'networkDomainVip/deleteVirtualListener', method='POST', data=ET.tostring(delete_listener)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_set_current_network_domain(self, network_domain_id): """ Set the network domain (part of the network) of the driver :param network_domain_id: ID of the pool (required) :type network_domain_id: ``str`` """ self.network_domain_id = network_domain_id def ex_get_current_network_domain(self): """ Get the current network domain ID of the driver. :return: ID of the network domain :rtype: ``str`` """ return self.network_domain_id def ex_create_pool_member(self, pool, node, port=None): """ Create a new member in an existing pool from an existing node :param pool: Instance of ``DimensionDataPool`` (required) :type pool: ``DimensionDataPool`` :param node: Instance of ``DimensionDataVIPNode`` (required) :type node: ``DimensionDataVIPNode`` :param port: Port the the service will listen on :type port: ``str`` :return: The node member, instance of ``DimensionDataPoolMember`` :rtype: ``DimensionDataPoolMember`` """ create_pool_m = ET.Element('addPoolMember', {'xmlns': TYPES_URN}) ET.SubElement(create_pool_m, "poolId").text = pool.id ET.SubElement(create_pool_m, "nodeId").text = node.id if port is not None: ET.SubElement(create_pool_m, "port").text = str(port) ET.SubElement(create_pool_m, "status").text = 'ENABLED' response = self.connection.request_with_orgId_api_2( 'networkDomainVip/addPoolMember', method='POST', data=ET.tostring(create_pool_m)).object member_id = None node_name = None for info in findall(response, 'info', TYPES_URN): if info.get('name') == 'poolMemberId': member_id = info.get('value') if info.get('name') == 'nodeName': node_name = info.get('value') return DimensionDataPoolMember( 
id=member_id, name=node_name, status=State.RUNNING, ip=node.ip, port=port, node_id=node.id ) def ex_create_node(self, network_domain_id, name, ip, ex_description, connection_limit=25000, connection_rate_limit=2000): """ Create a new node :param network_domain_id: Network Domain ID (required) :type name: ``str`` :param name: name of the node (required) :type name: ``str`` :param ip: IPv4 address of the node (required) :type ip: ``str`` :param ex_description: Description of the node (required) :type ex_description: ``str`` :param connection_limit: Maximum number of concurrent connections per sec :type connection_limit: ``int`` :param connection_rate_limit: Maximum number of concurrent sessions :type connection_rate_limit: ``int`` :return: Instance of ``DimensionDataVIPNode`` :rtype: ``DimensionDataVIPNode`` """ create_node_elm = ET.Element('createNode', {'xmlns': TYPES_URN}) ET.SubElement(create_node_elm, "networkDomainId") \ .text = network_domain_id ET.SubElement(create_node_elm, "name").text = name ET.SubElement(create_node_elm, "description").text \ = str(ex_description) ET.SubElement(create_node_elm, "ipv4Address").text = ip ET.SubElement(create_node_elm, "status").text = 'ENABLED' ET.SubElement(create_node_elm, "connectionLimit") \ .text = str(connection_limit) ET.SubElement(create_node_elm, "connectionRateLimit") \ .text = str(connection_rate_limit) response = self.connection.request_with_orgId_api_2( action='networkDomainVip/createNode', method='POST', data=ET.tostring(create_node_elm)).object node_id = None node_name = None for info in findall(response, 'info', TYPES_URN): if info.get('name') == 'nodeId': node_id = info.get('value') if info.get('name') == 'name': node_name = info.get('value') return DimensionDataVIPNode( id=node_id, name=node_name, status=State.RUNNING, ip=ip ) def ex_update_node(self, node): """ Update the properties of a node :param pool: The instance of ``DimensionDataNode`` to update :type pool: ``DimensionDataNode`` :return: The 
instance of ``DimensionDataNode`` :rtype: ``DimensionDataNode`` """ create_node_elm = ET.Element('editNode', {'xmlns': TYPES_URN}) ET.SubElement(create_node_elm, "connectionLimit") \ .text = str(node.connection_limit) ET.SubElement(create_node_elm, "connectionRateLimit") \ .text = str(node.connection_rate_limit) self.connection.request_with_orgId_api_2( action='networkDomainVip/createNode', method='POST', data=ET.tostring(create_node_elm)).object return node def ex_set_node_state(self, node, enabled): """ Change the state of a node (enable/disable) :param pool: The instance of ``DimensionDataNode`` to update :type pool: ``DimensionDataNode`` :param enabled: The target state of the node :type enabled: ``bool`` :return: The instance of ``DimensionDataNode`` :rtype: ``DimensionDataNode`` """ create_node_elm = ET.Element('editNode', {'xmlns': TYPES_URN}) ET.SubElement(create_node_elm, "status") \ .text = "ENABLED" if enabled is True else "DISABLED" self.connection.request_with_orgId_api_2( action='networkDomainVip/editNode', method='POST', data=ET.tostring(create_node_elm)).object return node def ex_create_pool(self, network_domain_id, name, balancer_method, ex_description, health_monitors=None, service_down_action='NONE', slow_ramp_time=30): """ Create a new pool :param network_domain_id: Network Domain ID (required) :type name: ``str`` :param name: name of the node (required) :type name: ``str`` :param balancer_method: The load balancer algorithm (required) :type balancer_method: ``str`` :param ex_description: Description of the node (required) :type ex_description: ``str`` :param health_monitors: A list of health monitors to use for the pool. 
:type health_monitors: ``list`` of :class:`DimensionDataDefaultHealthMonitor` :param service_down_action: What to do when node is unavailable NONE, DROP or RESELECT :type service_down_action: ``str`` :param slow_ramp_time: Number of seconds to stagger ramp up of nodes :type slow_ramp_time: ``int`` :return: Instance of ``DimensionDataPool`` :rtype: ``DimensionDataPool`` """ # Names cannot contain spaces. name.replace(' ', '_') create_node_elm = ET.Element('createPool', {'xmlns': TYPES_URN}) ET.SubElement(create_node_elm, "networkDomainId") \ .text = network_domain_id ET.SubElement(create_node_elm, "name").text = name ET.SubElement(create_node_elm, "description").text \ = str(ex_description) ET.SubElement(create_node_elm, "loadBalanceMethod") \ .text = str(balancer_method) if health_monitors is not None: for monitor in health_monitors: ET.SubElement(create_node_elm, "healthMonitorId") \ .text = str(monitor.id) ET.SubElement(create_node_elm, "serviceDownAction") \ .text = service_down_action ET.SubElement(create_node_elm, "slowRampTime").text \ = str(slow_ramp_time) response = self.connection.request_with_orgId_api_2( action='networkDomainVip/createPool', method='POST', data=ET.tostring(create_node_elm)).object pool_id = None for info in findall(response, 'info', TYPES_URN): if info.get('name') == 'poolId': pool_id = info.get('value') return DimensionDataPool( id=pool_id, name=name, description=ex_description, status=State.RUNNING, load_balance_method=str(balancer_method), health_monitor_id=None, service_down_action=service_down_action, slow_ramp_time=str(slow_ramp_time) ) def ex_create_virtual_listener(self, network_domain_id, name, ex_description, port=None, pool=None, listener_ip_address=None, persistence_profile=None, fallback_persistence_profile=None, irule=None, protocol='TCP', connection_limit=25000, connection_rate_limit=2000, source_port_preservation='PRESERVE'): """ Create a new virtual listener (load balancer) :param network_domain_id: Network Domain ID 
(required) :type name: ``str`` :param name: name of the listener (required) :type name: ``str`` :param ex_description: Description of the node (required) :type ex_description: ``str`` :param port: An integer in the range of 1-65535. If not supplied, it will be taken to mean 'Any Port' :type port: ``int`` :param pool: The pool to use for the listener :type pool: :class:`DimensionDataPool` :param listener_ip_address: The IPv4 Address of the virtual listener :type listener_ip_address: ``str`` :param persistence_profile: Persistence profile :type persistence_profile: :class:`DimensionDataPersistenceProfile` :param fallback_persistence_profile: Fallback persistence profile :type fallback_persistence_profile: :class:`DimensionDataPersistenceProfile` :param irule: The iRule to apply :type irule: :class:`DimensionDataDefaultiRule` :param protocol: For STANDARD type, ANY, TCP or UDP for PERFORMANCE_LAYER_4 choice of ANY, TCP, UDP, HTTP :type protcol: ``str`` :param connection_limit: Maximum number of concurrent connections per sec :type connection_limit: ``int`` :param connection_rate_limit: Maximum number of concurrent sessions :type connection_rate_limit: ``int`` :param source_port_preservation: Choice of PRESERVE, PRESERVE_STRICT or CHANGE :type source_port_preservation: ``str`` :return: Instance of the listener :rtype: ``DimensionDataVirtualListener`` """ if (port == 80) or (port == 443): listener_type = 'PERFORMANCE_LAYER_4' else: listener_type = 'STANDARD' create_node_elm = ET.Element('createVirtualListener', {'xmlns': TYPES_URN}) ET.SubElement(create_node_elm, "networkDomainId") \ .text = network_domain_id ET.SubElement(create_node_elm, "name").text = name ET.SubElement(create_node_elm, "description").text = \ str(ex_description) ET.SubElement(create_node_elm, "type").text = listener_type ET.SubElement(create_node_elm, "protocol") \ .text = protocol if listener_ip_address is not None: ET.SubElement(create_node_elm, "listenerIpAddress").text = \ 
str(listener_ip_address) if port is not None: ET.SubElement(create_node_elm, "port").text = str(port) ET.SubElement(create_node_elm, "enabled").text = 'true' ET.SubElement(create_node_elm, "connectionLimit") \ .text = str(connection_limit) ET.SubElement(create_node_elm, "connectionRateLimit") \ .text = str(connection_rate_limit) ET.SubElement(create_node_elm, "sourcePortPreservation") \ .text = source_port_preservation if pool is not None: ET.SubElement(create_node_elm, "poolId") \ .text = pool.id if persistence_profile is not None: ET.SubElement(create_node_elm, "persistenceProfileId") \ .text = persistence_profile.id if fallback_persistence_profile is not None: ET.SubElement(create_node_elm, "fallbackPersistenceProfileId") \ .text = fallback_persistence_profile.id if irule is not None: ET.SubElement(create_node_elm, "iruleId") \ .text = irule.id response = self.connection.request_with_orgId_api_2( action='networkDomainVip/createVirtualListener', method='POST', data=ET.tostring(create_node_elm)).object virtual_listener_id = None virtual_listener_ip = None for info in findall(response, 'info', TYPES_URN): if info.get('name') == 'virtualListenerId': virtual_listener_id = info.get('value') if info.get('name') == 'listenerIpAddress': virtual_listener_ip = info.get('value') return DimensionDataVirtualListener( id=virtual_listener_id, name=name, ip=virtual_listener_ip, status=State.RUNNING ) def ex_get_pools(self, ex_network_domain_id=None): """ Get all of the pools inside the current geography or in given network. 
:param ex_network_domain_id: UUID of Network Domain if not None returns only balancers in the given network if None then returns all pools for the organization :type ex_network_domain_id: ``str`` :return: Returns a ``list`` of type ``DimensionDataPool`` :rtype: ``list`` of ``DimensionDataPool`` """ params = None if ex_network_domain_id is not None: params = {"networkDomainId": ex_network_domain_id} pools = self.connection \ .request_with_orgId_api_2('networkDomainVip/pool', params=params).object return self._to_pools(pools) def ex_get_pool(self, pool_id): """ Get a specific pool inside the current geography :param pool_id: The identifier of the pool :type pool_id: ``str`` :return: Returns an instance of ``DimensionDataPool`` :rtype: ``DimensionDataPool`` """ pool = self.connection \ .request_with_orgId_api_2('networkDomainVip/pool/%s' % pool_id).object return self._to_pool(pool) def ex_update_pool(self, pool): """ Update the properties of an existing pool only method, serviceDownAction and slowRampTime are updated :param pool: The instance of ``DimensionDataPool`` to update :type pool: ``DimensionDataPool`` :return: ``True`` for success, ``False`` for failure :rtype: ``bool`` """ create_node_elm = ET.Element('editPool', {'xmlns': TYPES_URN}) ET.SubElement(create_node_elm, "loadBalanceMethod") \ .text = str(pool.load_balance_method) ET.SubElement(create_node_elm, "serviceDownAction") \ .text = pool.service_down_action ET.SubElement(create_node_elm, "slowRampTime").text \ = str(pool.slow_ramp_time) response = self.connection.request_with_orgId_api_2( action='networkDomainVip/editPool', method='POST', data=ET.tostring(create_node_elm)).object response_code = findtext(response, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_destroy_pool(self, pool): """ Destroy an existing pool :param pool: The instance of ``DimensionDataPool`` to destroy :type pool: ``DimensionDataPool`` :return: ``True`` for success, ``False`` for failure :rtype: 
``bool`` """ destroy_request = ET.Element('deletePool', {'xmlns': TYPES_URN, 'id': pool.id}) result = self.connection.request_with_orgId_api_2( action='networkDomainVip/deletePool', method='POST', data=ET.tostring(destroy_request)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_get_pool_members(self, pool_id): """ Get the members of a pool :param pool: The instance of a pool :type pool: ``DimensionDataPool`` :return: Returns an ``list`` of ``DimensionDataPoolMember`` :rtype: ``list`` of ``DimensionDataPoolMember`` """ members = self.connection \ .request_with_orgId_api_2('networkDomainVip/poolMember?poolId=%s' % pool_id).object return self._to_members(members) def ex_get_pool_member(self, pool_member_id): """ Get a specific member of a pool :param pool: The id of a pool member :type pool: ``str`` :return: Returns an instance of ``DimensionDataPoolMember`` :rtype: ``DimensionDataPoolMember`` """ member = self.connection \ .request_with_orgId_api_2('networkDomainVip/poolMember/%s' % pool_member_id).object return self._to_member(member) def ex_set_pool_member_state(self, member, enabled=True): request = ET.Element('editPoolMember', {'xmlns': TYPES_URN, 'id': member.id}) state = "ENABLED" if enabled is True else "DISABLED" ET.SubElement(request, 'status').text = state result = self.connection.request_with_orgId_api_2( action='networkDomainVip/editPoolMember', method='POST', data=ET.tostring(request)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_destroy_pool_member(self, member, destroy_node=False): """ Destroy a specific member of a pool :param pool: The instance of a pool member :type pool: ``DimensionDataPoolMember`` :param destroy_node: Also destroy the associated node :type destroy_node: ``bool`` :return: ``True`` for success, ``False`` for failure :rtype: ``bool`` """ # remove the pool member destroy_request = 
ET.Element('removePoolMember', {'xmlns': TYPES_URN, 'id': member.id}) result = self.connection.request_with_orgId_api_2( action='networkDomainVip/removePoolMember', method='POST', data=ET.tostring(destroy_request)).object if member.node_id is not None and destroy_node is True: return self.ex_destroy_node(member.node_id) else: response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_get_nodes(self, ex_network_domain_id=None): """ Get the nodes within this geography or in given network. :param ex_network_domain_id: UUID of Network Domain if not None returns only balancers in the given network if None then returns all pools for the organization :type ex_network_domain_id: ``str`` :return: Returns an ``list`` of ``DimensionDataVIPNode`` :rtype: ``list`` of ``DimensionDataVIPNode`` """ params = None if ex_network_domain_id is not None: params = {"networkDomainId": ex_network_domain_id} nodes = self.connection \ .request_with_orgId_api_2('networkDomainVip/node', params=params).object return self._to_nodes(nodes) def ex_get_node(self, node_id): """ Get the node specified by node_id :return: Returns an instance of ``DimensionDataVIPNode`` :rtype: Instance of ``DimensionDataVIPNode`` """ nodes = self.connection \ .request_with_orgId_api_2('networkDomainVip/node/%s' % node_id).object return self._to_node(nodes) def ex_destroy_node(self, node_id): """ Destroy a specific node :param node_id: The ID of of a ``DimensionDataVIPNode`` :type node_id: ``str`` :return: ``True`` for success, ``False`` for failure :rtype: ``bool`` """ # Destroy the node destroy_request = ET.Element('deleteNode', {'xmlns': TYPES_URN, 'id': node_id}) result = self.connection.request_with_orgId_api_2( action='networkDomainVip/deleteNode', method='POST', data=ET.tostring(destroy_request)).object response_code = findtext(result, 'responseCode', TYPES_URN) return response_code in ['IN_PROGRESS', 'OK'] def ex_wait_for_state(self, state, func, 
poll_interval=2, timeout=60, *args, **kwargs): """ Wait for the function which returns a instance with field status to match Keep polling func until one of the desired states is matched :param state: Either the desired state (`str`) or a `list` of states :type state: ``str`` or ``list`` :param func: The function to call, e.g. ex_get_vlan :type func: ``function`` :param poll_interval: The number of seconds to wait between checks :type poll_interval: `int` :param timeout: The total number of seconds to wait to reach a state :type timeout: `int` :param args: The arguments for func :type args: Positional arguments :param kwargs: The arguments for func :type kwargs: Keyword arguments """ return self.connection.wait_for_state(state, func, poll_interval, timeout, *args, **kwargs) def ex_get_default_health_monitors(self, network_domain_id): """ Get the default health monitors available for a network domain :param network_domain_id: The ID of of a ``DimensionDataNetworkDomain`` :type network_domain_id: ``str`` :rtype: `list` of :class:`DimensionDataDefaultHealthMonitor` """ result = self.connection.request_with_orgId_api_2( action='networkDomainVip/defaultHealthMonitor', params={'networkDomainId': network_domain_id}, method='GET').object return self._to_health_monitors(result) def ex_get_default_persistence_profiles(self, network_domain_id): """ Get the default persistence profiles available for a network domain :param network_domain_id: The ID of of a ``DimensionDataNetworkDomain`` :type network_domain_id: ``str`` :rtype: `list` of :class:`DimensionDataPersistenceProfile` """ result = self.connection.request_with_orgId_api_2( action='networkDomainVip/defaultPersistenceProfile', params={'networkDomainId': network_domain_id}, method='GET').object return self._to_persistence_profiles(result) def ex_get_default_irules(self, network_domain_id): """ Get the default iRules available for a network domain :param network_domain_id: The ID of of a ``DimensionDataNetworkDomain`` :type 
network_domain_id: ``str`` :rtype: `list` of :class:`DimensionDataDefaultiRule` """ result = self.connection.request_with_orgId_api_2( action='networkDomainVip/defaultIrule', params={'networkDomainId': network_domain_id}, method='GET').object return self._to_irules(result) def _to_irules(self, object): irules = [] matches = object.findall( fixxpath('defaultIrule', TYPES_URN)) for element in matches: irules.append(self._to_irule(element)) return irules def _to_irule(self, element): compatible = [] matches = element.findall( fixxpath('virtualListenerCompatibility', TYPES_URN)) for match_element in matches: compatible.append( DimensionDataVirtualListenerCompatibility( type=match_element.get('type'), protocol=match_element.get('protocol', None))) irule_element = element.find(fixxpath('irule', TYPES_URN)) return DimensionDataDefaultiRule( id=irule_element.get('id'), name=irule_element.get('name'), compatible_listeners=compatible ) def _to_persistence_profiles(self, object): profiles = [] matches = object.findall( fixxpath('defaultPersistenceProfile', TYPES_URN)) for element in matches: profiles.append(self._to_persistence_profile(element)) return profiles def _to_persistence_profile(self, element): compatible = [] matches = element.findall( fixxpath('virtualListenerCompatibility', TYPES_URN)) for match_element in matches: compatible.append( DimensionDataVirtualListenerCompatibility( type=match_element.get('type'), protocol=match_element.get('protocol', None))) return DimensionDataPersistenceProfile( id=element.get('id'), fallback_compatible=bool( element.get('fallbackCompatible') == "true"), name=findtext(element, 'name', TYPES_URN), compatible_listeners=compatible ) def _to_health_monitors(self, object): monitors = [] matches = object.findall(fixxpath('defaultHealthMonitor', TYPES_URN)) for element in matches: monitors.append(self._to_health_monitor(element)) return monitors def _to_health_monitor(self, element): return DimensionDataDefaultHealthMonitor( 
id=element.get('id'), name=findtext(element, 'name', TYPES_URN), node_compatible=bool( findtext(element, 'nodeCompatible', TYPES_URN) == "true"), pool_compatible=bool( findtext(element, 'poolCompatible', TYPES_URN) == "true"), ) def _to_nodes(self, object): nodes = [] for element in object.findall(fixxpath("node", TYPES_URN)): nodes.append(self._to_node(element)) return nodes def _to_node(self, element): ipaddress = findtext(element, 'ipv4Address', TYPES_URN) if ipaddress is None: ipaddress = findtext(element, 'ipv6Address', TYPES_URN) name = findtext(element, 'name', TYPES_URN) node = DimensionDataVIPNode( id=element.get('id'), name=name, status=self._VALUE_TO_STATE_MAP.get( findtext(element, 'state', TYPES_URN), State.UNKNOWN), connection_rate_limit=findtext(element, 'connectionRateLimit', TYPES_URN), connection_limit=findtext(element, 'connectionLimit', TYPES_URN), ip=ipaddress) return node def _to_balancers(self, object): loadbalancers = [] for element in object.findall(fixxpath("virtualListener", TYPES_URN)): loadbalancers.append(self._to_balancer(element)) return loadbalancers def _to_balancer(self, element): ipaddress = findtext(element, 'listenerIpAddress', TYPES_URN) name = findtext(element, 'name', TYPES_URN) port = findtext(element, 'port', TYPES_URN) extra = {} pool_element = element.find(fixxpath( 'pool', TYPES_URN)) if pool_element is None: extra['pool_id'] = None else: extra['pool_id'] = pool_element.get('id') extra['network_domain_id'] = findtext(element, 'networkDomainId', TYPES_URN) balancer = LoadBalancer( id=element.get('id'), name=name, state=self._VALUE_TO_STATE_MAP.get( findtext(element, 'state', TYPES_URN), State.UNKNOWN), ip=ipaddress, port=port, driver=self.connection.driver, extra=extra ) return balancer def _to_members(self, object): members = [] for element in object.findall(fixxpath("poolMember", TYPES_URN)): members.append(self._to_member(element)) return members def _to_member(self, element): port = findtext(element, 'port', 
TYPES_URN) if port is not None: port = int(port) pool_member = DimensionDataPoolMember( id=element.get('id'), name=element.find(fixxpath( 'node', TYPES_URN)).get('name'), status=findtext(element, 'state', TYPES_URN), node_id=element.find(fixxpath( 'node', TYPES_URN)).get('id'), ip=element.find(fixxpath( 'node', TYPES_URN)).get('ipAddress'), port=port ) return pool_member def _to_pools(self, object): pools = [] for element in object.findall(fixxpath("pool", TYPES_URN)): pools.append(self._to_pool(element)) return pools def _to_pool(self, element): pool = DimensionDataPool( id=element.get('id'), name=findtext(element, 'name', TYPES_URN), status=findtext(element, 'state', TYPES_URN), description=findtext(element, 'description', TYPES_URN), load_balance_method=findtext(element, 'loadBalanceMethod', TYPES_URN), health_monitor_id=findtext(element, 'healthMonitorId', TYPES_URN), service_down_action=findtext(element, 'serviceDownAction', TYPES_URN), slow_ramp_time=findtext(element, 'slowRampTime', TYPES_URN), ) return pool apache-libcloud-2.2.1/libcloud/loadbalancer/base.py0000664000175000017500000002440612701023453022111 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.common.base import ConnectionKey, BaseDriver
from libcloud.common.types import LibcloudError

__all__ = [
    'Member',
    'LoadBalancer',
    'Algorithm',
    'Driver',
    'DEFAULT_ALGORITHM'
]


class Member(object):
    """
    Represents a load balancer member.
    """

    def __init__(self, id, ip, port, balancer=None, extra=None):
        """
        :param id: Member ID.
        :type id: ``str``

        :param ip: IP address of this member.
        :type ip: ``str``

        :param port: Port of this member.
        :type port: ``str``

        :param balancer: Balancer this member is attached to. (optional)
        :type balancer: :class:`.LoadBalancer`

        :param extra: Provider specific attributes.
        :type extra: ``dict``
        """
        self.id = str(id) if id else None
        self.ip = ip
        self.port = port
        self.balancer = balancer
        self.extra = extra or {}

    def __repr__(self):
        # Bug fix: the format string was empty ('' % (...)), which made any
        # repr() call raise "TypeError: not all arguments converted during
        # string formatting".
        return ('<Member: id=%s, address=%s:%s>' %
                (self.id, self.ip, self.port))


class LoadBalancer(object):
    """
    Provide a common interface for handling Load Balancers.
    """

    def __init__(self, id, name, state, ip, port, driver, extra=None):
        """
        :param id: Load balancer ID.
        :type id: ``str``

        :param name: Load balancer name.
        :type name: ``str``

        :param state: State this loadbalancer is in.
        :type state: :class:`libcloud.loadbalancer.types.State`

        :param ip: IP address of this loadbalancer.
        :type ip: ``str``

        :param port: Port of this loadbalancer.
        :type port: ``int``

        :param driver: Driver this loadbalancer belongs to.
        :type driver: :class:`.Driver`

        :param extra: Provider specific attributes. (optional)
        :type extra: ``dict``
        """
        self.id = str(id) if id else None
        self.name = name
        self.state = state
        self.ip = ip
        self.port = port
        self.driver = driver
        self.extra = extra or {}

    def attach_compute_node(self, node):
        return self.driver.balancer_attach_compute_node(balancer=self,
                                                        node=node)

    def attach_member(self, member):
        return self.driver.balancer_attach_member(balancer=self,
                                                  member=member)

    def detach_member(self, member):
        return self.driver.balancer_detach_member(balancer=self,
                                                  member=member)

    def list_members(self):
        return self.driver.balancer_list_members(balancer=self)

    def destroy(self):
        return self.driver.destroy_balancer(balancer=self)

    def __repr__(self):
        # Bug fix: same empty-format-string defect as Member.__repr__.
        return ('<LoadBalancer: id=%s, name=%s, state=%s, ip=%s, port=%s>' %
                (self.id, self.name, self.state, self.ip, self.port))


class Algorithm(object):
    """
    Represents a load balancing algorithm.
    """

    RANDOM = 0
    ROUND_ROBIN = 1
    LEAST_CONNECTIONS = 2
    WEIGHTED_ROUND_ROBIN = 3
    WEIGHTED_LEAST_CONNECTIONS = 4
    SHORTEST_RESPONSE = 5
    PERSISTENT_IP = 6


DEFAULT_ALGORITHM = Algorithm.ROUND_ROBIN


class Driver(BaseDriver):
    """
    A base Driver class to derive from.

    This class is always subclassed by a specific driver.
    """

    name = None
    website = None

    connectionCls = ConnectionKey

    _ALGORITHM_TO_VALUE_MAP = {}
    _VALUE_TO_ALGORITHM_MAP = {}

    def __init__(self, key, secret=None, secure=True, host=None,
                 port=None, **kwargs):
        super(Driver, self).__init__(key=key, secret=secret, secure=secure,
                                     host=host, port=port, **kwargs)

    def list_protocols(self):
        """
        Return a list of supported protocols.

        :rtype: ``list`` of ``str``
        """
        raise NotImplementedError(
            'list_protocols not implemented for this driver')

    def list_balancers(self):
        """
        List all loadbalancers

        :rtype: ``list`` of :class:`LoadBalancer`
        """
        raise NotImplementedError(
            'list_balancers not implemented for this driver')

    def create_balancer(self, name, port, protocol, algorithm, members):
        """
        Create a new load balancer instance

        :param name: Name of the new load balancer (required)
        :type  name: ``str``

        :param port: Port the load balancer should listen on, defaults to 80
        :type  port: ``str``

        :param protocol: Loadbalancer protocol, defaults to http.
        :type  protocol: ``str``

        :param members: list of Members to attach to balancer
        :type  members: ``list`` of :class:`Member`

        :param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN.
        :type algorithm: :class:`.Algorithm`

        :rtype: :class:`LoadBalancer`
        """
        raise NotImplementedError(
            'create_balancer not implemented for this driver')

    def destroy_balancer(self, balancer):
        """
        Destroy a load balancer

        :param balancer: LoadBalancer which should be used
        :type  balancer: :class:`LoadBalancer`

        :return: ``True`` if the destroy was successful, otherwise ``False``.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'destroy_balancer not implemented for this driver')

    def get_balancer(self, balancer_id):
        """
        Return a :class:`LoadBalancer` object.

        :param balancer_id: id of a load balancer you want to fetch
        :type  balancer_id: ``str``

        :rtype: :class:`LoadBalancer`
        """
        raise NotImplementedError(
            'get_balancer not implemented for this driver')

    def update_balancer(self, balancer, **kwargs):
        """
        Sets the name, algorithm, protocol, or port on a load balancer.

        :param balancer: LoadBalancer which should be used
        :type  balancer: :class:`LoadBalancer`

        :param name: New load balancer name
        :type  name: ``str``

        :param algorithm: New load balancer algorithm
        :type  algorithm: :class:`.Algorithm`

        :param protocol: New load balancer protocol
        :type  protocol: ``str``

        :param port: New load balancer port
        :type  port: ``int``

        :rtype: :class:`LoadBalancer`
        """
        raise NotImplementedError(
            'update_balancer not implemented for this driver')

    def balancer_attach_compute_node(self, balancer, node):
        """
        Attach a compute node as a member to the load balancer.

        :param balancer: LoadBalancer which should be used
        :type  balancer: :class:`LoadBalancer`

        :param node: Node to join to the balancer
        :type  node: :class:`Node`

        :return: Member after joining the balancer.
        :rtype: :class:`Member`
        """
        # Default implementation: wrap the node's first public IP in a
        # Member and delegate to balancer_attach_member().
        member = Member(id=None, ip=node.public_ips[0], port=balancer.port)
        return self.balancer_attach_member(balancer, member)

    def balancer_attach_member(self, balancer, member):
        """
        Attach a member to balancer

        :param balancer: LoadBalancer which should be used
        :type  balancer: :class:`LoadBalancer`

        :param member: Member to join to the balancer
        :type  member: :class:`Member`

        :return: Member after joining the balancer.
        :rtype: :class:`Member`
        """
        raise NotImplementedError(
            'balancer_attach_member not implemented for this driver')

    def balancer_detach_member(self, balancer, member):
        """
        Detach member from balancer

        :param balancer: LoadBalancer which should be used
        :type  balancer: :class:`LoadBalancer`

        :param member: Member which should be used
        :type  member: :class:`Member`

        :return: ``True`` if member detach was successful, otherwise
                 ``False``.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'balancer_detach_member not implemented for this driver')

    def balancer_list_members(self, balancer):
        """
        Return list of members attached to balancer

        :param balancer: LoadBalancer which should be used
        :type  balancer: :class:`LoadBalancer`

        :rtype: ``list`` of :class:`Member`
        """
        raise NotImplementedError(
            'balancer_list_members not implemented for this driver')

    def list_supported_algorithms(self):
        """
        Return algorithms supported by this driver.

        :rtype: ``list`` of ``str``
        """
        return list(self._ALGORITHM_TO_VALUE_MAP.keys())

    def _value_to_algorithm(self, value):
        """
        Return :class:`.Algorithm` based on the value.

        :param value: Algorithm name (e.g. http, tcp, ...).
        :type  value: ``str``

        :rtype: :class:`.Algorithm`
        """
        try:
            return self._VALUE_TO_ALGORITHM_MAP[value]
        except KeyError:
            raise LibcloudError(value='Invalid value: %s' % (value),
                                driver=self)

    def _algorithm_to_value(self, algorithm):
        """
        Return string value for the provided algorithm.

        :param algorithm: Algorithm enum.
        :type  algorithm: :class:`Algorithm`

        :rtype: ``str``
        """
        try:
            return self._ALGORITHM_TO_VALUE_MAP[algorithm]
        except KeyError:
            raise LibcloudError(value='Invalid algorithm: %s' % (algorithm),
                                driver=self)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.backup.providers import Provider as BackupProvider
from libcloud.backup.providers import get_driver as get_backup_driver
from libcloud.compute.providers import Provider as ComputeProvider
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.container.providers import Provider as ContainerProvider
from libcloud.container.providers import get_driver as get_container_driver
from libcloud.dns.providers import Provider as DnsProvider
from libcloud.dns.providers import get_driver as get_dns_driver
from libcloud.loadbalancer.providers import Provider as LoadBalancerProvider
from libcloud.loadbalancer.providers import get_driver as \
    get_loadbalancer_driver
from libcloud.storage.providers import Provider as StorageProvider
from libcloud.storage.providers import get_driver as get_storage_driver


class DriverType(object):
    """
    Enumeration of the driver (service) families libcloud exposes.
    """

    # NOTE: the descriptions below were originally bare string literals
    # placed *before* each attribute - they were no-op statements, not
    # docstrings, so they have been converted to comments.

    # Backup-as-a-service driver
    BACKUP = BackupProvider

    # Compute-as-a-Service driver
    COMPUTE = ComputeProvider

    # Container-as-a-Service driver
    CONTAINER = ContainerProvider

    # DNS service provider driver
    DNS = DnsProvider

    # Load balancer provider-driver
    LOADBALANCER = LoadBalancerProvider

    # Storage-as-a-Service driver
    STORAGE = StorageProvider


# Maps each driver family to the factory which resolves a concrete provider.
DriverTypeFactoryMap = {
    DriverType.BACKUP: get_backup_driver,
    DriverType.COMPUTE: get_compute_driver,
    DriverType.CONTAINER: get_container_driver,
    DriverType.DNS: get_dns_driver,
    DriverType.LOADBALANCER: get_loadbalancer_driver,
    DriverType.STORAGE: get_storage_driver
}


class DriverTypeNotFoundError(KeyError):
    """
    Raised by :func:`get_driver` when an unknown driver type is requested.
    """

    def __init__(self, type):
        self.message = "Driver type '%s' not found." % type

    def __repr__(self):
        return self.message


def get_driver(type, provider):
    """
    Get a driver.

    :param type: The type of driver to get, one of :class:`DriverType`.
    :param provider: Provider constant for the chosen driver family.

    :raises DriverTypeNotFoundError: if ``type`` is not a known driver type.
    """
    try:
        return DriverTypeFactoryMap[type](provider)
    except KeyError:
        raise DriverTypeNotFoundError(type)

# ---------------------------------------------------------------------------
# apache-libcloud-2.2.1/libcloud/utils/connection.py
# ---------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.utils.py3 import urlparse, parse_qs

from libcloud.common.base import Connection

__all__ = [
    'get_response_object'
]


def get_response_object(url, method='GET', headers=None):
    """
    Utility function which uses libcloud's connection class to issue an HTTP
    request.

    :param url: URL to send the request to.
    :type url: ``str``

    :param method: HTTP method.
    :type method: ``str``

    :param headers: Optional request headers.
    :type headers: ``dict``

    :return: Response object.
    :rtype: :class:`Response`.
    """
    parsed_url = urlparse.urlparse(url)
    parsed_qs = parse_qs(parsed_url.query)

    secure = parsed_url.scheme == 'https'
    headers = headers or {}
    method = method.upper()

    con = Connection(secure=secure, host=parsed_url.netloc)
    response = con.request(action=parsed_url.path, params=parsed_qs,
                           headers=headers, method=method)
    return response

# ---------------------------------------------------------------------------
# apache-libcloud-2.2.1/libcloud/utils/__init__.py
# ---------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings

__all__ = [
    'SHOW_DEPRECATION_WARNING',
    'SHOW_IN_DEVELOPMENT_WARNING',
    'OLD_API_REMOVE_VERSION',
    'deprecated_warning',
    'in_development_warning'
]

SHOW_DEPRECATION_WARNING = True
SHOW_IN_DEVELOPMENT_WARNING = True
OLD_API_REMOVE_VERSION = '0.7.0'


def deprecated_warning(module):
    # Emit a DeprecationWarning pointing users at the module's new location.
    if SHOW_DEPRECATION_WARNING:
        warnings.warn('This path has been deprecated and the module'
                      ' is now available at "libcloud.compute.%s".'
                      ' This path will be fully removed in libcloud %s.' %
                      (module, OLD_API_REMOVE_VERSION),
                      category=DeprecationWarning)


def in_development_warning(module):
    # Emit a FutureWarning for modules which are not yet production ready.
    if SHOW_IN_DEVELOPMENT_WARNING:
        # Bug fix: corrected "your are advised" -> "you are advised" in the
        # user-facing warning text.
        warnings.warn('The module %s is in development and you are advised '
                      'against using it in production.'
                      % (module), category=FutureWarning)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import mimetypes

from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import next
from libcloud.utils.py3 import b

if PY3:
    from io import FileIO as file

CHUNK_SIZE = 8096

__all__ = [
    'read_in_chunks',
    'exhaust_iterator',
    'guess_file_mime_type'
]


def read_in_chunks(iterator, chunk_size=None, fill_size=False,
                   yield_empty=False):
    """
    Return a generator which yields data in chunks.

    :param iterator: An object which implements an iterator interface
                     or a File like object with read method.
    :type iterator: :class:`object` which implements iterator interface.

    :param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)
    :type chunk_size: ``int``

    :param fill_size: If True, make sure chunks are exactly chunk_size in
                      length (except for last chunk).
    :type fill_size: ``bool``

    :param yield_empty: If true and iterator returned no data, yield empty
                        bytes object before the generator finishes.
    :type yield_empty: ``bool``

    TODO: At some point in the future we could use byte arrays here if version
    >= Python 3. This should speed things up a bit and reduce memory usage.
    """
    chunk_size = chunk_size or CHUNK_SIZE

    # File-like objects (and HTTP responses) expose read(size); everything
    # else is driven through next().
    if isinstance(iterator, (file, httplib.HTTPResponse)):
        get_data = iterator.read
        args = (chunk_size, )
    else:
        get_data = next
        args = (iterator, )

    data = b('')
    empty = False

    while not empty or len(data) > 0:
        if not empty:
            try:
                chunk = b(get_data(*args))
                if len(chunk) > 0:
                    data += chunk
                else:
                    empty = True
            except StopIteration:
                empty = True

        if len(data) == 0:
            if empty and yield_empty:
                yield b('')

            # Bug fix (PEP 479): "raise StopIteration" inside a generator is
            # converted into RuntimeError on Python 3.7+; a plain return is
            # the correct way to finish the generator.
            return

        if fill_size:
            if empty or len(data) >= chunk_size:
                yield data[:chunk_size]
                data = data[chunk_size:]
        else:
            yield data
            data = b('')


def exhaust_iterator(iterator):
    """
    Exhaust an iterator and return all data returned by it.

    :type iterator: :class:`object` which implements iterator interface.
    :param iterator: An object which implements an iterator interface
                     or a File like object with read method.

    :rtype ``str``
    :return Data returned by the iterator.
    """
    data = b('')

    try:
        chunk = b(next(iterator))
    except StopIteration:
        chunk = b('')

    while len(chunk) > 0:
        data += chunk

        try:
            chunk = b(next(iterator))
        except StopIteration:
            chunk = b('')

    return data


def guess_file_mime_type(file_path):
    # Guess (mimetype, encoding) from the file name alone; either element
    # may be None when no guess can be made.
    filename = os.path.basename(file_path)
    (mimetype, encoding) = mimetypes.guess_type(filename)
    return mimetype, encoding

# ---------------------------------------------------------------------------
# Next file in archive dump: apache-libcloud-2.2.1/libcloud/utils/py3.py
# ---------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Libcloud Python 2.x and 3.x compatibility layer
# Some methods below are taken from Django PYK3 port which is licensed under 3
# clause BSD license
# https://bitbucket.org/loewis/django-3k

# pylint: disable=import-error

from __future__ import absolute_import

import sys
import types

DEFAULT_LXML = False

try:
    if DEFAULT_LXML:
        from lxml import etree as ET
    else:
        from xml.etree import ElementTree as ET
except ImportError:
    from xml.etree import ElementTree as ET

PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

PY2_pre_25 = PY2 and sys.version_info < (2, 5)
PY2_pre_26 = PY2 and sys.version_info < (2, 6)
PY2_pre_27 = PY2 and sys.version_info < (2, 7)
PY2_pre_279 = PY2 and sys.version_info < (2, 7, 9)
PY3_pre_32 = PY3 and sys.version_info < (3, 2)

# NOTE: the flags are recomputed below using explicit range checks; this
# mirrors the original module layout.
PY2 = False
PY25 = False
PY26 = False
PY27 = False
PY3 = False
PY32 = False

if sys.version_info >= (2, 0) and sys.version_info < (3, 0):
    PY2 = True

if sys.version_info >= (2, 5) and sys.version_info < (2, 6):
    PY25 = True

if sys.version_info >= (2, 6) and sys.version_info < (2, 7):
    PY26 = True

if sys.version_info >= (2, 7) and sys.version_info < (2, 8):
    PY27 = True

if sys.version_info >= (3, 0):
    PY3 = True

if sys.version_info >= (3, 2) and sys.version_info < (3, 3):
    PY32 = True

if PY2_pre_279 or PY3_pre_32:
    try:
        from backports.ssl_match_hostname import match_hostname, CertificateError  # NOQA
    except ImportError:
        import warnings
        warnings.warn("Missing backports.ssl_match_hostname package")
else:
    # ssl module in Python >= 3.2 includes match hostname function
    from ssl import match_hostname, CertificateError  # NOQA

if PY3:
    import http.client as httplib
    from io import StringIO
    import urllib
    import urllib as urllib2  # pylint: disable=no-name-in-module
    import urllib.parse as urlparse
    import xmlrpc.client as xmlrpclib

    from urllib.parse import quote as urlquote
    from urllib.parse import unquote as urlunquote
    from urllib.parse import urlencode as urlencode

    from os.path import relpath

    from imp import reload

    from builtins import bytes
    from builtins import next

    parse_qs = urlparse.parse_qs
    parse_qsl = urlparse.parse_qsl

    basestring = str

    def method_type(callable, instance, klass):
        return types.MethodType(callable, instance or klass())

    def b(s):
        # Coerce str / bytes / int into a bytes value.
        if isinstance(s, str):
            return s.encode('utf-8')
        elif isinstance(s, bytes):
            return s
        elif isinstance(s, int):
            return bytes([s])
        else:
            raise TypeError("Invalid argument %r for b()" % (s,))

    def ensure_string(s):
        # Coerce str / bytes into a native str value.
        if isinstance(s, str):
            return s
        elif isinstance(s, bytes):
            return s.decode('utf-8')
        else:
            raise TypeError("Invalid argument %r for ensure_string()" % (s,))

    def byte(n):
        # assume n is a Latin-1 string of length 1
        return ord(n)

    _real_unicode = str
    u = str

    def bchr(s):
        """Take an integer and make a 1-character byte string."""
        return bytes([s])

    def dictvalues(d):
        return list(d.values())

    def tostring(node):
        return ET.tostring(node, encoding='unicode')

    def hexadigits(s):
        # s needs to be a byte string.
        return [format(x, "x") for x in s]
else:
    import httplib  # NOQA
    from StringIO import StringIO  # NOQA
    import urllib  # NOQA
    import urllib2  # NOQA
    import urlparse  # NOQA
    import xmlrpclib  # NOQA
    from urllib import quote as _urlquote  # NOQA
    from urllib import unquote as urlunquote  # NOQA
    from urllib import urlencode as urlencode  # NOQA
    from __builtin__ import reload  # NOQA

    if PY25:
        import cgi

        parse_qs = cgi.parse_qs
        parse_qsl = cgi.parse_qsl
    else:
        parse_qs = urlparse.parse_qs
        parse_qsl = urlparse.parse_qsl

    if not PY25:
        from os.path import relpath  # NOQA

    # Save the real value of unicode because urlquote needs it to tell the
    # difference between a unicode string and a byte string.
    _real_unicode = unicode
    basestring = unicode = str

    method_type = types.MethodType

    b = bytes = ensure_string = str

    def byte(n):
        return n

    u = unicode

    def bchr(s):
        """Take an integer and make a 1-character byte string."""
        return chr(s)

    _default_value_next = object()

    def next(iterator, default=_default_value_next):
        try:
            return iterator.next()
        except StopIteration:
            if default is _default_value_next:
                raise
            return default

    def dictvalues(d):
        return d.values()

    tostring = ET.tostring

    def urlquote(s, safe='/'):
        if isinstance(s, _real_unicode):
            # Pretend to be py3 by encoding the URI automatically.
            s = s.encode('utf8')
        return _urlquote(s, safe)

    def hexadigits(s):
        # s needs to be a string.
        return [x.encode("hex") for x in s]

if PY25:
    import posixpath

    # Taken from http://jimmyg.org/work/code/barenecessities/index.html
    # (MIT license)
    # pylint: disable=function-redefined
    def relpath(path, start=posixpath.curdir):  # NOQA
        """Return a relative version of a path"""
        if not path:
            raise ValueError("no path specified")
        start_list = posixpath.abspath(start).split(posixpath.sep)
        path_list = posixpath.abspath(path).split(posixpath.sep)

        # Work out how much of the filepath is shared by start and path.
        i = len(posixpath.commonprefix([start_list, path_list]))

        rel_list = [posixpath.pardir] * (len(start_list) - i) + path_list[i:]
        if not rel_list:
            return posixpath.curdir
        return posixpath.join(*rel_list)

if PY27 or PY3:
    unittest2_required = False
else:
    unittest2_required = True

# ---------------------------------------------------------------------------
# apache-libcloud-2.2.1/libcloud/utils/misc.py
# ---------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import binascii
import socket
import time
import ssl

from datetime import datetime, timedelta
from functools import wraps

from libcloud.utils.py3 import httplib
from libcloud.common.exceptions import RateLimitReachedError
from libcloud.common.providers import get_driver as _get_driver
from libcloud.common.providers import set_driver as _set_driver

__all__ = [
    'find',
    'get_driver',
    'set_driver',
    'merge_valid_keys',
    'get_new_obj',
    'str2dicts',
    'dict2str',
    'reverse_dict',
    'lowercase_keys',
    'get_secure_random_string',
    'retry',
    'ReprMixin'
]

# Error message which indicates a transient SSL error upon which request
# can be retried
TRANSIENT_SSL_ERROR = 'The read operation timed out'


class TransientSSLError(ssl.SSLError):
    """Represent transient SSL errors, e.g. timeouts"""
    pass
# Constants used by the ``retry`` decorator
DEFAULT_TIMEOUT = 30  # default retry timeout
DEFAULT_DELAY = 1  # default sleep delay used in each iterator
DEFAULT_BACKOFF = 1  # retry backup multiplier

RETRY_EXCEPTIONS = (RateLimitReachedError, socket.error, socket.gaierror,
                    httplib.NotConnected, httplib.ImproperConnectionState,
                    TransientSSLError)


def find(l, predicate):
    # Return the first element of ``l`` matching ``predicate``, else None.
    results = [x for x in l if predicate(x)]
    return results[0] if len(results) > 0 else None


# Note: Those are aliases for backward-compatibility for functions which have
# been moved to "libcloud.common.providers" module
get_driver = _get_driver
set_driver = _set_driver


def merge_valid_keys(params, valid_keys, extra):
    """
    Merge valid keys from extra into params dictionary and return
    dictionary with keys which have been merged.

    Note: params is modified in place.
    """
    merged = {}

    if not extra:
        return merged

    for key in valid_keys:
        if key in extra:
            params[key] = extra[key]
            merged[key] = extra[key]

    return merged


def get_new_obj(obj, klass, attributes):
    """
    Pass attributes from the existing object 'obj' and attributes
    dictionary to a 'klass' constructor.
    Attributes from 'attributes' dictionary are only passed to the
    constructor if they are not None.
    """
    kwargs = {}
    # Start from shallow copies of the existing object's attributes so the
    # new object does not share mutable state with the old one.
    for key, value in list(obj.__dict__.items()):
        if isinstance(value, dict):
            kwargs[key] = value.copy()
        elif isinstance(value, (tuple, list)):
            kwargs[key] = value[:]
        else:
            kwargs[key] = value

    for key, value in list(attributes.items()):
        if value is None:
            continue

        if isinstance(value, dict):
            # Merge dict overrides key-by-key, skipping None sub-values.
            kwargs_value = kwargs.get(key, {})
            for key1, value2 in list(value.items()):
                if value2 is None:
                    continue
                kwargs_value[key1] = value2
            kwargs[key] = kwargs_value
        else:
            kwargs[key] = value

    return klass(**kwargs)


def str2dicts(data):
    """
    Create a list of dictionaries from a whitespace and newline delimited
    text.

    For example, this:
    cpu 1100
    ram 640

    cpu 2200
    ram 1024

    becomes:
    [{'cpu': '1100', 'ram': '640'}, {'cpu': '2200', 'ram': '1024'}]
    """
    list_data = []
    list_data.append({})
    d = list_data[-1]

    lines = data.split('\n')
    for line in lines:
        line = line.strip()

        if not line:
            d = {}
            list_data.append(d)
            d = list_data[-1]
            continue

        whitespace = line.find(' ')

        # Bug fix: str.find() returns -1 (a truthy value) when no space is
        # present, so the original "if not whitespace" check never skipped
        # key-only lines and produced corrupted entries for them. Skip
        # lines which contain no key/value separator.
        if whitespace == -1:
            continue

        key = line[0:whitespace]
        value = line[whitespace + 1:]
        d.update({key: value})

    list_data = [val for val in list_data if val != {}]
    return list_data


def str2list(data):
    """
    Create a list of values from a whitespace and newline delimited text
    (keys are ignored).

    For example, this:
    ip 1.2.3.4
    ip 1.2.3.5
    ip 1.2.3.6

    becomes:
    ['1.2.3.4', '1.2.3.5', '1.2.3.6']
    """
    list_data = []

    for line in data.split('\n'):
        line = line.strip()

        if not line:
            continue

        try:
            splitted = line.split(' ')
            # key = splitted[0]
            value = splitted[1]
        except Exception:
            continue

        list_data.append(value)

    return list_data


def dict2str(data):
    """
    Create a string with a whitespace and newline delimited text from a
    dictionary.

    For example, this:
    {'cpu': '1100', 'ram': '640', 'smp': 'auto'}

    becomes:
    cpu 1100
    ram 640
    smp auto
    """
    result = ''
    for k in data:
        if data[k] is not None:
            result += '%s %s\n' % (str(k), str(data[k]))
        else:
            result += '%s\n' % str(k)

    return result


def reverse_dict(dictionary):
    # Swap keys and values; duplicate values collapse to a single key.
    return dict([(value, key) for key, value in list(dictionary.items())])


def lowercase_keys(dictionary):
    return dict(((k.lower(), v) for k, v in dictionary.items()))


def get_secure_random_string(size):
    """
    Return a string of ``size`` random bytes. Returned string is suitable for
    cryptographic use.

    :param size: Size of the generated string.
    :type size: ``int``

    :return: Random string.
    :rtype: ``str``
    """
    value = os.urandom(size)
    value = binascii.hexlify(value)
    value = value.decode('utf-8')[:size]
    return value


class ReprMixin(object):
    """
    Mixin class which adds __repr__ and __str__ methods for the attributes
    specified on the class.
    """

    _repr_attributes = []

    def __repr__(self):
        attributes = []
        for attribute in self._repr_attributes:
            value = getattr(self, attribute, None)
            attributes.append('%s=%s' % (attribute, value))

        values = (self.__class__.__name__, ', '.join(attributes))
        result = '<%s %s>' % values
        return result

    def __str__(self):
        return str(self.__repr__())


def retry(retry_exceptions=RETRY_EXCEPTIONS, retry_delay=DEFAULT_DELAY,
          timeout=DEFAULT_TIMEOUT, backoff=DEFAULT_BACKOFF):
    """
    Retry decorator that helps to handle common transient exceptions.

    :param retry_exceptions: types of exceptions to retry on.
    :param retry_delay: retry delay between the attempts.
    :param timeout: maximum time to wait.
    :param backoff: multiplier added to delay between attempts.

    :Example:

    retry_request = retry(timeout=1, retry_delay=1, backoff=1)
    retry_request(self.connection.request)()
    """
    # Callers may explicitly pass None to request the defaults.
    if retry_exceptions is None:
        retry_exceptions = RETRY_EXCEPTIONS
    if retry_delay is None:
        retry_delay = DEFAULT_DELAY
    if timeout is None:
        timeout = DEFAULT_TIMEOUT
    if backoff is None:
        backoff = DEFAULT_BACKOFF

    timeout = max(timeout, 0)

    def transform_ssl_error(func, *args, **kwargs):
        # Re-raise transient SSL read timeouts as TransientSSLError so they
        # match RETRY_EXCEPTIONS; other SSL errors propagate unchanged.
        try:
            return func(*args, **kwargs)
        except ssl.SSLError:
            exc = sys.exc_info()[1]

            if TRANSIENT_SSL_ERROR in str(exc):
                raise TransientSSLError(*exc.args)

            raise exc

    def decorator(func):
        @wraps(func)
        def retry_loop(*args, **kwargs):
            current_delay = retry_delay
            end = datetime.now() + timedelta(seconds=timeout)

            while True:
                try:
                    return transform_ssl_error(func, *args, **kwargs)
                except retry_exceptions:
                    exc = sys.exc_info()[1]

                    if isinstance(exc, RateLimitReachedError):
                        time.sleep(exc.retry_after)

                        # Reset retries if we're told to wait due to rate
                        # limiting
                        current_delay = retry_delay
                        end = datetime.now() + timedelta(
                            seconds=exc.retry_after + timeout)
                    elif datetime.now() >= end:
                        raise
                    else:
                        time.sleep(current_delay)
                        current_delay *= backoff

        return retry_loop
    return decorator
apache-libcloud-2.2.1/libcloud/utils/iso8601.py0000664000175000017500000001031612701023453021034 0ustar kamikami00000000000000""" Copyright (c) 2007 Michael Twomey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ISO 8601 date time string parsing Basic usage: >>> import iso8601 >>> iso8601.parse_date("2007-01-25T12:00:00Z") datetime.datetime(2007, 1, 25, 12, 0, tzinfo=) >>> """ # Taken from pyiso8601 which is licensed under the MIT license. from datetime import datetime, timedelta, tzinfo import re __all__ = ["parse_date", "ParseError"] # Adapted from http://delete.me.uk/2005/03/iso8601.html ISO8601_REGEX = re.compile( r"(?P[0-9]{4})(-(?P[0-9]{1,2})(-(?P[0-9]{1,2})" r"((?P.)(?P[0-9]{2}):(?P[0-9]{2})(:(?P[0-9]{2})(\.(?P[0-9]+))?)?" 
# NOQA r"(?PZ|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?") TIMEZONE_REGEX = re.compile("(?P[+-])(?P[0-9]{2}).(?P[0-9]{2})") # NOQA class ParseError(Exception): """Raised when there is a problem parsing a date string""" # Yoinked from python docs ZERO = timedelta(0) class Utc(tzinfo): """UTC """ def utcoffset(self, dt): return ZERO def tzname(self, dt): return "UTC" def dst(self, dt): return ZERO UTC = Utc() class FixedOffset(tzinfo): """Fixed offset in hours and minutes from UTC """ def __init__(self, offset_hours, offset_minutes, name): self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes) self.__name = name def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return ZERO def __repr__(self): return "" % self.__name def parse_timezone(tzstring, default_timezone=UTC): """Parses ISO 8601 time zone specs into tzinfo offsets """ if tzstring == "Z": return default_timezone # This isn't strictly correct, but it's common to encounter dates without # timezones so I'll assume the default (which defaults to UTC). # Addresses issue 4. if tzstring is None: return default_timezone m = TIMEZONE_REGEX.match(tzstring) prefix, hours, minutes = m.groups() hours, minutes = int(hours), int(minutes) if prefix == "-": hours = -hours minutes = -minutes return FixedOffset(hours, minutes, tzstring) def parse_date(datestring, default_timezone=UTC): """Parses ISO 8601 dates into datetime objects The timezone is parsed from the date string. However it is quite common to have dates without a timezone (not strictly correct). In this case the default timezone specified in default_timezone is used. This is UTC by default. 
""" m = ISO8601_REGEX.match(datestring) if not m: raise ParseError("Unable to parse date string %r" % datestring) groups = m.groupdict() tz = parse_timezone(groups["timezone"], default_timezone=default_timezone) if groups["fraction"] is None: groups["fraction"] = 0 else: groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6) return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]), int(groups["hour"]), int(groups["minute"]), int(groups["second"]), int(groups["fraction"]), tz) apache-libcloud-2.2.1/libcloud/utils/loggingconnection.py0000664000175000017500000001200513153541406023433 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import try: import simplejson as json except: import json from pipes import quote as pquote from xml.dom.minidom import parseString import os from libcloud.common.base import (LibcloudConnection, HttpLibResponseProxy) from libcloud.utils.py3 import _real_unicode as u from libcloud.utils.misc import lowercase_keys class LoggingConnection(LibcloudConnection): """ Debug class to log all HTTP(s) requests as they could be made with the curl command. :cvar log: file-like object that logs entries are written to. 
""" protocol = 'https' log = None http_proxy_used = False def _log_response(self, r): rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r)) ht = "" v = r.version if r.version == 10: v = "HTTP/1.0" if r.version == 11: v = "HTTP/1.1" ht += "%s %s %s\r\n" % (v, r.status, r.reason) body = r.read() for h in r.getheaders(): ht += "%s: %s\r\n" % (h[0].title(), h[1]) ht += "\r\n" headers = lowercase_keys(dict(r.getheaders())) content_type = headers.get('content-type', None) pretty_print = os.environ.get('LIBCLOUD_DEBUG_PRETTY_PRINT_RESPONSE', False) if pretty_print and content_type == 'application/json': try: body = json.loads(body.decode('utf-8')) body = json.dumps(body, sort_keys=True, indent=4) except: # Invalid JSON or server is lying about content-type pass elif pretty_print and content_type == 'text/xml': try: elem = parseString(body.decode('utf-8')) body = elem.toprettyxml() except Exception: # Invalid XML pass ht += u(body) rv += ht rv += ("\n# -------- end %d:%d response ----------\n" % (id(self), id(r))) return rv def _log_curl(self, method, url, body, headers): cmd = ["curl"] if self.http_proxy_used: if self.proxy_username and self.proxy_password: proxy_url = 'http://%s:%s@%s:%s' % (self.proxy_username, self.proxy_password, self.proxy_host, self.proxy_port) else: proxy_url = 'http://%s:%s' % (self.proxy_host, self.proxy_port) proxy_url = pquote(proxy_url) cmd.extend(['--proxy', proxy_url]) cmd.extend(['-i']) if method.lower() == 'head': # HEAD method need special handling cmd.extend(["--head"]) else: cmd.extend(["-X", pquote(method)]) for h in headers: cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))]) cert_file = getattr(self, 'cert_file', None) if cert_file: cmd.extend(["--cert", pquote(cert_file)]) # TODO: in python 2.6, body can be a file-like object. 
if body is not None and len(body) > 0: cmd.extend(["--data-binary", pquote(body)]) cmd.extend(["--compress"]) cmd.extend([pquote("%s%s" % (self.host, url))]) return " ".join(cmd) def getresponse(self): original_response = LibcloudConnection.getresponse(self) if self.log is not None: rv = self._log_response(HttpLibResponseProxy(original_response)) self.log.write(u(rv + "\n")) self.log.flush() return original_response def request(self, method, url, body=None, headers=None, **kwargs): headers.update({'X-LC-Request-ID': str(id(self))}) if self.log is not None: pre = "# -------- begin %d request ----------\n" % id(self) self.log.write(u(pre + self._log_curl(method, url, body, headers) + "\n")) self.log.flush() return LibcloudConnection.request(self, method, url, body, headers) apache-libcloud-2.2.1/libcloud/utils/publickey.py0000664000175000017500000000460512701023453021716 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import base64 import hashlib from libcloud.utils.py3 import hexadigits from libcloud.utils.py3 import bchr __all__ = [ 'get_pubkey_openssh_fingerprint', 'get_pubkey_ssh2_fingerprint', 'get_pubkey_comment' ] try: from Crypto.Util.asn1 import DerSequence, DerObject from Crypto.PublicKey.RSA import algorithmIdentifier, importKey pycrypto_available = True except ImportError: pycrypto_available = False def _to_md5_fingerprint(data): hashed = hashlib.md5(data).digest() return ":".join(hexadigits(hashed)) def get_pubkey_openssh_fingerprint(pubkey): # We import and export the key to make sure it is in OpenSSH format if not pycrypto_available: raise RuntimeError('pycrypto is not available') k = importKey(pubkey) pubkey = k.exportKey('OpenSSH')[7:] decoded = base64.decodestring(pubkey) return _to_md5_fingerprint(decoded) def get_pubkey_ssh2_fingerprint(pubkey): # This is the format that EC2 shows for public key fingerprints in its # KeyPair mgmt API if not pycrypto_available: raise RuntimeError('pycrypto is not available') k = importKey(pubkey) derPK = DerSequence([k.n, k.e]) bitmap = DerObject('BIT STRING') bitmap.payload = bchr(0x00) + derPK.encode() der = DerSequence([algorithmIdentifier, bitmap.encode()]) return _to_md5_fingerprint(der.encode()) def get_pubkey_comment(pubkey, default=None): if pubkey.startswith("ssh-"): # This is probably an OpenSSH key return pubkey.strip().split(' ', 3)[2] if default: return default raise ValueError('Public key is not in a supported format') apache-libcloud-2.2.1/libcloud/utils/decorators.py0000664000175000017500000000350013153541406022072 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from functools import wraps from libcloud.common.types import LibcloudError __all__ = [ 'wrap_non_libcloud_exceptions' ] def wrap_non_libcloud_exceptions(func): """ Decorators function which catches non LibcloudError exceptions, wraps them in LibcloudError class and re-throws the wrapped exception. Note: This function should only be used to wrap methods on the driver classes. """ @wraps(func) def decorated_function(*args, **kwargs): try: return func(*args, **kwargs) except Exception: e = sys.exc_info()[1] if isinstance(e, LibcloudError): raise e if len(args) >= 1: driver = args[0] else: driver = None fault = getattr(e, 'fault', None) if fault and getattr(fault, 'string', None): message = fault.string else: message = str(e) raise LibcloudError(value=message, driver=driver) return decorated_function apache-libcloud-2.2.1/libcloud/utils/escape.py0000664000175000017500000000161312701023453021163 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = [
    'sanitize_object_name',
]


def sanitize_object_name(object_name):
    # Normalize Windows-style backslash separators to forward slashes.
    return object_name.replace('\\', '/')
apache-libcloud-2.2.1/libcloud/utils/logging.py0000664000175000017500000000332612701023453021354 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Taken from https://github.com/Kami/python-extra-log-formatters

from __future__ import absolute_import

import logging

__all__ = [
    'ExtraLogFormatter'
]


class ExtraLogFormatter(logging.Formatter):
    """
    Custom log formatter which attaches all the attributes from the "extra"
    dictionary which start with an underscore to the end of the log message.

    For example:
    extra={'_id': 'user-1', '_path': '/foo/bar'}
    """
    def format(self, record):
        # Collect only the caller-supplied "extra" attributes (those with a
        # leading underscore) and append them to the formatted message.
        custom_attributes = dict([(k, v) for k, v in record.__dict__.items()
                                  if k.startswith('_')])
        custom_attributes = self._dict_to_str(custom_attributes)

        msg = logging.Formatter.format(self, record)
        msg = '%s (%s)' % (msg, custom_attributes)
        return msg

    def _dict_to_str(self, dictionary):
        # "_id" -> "id=value"; attributes joined with commas.
        result = ['%s=%s' % (k[1:], str(v)) for k, v in dictionary.items()]
        result = ','.join(result)
        return result
apache-libcloud-2.2.1/libcloud/utils/networking.py0000664000175000017500000000721612701023453022117 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import socket
import struct
import platform

__all__ = [
    'is_private_subnet',
    'is_public_subnet',
    'is_valid_ip_address',

    'join_ipv4_segments',
    'increment_ipv4_segments'
]


def is_private_subnet(ip):
    """
    Utility function to check if an IP address is inside a private subnet.

    :type ip: ``str``
    :param ip: IP address to check

    :return: ``bool`` if the specified IP address is private.
    """
    # RFC 1918 private ranges.
    priv_subnets = [{'subnet': '10.0.0.0', 'mask': '255.0.0.0'},
                    {'subnet': '172.16.0.0', 'mask': '255.240.0.0'},
                    {'subnet': '192.168.0.0', 'mask': '255.255.0.0'}]

    # All values below are unpacked with the same (native) byte order, so
    # the masked comparisons are consistent.
    ip = struct.unpack('I', socket.inet_aton(ip))[0]

    for network in priv_subnets:
        subnet = struct.unpack('I', socket.inet_aton(network['subnet']))[0]
        mask = struct.unpack('I', socket.inet_aton(network['mask']))[0]

        if (ip & mask) == (subnet & mask):
            return True

    return False


def is_public_subnet(ip):
    """
    Utility function to check if an IP address is inside a public subnet.

    :type ip: ``str``
    :param ip: IP address to check

    :return: ``bool`` if the specified IP address is public.
    """
    return not is_private_subnet(ip=ip)


def is_valid_ip_address(address, family=socket.AF_INET):
    """
    Check if the provided address is valid IPv4 or IPv6 address.

    :param address: IPv4 or IPv6 address to check.
    :type address: ``str``

    :param family: Address family (socket.AF_INET / socket.AF_INET6).
    :type family: ``int``

    :return: ``bool`` True if the provided address is valid.
    """
    is_windows = platform.system() == 'Windows'

    if is_windows and family == socket.AF_INET6:
        # inet_pton is unavailable on this platform/interpreter combination.
        raise ValueError('Checking IPv6 addresses is not supported on '
                         'Windows')

    try:
        if is_windows:
            socket.inet_aton(address)
        else:
            socket.inet_pton(family, address)
    except socket.error:
        return False

    return True


def join_ipv4_segments(segments):
    """
    Helper method to join ip numeric segment pieces back into a full
    ip address.

    :param segments: IPv4 segments to join.
    :type segments: ``list`` or ``tuple``

    :return: IPv4 address.
    :rtype: ``str``
    """
    return '.'.join([str(s) for s in segments])


def increment_ipv4_segments(segments):
    """
    Increment an ip address given in quad segments based on ipv4 rules

    :param segments: IPv4 segments to increment.
    :type segments: ``list`` or ``tuple``

    :return: Incremented segments.
    :rtype: ``list``
    """
    segments = [int(segment) for segment in segments]

    # Increment the last octet and carry overflow leftwards.
    segments[3] += 1

    if segments[3] == 256:
        segments[3] = 0
        segments[2] += 1

        if segments[2] == 256:
            segments[2] = 0
            segments[1] += 1

            if segments[1] == 256:
                segments[1] = 0
                segments[0] += 1

    return segments
apache-libcloud-2.2.1/libcloud/utils/xml.py0000664000175000017500000000323612701023453020526 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = [
    'fixxpath',
    'findtext',
    'findattr',
    'findall'
]


def fixxpath(xpath, namespace=None):
    # ElementTree wants namespaces in its xpaths, so here we add them.
    if not namespace:
        return xpath
    return '/'.join(['{%s}%s' % (namespace, e) for e in xpath.split('/')])


def findtext(element, xpath, namespace=None, no_text_value=''):
    """
    :param no_text_value: Value to return if the provided element has no
                          text value.
:type no_text_value: ``object`` """ value = element.findtext(fixxpath(xpath=xpath, namespace=namespace)) if value == '': return no_text_value return value def findattr(element, xpath, namespace=None): return element.findtext(fixxpath(xpath=xpath, namespace=namespace)) def findall(element, xpath, namespace=None): return element.findall(fixxpath(xpath=xpath, namespace=namespace)) apache-libcloud-2.2.1/libcloud/utils/dist.py0000664000175000017500000001155512701023453020674 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Taken From Twisted Python which licensed under MIT license # https://github.com/powdahound/twisted/blob/master/twisted/python/dist.py # https://github.com/powdahound/twisted/blob/master/LICENSE import os import fnmatch # Names that are excluded from globbing results: EXCLUDE_NAMES = ['{arch}', 'CVS', '.cvsignore', '_darcs', 'RCS', 'SCCS', '.svn'] EXCLUDE_PATTERNS = ['*.py[cdo]', '*.s[ol]', '.#*', '*~', '*.py'] def _filter_names(names): """ Given a list of file names, return those names that should be copied. 
""" names = [n for n in names if n not in EXCLUDE_NAMES] # This is needed when building a distro from a working # copy (likely a checkout) rather than a pristine export: for pattern in EXCLUDE_PATTERNS: names = [n for n in names if not fnmatch.fnmatch(n, pattern) and not n.endswith('.py')] return names def relative_to(base, relativee): """ Gets 'relativee' relative to 'basepath'. i.e., >>> relative_to('/home/', '/home/radix/') 'radix' >>> relative_to('.', '/home/radix/Projects/Twisted') 'Projects/Twisted' The 'relativee' must be a child of 'basepath'. """ basepath = os.path.abspath(base) relativee = os.path.abspath(relativee) if relativee.startswith(basepath): relative = relativee[len(basepath):] if relative.startswith(os.sep): relative = relative[1:] return os.path.join(base, relative) raise ValueError("%s is not a subpath of %s" % (relativee, basepath)) def get_packages(dname, pkgname=None, results=None, ignore=None, parent=None): """ Get all packages which are under dname. This is necessary for Python 2.2's distutils. Pretty similar arguments to getDataFiles, including 'parent'. """ parent = parent or "" prefix = [] if parent: prefix = [parent] bname = os.path.basename(dname) ignore = ignore or [] if bname in ignore: return [] if results is None: results = [] if pkgname is None: pkgname = [] subfiles = os.listdir(dname) abssubfiles = [os.path.join(dname, x) for x in subfiles] if '__init__.py' in subfiles: results.append(prefix + pkgname + [bname]) for subdir in filter(os.path.isdir, abssubfiles): get_packages(subdir, pkgname=pkgname + [bname], results=results, ignore=ignore, parent=parent) res = ['.'.join(result) for result in results] return res def get_data_files(dname, ignore=None, parent=None): """ Get all the data files that should be included in this distutils Project. 'dname' should be the path to the package that you're distributing. 'ignore' is a list of sub-packages to ignore. This facilitates disparate package hierarchies. 
That's a fancy way of saying that the 'twisted' package doesn't want to include the 'twisted.conch' package, so it will pass ['conch'] as the value. 'parent' is necessary if you're distributing a subpackage like twisted.conch. 'dname' should point to 'twisted/conch' and 'parent' should point to 'twisted'. This ensures that your data_files are generated correctly, only using relative paths for the first element of the tuple ('twisted/conch/*'). The default 'parent' is the current working directory. """ parent = parent or "." ignore = ignore or [] result = [] for directory, subdirectories, filenames in os.walk(dname): resultfiles = [] for exname in EXCLUDE_NAMES: if exname in subdirectories: subdirectories.remove(exname) for ig in ignore: if ig in subdirectories: subdirectories.remove(ig) for filename in _filter_names(filenames): resultfiles.append(filename) if resultfiles: for filename in resultfiles: file_path = os.path.join(directory, filename) if parent: file_path = file_path.replace(parent + os.sep, '') result.append(file_path) return result apache-libcloud-2.2.1/libcloud/backup/0000775000175000017500000000000013160535107017461 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/backup/providers.py0000664000175000017500000000306012701223644022047 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.backup.types import Provider
from libcloud.common.providers import get_driver as _get_provider_driver
from libcloud.common.providers import set_driver as _set_provider_driver

# Maps Provider constants to (module path, driver class name) pairs used
# for lazy driver loading.
DRIVERS = {
    Provider.DUMMY:
    ('libcloud.backup.drivers.dummy', 'DummyBackupDriver'),
    Provider.EBS:
    ('libcloud.backup.drivers.ebs', 'EBSBackupDriver'),
    Provider.GCE:
    ('libcloud.backup.drivers.gce', 'GCEBackupDriver'),
    Provider.DIMENSIONDATA:
    ('libcloud.backup.drivers.dimensiondata',
     'DimensionDataBackupDriver')
}


def get_driver(provider):
    # Resolve a Provider constant to its backup driver class.
    return _get_provider_driver(drivers=DRIVERS, provider=provider)


def set_driver(provider, module, klass):
    # Register (or replace) the driver class associated with a provider.
    return _set_provider_driver(drivers=DRIVERS, provider=provider,
                                module=module, klass=klass)
apache-libcloud-2.2.1/libcloud/backup/__init__.py0000664000175000017500000000000012701023453021554 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/backup/types.py0000664000175000017500000000350512703744741021202 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = [
    'Provider',
    'BackupTargetType',
    'BackupTargetJobStatusType'
]


class Provider(object):
    """
    Defines for each of the supported providers

    Non-Dummy drivers are sorted in alphabetical order. Please preserve this
    ordering when adding new drivers.
    """
    DUMMY = 'dummy'
    DIMENSIONDATA = 'dimensiondata'
    EBS = 'ebs'
    GCE = 'gce'


class BackupTargetType(object):
    """
    Backup Target type.
    """

    VIRTUAL = 'Virtual'
    """ Denotes a virtual host """

    PHYSICAL = 'Physical'
    """ Denotes a physical host """

    FILESYSTEM = 'Filesystem'
    """ Denotes a file system (e.g. NAS) """

    DATABASE = 'Database'
    """ Denotes a database target """

    OBJECT = 'Object'
    """ Denotes an object based file system """

    VOLUME = 'Volume'
    """ Denotes a block storage volume """


class BackupTargetJobStatusType(object):
    """
    The status of a backup target job
    """

    RUNNING = 'Running'
    CANCELLED = 'Cancelled'
    FAILED = 'Failed'
    COMPLETED = 'Completed'
    PENDING = 'Pending'
apache-libcloud-2.2.1/libcloud/backup/drivers/0000775000175000017500000000000013160535107021137 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/backup/drivers/__init__.py0000664000175000017500000000000012701023453023232 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/backup/drivers/ebs.py0000664000175000017500000003246512703467102022275 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = [
    'EBSBackupDriver'
]

from libcloud.utils.xml import findtext, findall
from libcloud.utils.iso8601 import parse_date
from libcloud.backup.base import BackupDriver, BackupTargetRecoveryPoint,\
    BackupTargetJob, BackupTarget
from libcloud.backup.types import BackupTargetType, BackupTargetJobStatusType
from libcloud.common.aws import AWSGenericResponse, SignedAWSConnection

VERSION = '2015-10-01'
HOST = 'ec2.amazonaws.com'
ROOT = '/%s/' % (VERSION)
NS = 'http://ec2.amazonaws.com/doc/%s/' % (VERSION, )


class EBSResponse(AWSGenericResponse):
    """
    Amazon EBS response class.
    """
    namespace = NS
    exceptions = {}
    xpath = 'Error'


class EBSConnection(SignedAWSConnection):
    version = VERSION
    host = HOST
    responseCls = EBSResponse
    service_name = 'backup'


class EBSBackupDriver(BackupDriver):
    name = 'Amazon EBS Backup Driver'
    website = 'http://aws.amazon.com/ebs/'
    connectionCls = EBSConnection

    def __init__(self, access_id, secret, region):
        super(EBSBackupDriver, self).__init__(access_id, secret)
        self.region = region
        # BUG FIX: the original did ``HOST % (region)``, but HOST contains no
        # '%s' placeholder, so every instantiation raised
        # "TypeError: not all arguments converted during string formatting".
        # Build the regional EC2 endpoint explicitly instead.
        self.connection.host = 'ec2.%s.amazonaws.com' % (region,)

    def get_supported_target_types(self):
        """
        Get a list of backup target types this driver supports

        :return: ``list`` of :class:``BackupTargetType``
        """
        return [BackupTargetType.VOLUME]

    def list_targets(self):
        """
        List all backuptargets

        :rtype: ``list`` of :class:`BackupTarget`
        """
        raise NotImplementedError(
            'list_targets not implemented for this driver')

    def create_target(self, name, address,
                      type=BackupTargetType.VOLUME, extra=None):
        """
        Creates a new backup target

        :param name: Name of the target
        :type name: ``str``

        :param address: The volume ID.
        :type address: ``str``

        :param type: Backup target type (Physical, Virtual, ...).
        :type type: :class:`BackupTargetType`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`BackupTarget`
        """
        # Does nothing since any volume can be snapped at anytime.
        return self.ex_get_target_by_volume_id(address)

    def create_target_from_node(self, node,
                                type=BackupTargetType.VIRTUAL, extra=None):
        """
        Creates a new backup target from an existing node

        :param node: The Node to backup
        :type  node: ``Node``

        :param type: Backup target type (Physical, Virtual, ...).
        :type type: :class:`BackupTargetType`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`BackupTarget`
        """
        # Get the first EBS volume.
        device_mapping = node.extra['block_device_mapping']
        if device_mapping is not None:
            # NOTE(review): the indexing here looks inverted -- the EC2
            # driver exposes block_device_mapping as a list of dicts, which
            # would require device_mapping[0]['ebs']['volume_id']. Confirm
            # against libcloud.compute.drivers.ec2 before changing.
            return self.create_target(
                name=node.name,
                address=device_mapping['ebs'][0]['volume_id'],
                type=BackupTargetType.VOLUME,
                extra=None)
        else:
            raise RuntimeError("Node does not have any block devices")

    def create_target_from_container(self, container,
                                     type=BackupTargetType.OBJECT,
                                     extra=None):
        """
        Creates a new backup target from an existing storage container

        :param node: The Container to backup
        :type  node: ``Container``

        :param type: Backup target type (Physical, Virtual, ...).
        :type type: :class:`BackupTargetType`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`BackupTarget`
        """
        raise NotImplementedError(
            'create_target_from_container not implemented for this driver')

    def update_target(self, target, name, address, extra):
        """
        Update the properties of a backup target

        :param target: Backup target to update
        :type  target: Instance of :class:`BackupTarget`

        :param name: Name of the target
        :type name: ``str``

        :param address: Hostname, FQDN, IP, file path etc.
        :type address: ``str``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`BackupTarget`
        """
        # Does nothing since any volume can be snapped at anytime.
return self.ex_get_target_by_volume_id(address) def delete_target(self, target): """ Delete a backup target :param target: Backup target to delete :type target: Instance of :class:`BackupTarget` """ raise NotImplementedError( 'delete_target not implemented for this driver') def list_recovery_points(self, target, start_date=None, end_date=None): """ List the recovery points available for a target :param target: Backup target to delete :type target: Instance of :class:`BackupTarget` :param start_date: The start date to show jobs between (optional) :type start_date: :class:`datetime.datetime` :param end_date: The end date to show jobs between (optional) :type end_date: :class:`datetime.datetime`` :rtype: ``list`` of :class:`BackupTargetRecoveryPoint` """ params = { 'Action': 'DescribeSnapshots', 'Filter.1.Name': 'volume-id', 'Filter.1.Value': target.extra['volume-id'] } data = self.connection.request(ROOT, params=params).object return self._to_recovery_points(data, target) def recover_target(self, target, recovery_point, path=None): """ Recover a backup target to a recovery point :param target: Backup target to delete :type target: Instance of :class:`BackupTarget` :param recovery_point: Backup target with the backup data :type recovery_point: Instance of :class:`BackupTarget` :param path: The part of the recovery point to recover (optional) :type path: ``str`` :rtype: Instance of :class:`BackupTargetJob` """ raise NotImplementedError( 'delete_target not implemented for this driver') def recover_target_out_of_place(self, target, recovery_point, recovery_target, path=None): """ Recover a backup target to a recovery point out-of-place :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param recovery_point: Backup target with the backup data :type recovery_point: Instance of :class:`BackupTarget` :param recovery_target: Backup target with to recover the data to :type recovery_target: Instance of :class:`BackupTarget` :param 
path: The part of the recovery point to recover (optional) :type path: ``str`` :rtype: Instance of :class:`BackupTargetJob` """ raise NotImplementedError( 'delete_target not implemented for this driver') def get_target_job(self, target, id): """ Get a specific backup job by ID :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param id: Backup target with the backup data :type id: Instance of :class:`BackupTarget` :rtype: :class:`BackupTargetJob` """ jobs = self.list_target_jobs(target) return list(filter(lambda x: x.id == id, jobs))[0] def list_target_jobs(self, target): """ List the backup jobs on a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :rtype: ``list`` of :class:`BackupTargetJob` """ params = { 'Action': 'DescribeSnapshots', 'Filter.1.Name': 'volume-id', 'Filter.1.Value': target.extra['volume-id'], 'Filter.2.Name': 'status', 'Filter.2.Value': 'pending' } data = self.connection.request(ROOT, params=params).object return self._to_jobs(data) def create_target_job(self, target, extra=None): """ Create a new backup job on a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param extra: (optional) Extra attributes (driver specific). 
:type extra: ``dict`` :rtype: Instance of :class:`BackupTargetJob` """ params = { 'Action': 'CreateSnapshot', 'VolumeId': target.extra['volume-id'] } data = self.connection.request(ROOT, params=params).object xpath = 'CreateSnapshotResponse' return self._to_job(findall(element=data, xpath=xpath, namespace=NS)[0]) def resume_target_job(self, job): """ Resume a suspended backup job on a target :param job: Backup target job to resume :type job: Instance of :class:`BackupTargetJob` :rtype: ``bool`` """ raise NotImplementedError( 'resume_target_job not supported for this driver') def suspend_target_job(self, job): """ Suspend a running backup job on a target :param job: Backup target job to suspend :type job: Instance of :class:`BackupTargetJob` :rtype: ``bool`` """ raise NotImplementedError( 'suspend_target_job not supported for this driver') def cancel_target_job(self, job): """ Cancel a backup job on a target :param job: Backup target job to cancel :type job: Instance of :class:`BackupTargetJob` :rtype: ``bool`` """ raise NotImplementedError( 'cancel_target_job not supported for this driver') def _to_recovery_points(self, data, target): xpath = 'DescribeSnapshotsResponse/snapshotSet/item' return [self._to_recovery_point(el, target) for el in findall(element=data, xpath=xpath, namespace=NS)] def _to_recovery_point(self, el, target): id = findtext(element=el, xpath='snapshotId', namespace=NS) date = parse_date( findtext(element=el, xpath='startTime', namespace=NS)) tags = self._get_resource_tags(el) point = BackupTargetRecoveryPoint( id=id, date=date, target=target, driver=self.connection.driver, extra={ 'snapshot-id': id, 'tags': tags }, ) return point def _to_jobs(self, data): xpath = 'DescribeSnapshotsResponse/snapshotSet/item' return [self._to_job(el) for el in findall(element=data, xpath=xpath, namespace=NS)] def _to_job(self, el): id = findtext(element=el, xpath='snapshotId', namespace=NS) progress = findtext(element=el, xpath='progress', namespace=NS)\ 
.replace('%', '') volume_id = findtext(element=el, xpath='volumeId', namespace=NS) target = self.ex_get_target_by_volume_id(volume_id) job = BackupTargetJob( id=id, status=BackupTargetJobStatusType.PENDING, progress=int(progress), target=target, driver=self.connection.driver, extra={ }, ) return job def ex_get_target_by_volume_id(self, volume_id): return BackupTarget( id=volume_id, name=volume_id, address=volume_id, type=BackupTargetType.VOLUME, driver=self.connection.driver, extra={ "volume-id": volume_id } ) def _get_resource_tags(self, element): """ Parse tags from the provided element and return a dictionary with key/value pairs. :rtype: ``dict`` """ tags = {} # Get our tag set by parsing the element tag_set = findall(element=element, xpath='tagSet/item', namespace=NS) for tag in tag_set: key = findtext(element=tag, xpath='key', namespace=NS) value = findtext(element=tag, xpath='value', namespace=NS) tags[key] = value return tags apache-libcloud-2.2.1/libcloud/backup/drivers/dummy.py0000664000175000017500000000266412701023453022650 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.backup.base import BackupDriver class DummyBackupDriver(BackupDriver): """ Dummy Backup driver. 
>>> from libcloud.backup.drivers.dummy import DummyBackupDriver >>> driver = DummyBackupDriver('key', 'secret') >>> driver.name 'Dummy Backup Provider' """ name = 'Dummy Backup Provider' website = 'http://example.com' def __init__(self, api_key, api_secret): """ :param api_key: API key or username to used (required) :type api_key: ``str`` :param api_secret: Secret password to be used (required) :type api_secret: ``str`` :rtype: ``None`` """ apache-libcloud-2.2.1/libcloud/backup/drivers/gce.py0000664000175000017500000004065012701023453022250 0ustar kamikami00000000000000 # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = [ 'GCEBackupDriver' ] from libcloud.utils.iso8601 import parse_date from libcloud.backup.base import BackupDriver, BackupTargetRecoveryPoint,\ BackupTargetJob, BackupTarget from libcloud.backup.types import BackupTargetType, BackupTargetJobStatusType from libcloud.common.google import GoogleResponse, GoogleBaseConnection API_VERSION = 'v1' DEFAULT_TASK_COMPLETION_TIMEOUT = 180 class GCEResponse(GoogleResponse): pass class GCEConnection(GoogleBaseConnection): """ Connection class for the GCE driver. GCEConnection extends :class:`google.GoogleBaseConnection` for 2 reasons: 1. modify request_path for GCE URI. 2. 
Implement gce_params functionality described below. If the parameter gce_params is set to a dict prior to calling request(), the URL parameters will be updated to include those key/values FOR A SINGLE REQUEST. If the response contains a nextPageToken, gce_params['pageToken'] will be set to its value. This can be used to implement paging in list: >>> params, more_results = {'maxResults': 2}, True >>> while more_results: ... driver.connection.gce_params=params ... driver.ex_list_urlmaps() ... more_results = 'pageToken' in params ... [, ] [] """ host = 'www.googleapis.com' responseCls = GCEResponse def __init__(self, user_id, key, secure, auth_type=None, credential_file=None, project=None, **kwargs): super(GCEConnection, self).__init__(user_id, key, secure=secure, auth_type=auth_type, credential_file=credential_file, **kwargs) self.request_path = '/compute/%s/projects/%s' % (API_VERSION, project) self.gce_params = None def pre_connect_hook(self, params, headers): """ Update URL parameters with values from self.gce_params. @inherits: :class:`GoogleBaseConnection.pre_connect_hook` """ params, headers = super(GCEConnection, self).pre_connect_hook(params, headers) if self.gce_params: params.update(self.gce_params) return params, headers def request(self, *args, **kwargs): """ Perform request then do GCE-specific processing of URL params. @inherits: :class:`GoogleBaseConnection.request` """ response = super(GCEConnection, self).request(*args, **kwargs) # If gce_params has been set, then update the pageToken with the # nextPageToken so it can be used in the next request. 
if self.gce_params: if 'nextPageToken' in response.object: self.gce_params['pageToken'] = response.object['nextPageToken'] elif 'pageToken' in self.gce_params: del self.gce_params['pageToken'] self.gce_params = None return response class GCEBackupDriver(BackupDriver): name = 'Google Compute Engine Backup Driver' website = 'http://cloud.google.com/' connectionCls = GCEConnection def __init__(self, user_id, key=None, project=None, auth_type=None, scopes=None, credential_file=None, **kwargs): """ :param user_id: The email address (for service accounts) or Client ID (for installed apps) to be used for authentication. :type user_id: ``str`` :param key: The RSA Key (for service accounts) or file path containing key or Client Secret (for installed apps) to be used for authentication. :type key: ``str`` :keyword project: Your GCE project name. (required) :type project: ``str`` :keyword auth_type: Accepted values are "SA" or "IA" or "GCE" ("Service Account" or "Installed Application" or "GCE" if libcloud is being used on a GCE instance with service account enabled). If not supplied, auth_type will be guessed based on value of user_id or if the code is being executed in a GCE instance. :type auth_type: ``str`` :keyword scopes: List of authorization URLs. Default is empty and grants read/write to Compute, Storage, DNS. :type scopes: ``list`` :keyword credential_file: Path to file for caching authentication information used by GCEConnection. :type credential_file: ``str`` """ if not project: raise ValueError('Project name must be specified using ' '"project" keyword.') self.auth_type = auth_type self.project = project self.scopes = scopes self.credential_file = credential_file or \ '~/.gce_libcloud_auth' + '.' 
+ self.project super(GCEBackupDriver, self).__init__(user_id, key, **kwargs) # Cache Zone and Region information to reduce API calls and # increase speed self.base_path = '/compute/%s/projects/%s' % (API_VERSION, self.project) def get_supported_target_types(self): """ Get a list of backup target types this driver supports :return: ``list`` of :class:``BackupTargetType`` """ return [BackupTargetType.VOLUME] def list_targets(self): """ List all backuptargets :rtype: ``list`` of :class:`BackupTarget` """ raise NotImplementedError( 'list_targets not implemented for this driver') def create_target(self, name, address, type=BackupTargetType.VOLUME, extra=None): """ Creates a new backup target :param name: Name of the target :type name: ``str`` :param address: The volume ID. :type address: ``str`` :param type: Backup target type (Physical, Virtual, ...). :type type: :class:`BackupTargetType` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: Instance of :class:`BackupTarget` """ # Does nothing since any volume can be snapped at anytime. return self.ex_get_target_by_source(address) def create_target_from_node(self, node, type=BackupTargetType.VIRTUAL, extra=None): """ Creates a new backup target from an existing node :param node: The Node to backup :type node: ``Node`` :param type: Backup target type (Physical, Virtual, ...). :type type: :class:`BackupTargetType` :param extra: (optional) Extra attributes (driver specific). 
:type extra: ``dict`` :rtype: Instance of :class:`BackupTarget` """ # Get the first persistent disk disks = node.extra['disks'] if disks is not None: return self.create_target( name=node.name, address=disks[0]['source'], type=BackupTargetType.VOLUME, extra=None) else: raise RuntimeError("Node does not have any block devices") def create_target_from_container(self, container, type=BackupTargetType.OBJECT, extra=None): """ Creates a new backup target from an existing storage container :param node: The Container to backup :type node: ``Container`` :param type: Backup target type (Physical, Virtual, ...). :type type: :class:`BackupTargetType` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: Instance of :class:`BackupTarget` """ raise NotImplementedError( 'create_target_from_container not implemented for this driver') def update_target(self, target, name, address, extra): """ Update the properties of a backup target :param target: Backup target to update :type target: Instance of :class:`BackupTarget` :param name: Name of the target :type name: ``str`` :param address: Hostname, FQDN, IP, file path etc. :type address: ``str`` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: Instance of :class:`BackupTarget` """ # Does nothing since any volume can be snapped at anytime. 
return self.ex_get_target_by_source(address) def delete_target(self, target): """ Delete a backup target :param target: Backup target to delete :type target: Instance of :class:`BackupTarget` """ raise NotImplementedError( 'delete_target not implemented for this driver') def list_recovery_points(self, target, start_date=None, end_date=None): """ List the recovery points available for a target :param target: Backup target to delete :type target: Instance of :class:`BackupTarget` :param start_date: The start date to show jobs between (optional) :type start_date: :class:`datetime.datetime` :param end_date: The end date to show jobs between (optional) :type end_date: :class:`datetime.datetime`` :rtype: ``list`` of :class:`BackupTargetRecoveryPoint` """ request = '/global/snapshots' response = self.connection.request(request, method='GET').object return self._to_recovery_points(response, target) def recover_target(self, target, recovery_point, path=None): """ Recover a backup target to a recovery point :param target: Backup target to delete :type target: Instance of :class:`BackupTarget` :param recovery_point: Backup target with the backup data :type recovery_point: Instance of :class:`BackupTarget` :param path: The part of the recovery point to recover (optional) :type path: ``str`` :rtype: Instance of :class:`BackupTargetJob` """ raise NotImplementedError( 'recover_target not implemented for this driver') def recover_target_out_of_place(self, target, recovery_point, recovery_target, path=None): """ Recover a backup target to a recovery point out-of-place :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param recovery_point: Backup target with the backup data :type recovery_point: Instance of :class:`BackupTarget` :param recovery_target: Backup target with to recover the data to :type recovery_target: Instance of :class:`BackupTarget` :param path: The part of the recovery point to recover (optional) :type path: ``str`` 
:rtype: Instance of :class:`BackupTargetJob` """ raise NotImplementedError( 'recover_target_out_of_place not implemented for this driver') def get_target_job(self, target, id): """ Get a specific backup job by ID :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param id: Backup target with the backup data :type id: Instance of :class:`BackupTarget` :rtype: :class:`BackupTargetJob` """ jobs = self.list_target_jobs(target) return list(filter(lambda x: x.id == id, jobs))[0] def list_target_jobs(self, target): """ List the backup jobs on a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :rtype: ``list`` of :class:`BackupTargetJob` """ return [] def create_target_job(self, target, extra=None): """ Create a new backup job on a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param extra: (optional) Extra attributes (driver specific). 
:type extra: ``dict`` :rtype: Instance of :class:`BackupTargetJob` """ name = target.name request = '/zones/%s/disks/%s/createSnapshot' % ( target.extra['zone'].name, target.name) snapshot_data = { 'source': target.extra['source'] } self.connection.async_request(request, method='POST', data=snapshot_data) return self._to_job(self.ex_get_snapshot(name), target) def resume_target_job(self, target, job): """ Resume a suspended backup job on a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param job: Backup target job to resume :type job: Instance of :class:`BackupTargetJob` :rtype: ``bool`` """ raise NotImplementedError( 'resume_target_job not supported for this driver') def suspend_target_job(self, target, job): """ Suspend a running backup job on a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param job: Backup target job to suspend :type job: Instance of :class:`BackupTargetJob` :rtype: ``bool`` """ raise NotImplementedError( 'suspend_target_job not supported for this driver') def cancel_target_job(self, target, job): """ Cancel a backup job on a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param job: Backup target job to cancel :type job: Instance of :class:`BackupTargetJob` :rtype: ``bool`` """ raise NotImplementedError( 'cancel_target_job not supported for this driver') def _to_recovery_points(self, data, target): return [self._to_recovery_point(item, target) for item in data.items] def _to_recovery_point(self, item, target): id = item.id date = parse_date(item.creationTimestamp) point = BackupTargetRecoveryPoint( id=id, date=date, target=target, driver=self.connection.driver, extra={ 'snapshot-id': id, }, ) return point def _to_jobs(self, data, target): return [self._to_job(item, target) for item in data.items] def _to_job(self, item, target): id = item.id job = BackupTargetJob( 
id=id, status=BackupTargetJobStatusType.PENDING, progress=0, target=target, driver=self.connection.driver, extra={ }, ) return job def ex_get_snapshot(self, name): request = '/global/snapshots/%s' % (name) response = self.connection.request(request, method='GET').object return response def ex_get_target_by_source(self, source): return BackupTarget( id=source, name=source, address=source, type=BackupTargetType.VOLUME, driver=self.connection.driver, extra={ "source": source } ) apache-libcloud-2.2.1/libcloud/backup/drivers/dimensiondata.py0000664000175000017500000006232113153541406024335 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from libcloud.utils.py3 import ET from libcloud.backup.base import BackupDriver, BackupTarget, BackupTargetJob from libcloud.backup.types import BackupTargetType from libcloud.backup.types import Provider from libcloud.common.dimensiondata import dd_object_to_id from libcloud.common.dimensiondata import DimensionDataConnection from libcloud.common.dimensiondata import DimensionDataBackupClient from libcloud.common.dimensiondata import DimensionDataBackupClientAlert from libcloud.common.dimensiondata import DimensionDataBackupClientType from libcloud.common.dimensiondata import DimensionDataBackupDetails from libcloud.common.dimensiondata import DimensionDataBackupSchedulePolicy from libcloud.common.dimensiondata import DimensionDataBackupStoragePolicy from libcloud.common.dimensiondata import API_ENDPOINTS, DEFAULT_REGION from libcloud.common.dimensiondata import TYPES_URN from libcloud.common.dimensiondata import GENERAL_NS, BACKUP_NS from libcloud.utils.xml import fixxpath, findtext, findall # pylint: disable=no-member DEFAULT_BACKUP_PLAN = 'Advanced' class DimensionDataBackupDriver(BackupDriver): """ DimensionData backup driver. 
""" selected_region = None connectionCls = DimensionDataConnection name = 'Dimension Data Backup' website = 'https://cloud.dimensiondata.com/' type = Provider.DIMENSIONDATA api_version = 1.0 network_domain_id = None def __init__(self, key, secret=None, secure=True, host=None, port=None, api_version=None, region=DEFAULT_REGION, **kwargs): if region not in API_ENDPOINTS and host is None: raise ValueError( 'Invalid region: %s, no host specified' % (region)) if region is not None: self.selected_region = API_ENDPOINTS[region] super(DimensionDataBackupDriver, self).__init__( key=key, secret=secret, secure=secure, host=host, port=port, api_version=api_version, region=region, **kwargs) def _ex_connection_class_kwargs(self): """ Add the region to the kwargs before the connection is instantiated """ kwargs = super(DimensionDataBackupDriver, self)._ex_connection_class_kwargs() kwargs['region'] = self.selected_region return kwargs def get_supported_target_types(self): """ Get a list of backup target types this driver supports :return: ``list`` of :class:``BackupTargetType`` """ return [BackupTargetType.VIRTUAL] def list_targets(self): """ List all backuptargets :rtype: ``list`` of :class:`BackupTarget` """ targets = self._to_targets( self.connection.request_with_orgId_api_2('server/server').object) return targets def create_target(self, name, address, type=BackupTargetType.VIRTUAL, extra=None): """ Creates a new backup target :param name: Name of the target (not used) :type name: ``str`` :param address: The ID of the node in Dimension Data Cloud :type address: ``str`` :param type: Backup target type, only Virtual supported :type type: :class:`BackupTargetType` :param extra: (optional) Extra attributes (driver specific). 
:type extra: ``dict`` :rtype: Instance of :class:`BackupTarget` """ if extra is not None: service_plan = extra.get('servicePlan', DEFAULT_BACKUP_PLAN) else: service_plan = DEFAULT_BACKUP_PLAN extra = {'servicePlan': service_plan} create_node = ET.Element('NewBackup', {'xmlns': BACKUP_NS}) create_node.set('servicePlan', service_plan) response = self.connection.request_with_orgId_api_1( 'server/%s/backup' % (address), method='POST', data=ET.tostring(create_node)).object asset_id = None for info in findall(response, 'additionalInformation', GENERAL_NS): if info.get('name') == 'assetId': asset_id = findtext(info, 'value', GENERAL_NS) return BackupTarget( id=asset_id, name=name, address=address, type=type, extra=extra, driver=self ) def create_target_from_node(self, node, type=BackupTargetType.VIRTUAL, extra=None): """ Creates a new backup target from an existing node :param node: The Node to backup :type node: ``Node`` :param type: Backup target type (Physical, Virtual, ...). :type type: :class:`BackupTargetType` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: Instance of :class:`BackupTarget` """ return self.create_target(name=node.name, address=node.id, type=BackupTargetType.VIRTUAL, extra=extra) def create_target_from_container(self, container, type=BackupTargetType.OBJECT, extra=None): """ Creates a new backup target from an existing storage container :param node: The Container to backup :type node: ``Container`` :param type: Backup target type (Physical, Virtual, ...). :type type: :class:`BackupTargetType` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: Instance of :class:`BackupTarget` """ return NotImplementedError( 'create_target_from_container not supported for this driver') def update_target(self, target, name=None, address=None, extra=None): """ Update the properties of a backup target, only changing the serviceplan is supported. 
:param target: Backup target to update :type target: Instance of :class:`BackupTarget` or ``str`` :param name: Name of the target :type name: ``str`` :param address: Hostname, FQDN, IP, file path etc. :type address: ``str`` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: Instance of :class:`BackupTarget` """ if extra is not None: service_plan = extra.get('servicePlan', DEFAULT_BACKUP_PLAN) else: service_plan = DEFAULT_BACKUP_PLAN request = ET.Element('ModifyBackup', {'xmlns': BACKUP_NS}) request.set('servicePlan', service_plan) server_id = self._target_to_target_address(target) self.connection.request_with_orgId_api_1( 'server/%s/backup/modify' % (server_id), method='POST', data=ET.tostring(request)).object if isinstance(target, BackupTarget): target.extra = extra else: target = self.ex_get_target_by_id(server_id) return target def delete_target(self, target): """ Delete a backup target :param target: Backup target to delete :type target: Instance of :class:`BackupTarget` or ``str`` :rtype: ``bool`` """ server_id = self._target_to_target_address(target) response = self.connection.request_with_orgId_api_1( 'server/%s/backup?disable' % (server_id), method='GET').object response_code = findtext(response, 'result', GENERAL_NS) return response_code in ['IN_PROGRESS', 'SUCCESS'] def list_recovery_points(self, target, start_date=None, end_date=None): """ List the recovery points available for a target :param target: Backup target to delete :type target: Instance of :class:`BackupTarget` :param start_date: The start date to show jobs between (optional) :type start_date: :class:`datetime.datetime` :param end_date: The end date to show jobs between (optional) :type end_date: :class:`datetime.datetime`` :rtype: ``list`` of :class:`BackupTargetRecoveryPoint` """ raise NotImplementedError( 'list_recovery_points not implemented for this driver') def recover_target(self, target, recovery_point, path=None): """ Recover a backup target to 
a recovery point :param target: Backup target to delete :type target: Instance of :class:`BackupTarget` :param recovery_point: Backup target with the backup data :type recovery_point: Instance of :class:`BackupTarget` :param path: The part of the recovery point to recover (optional) :type path: ``str`` :rtype: Instance of :class:`BackupTargetJob` """ raise NotImplementedError( 'recover_target not implemented for this driver') def recover_target_out_of_place(self, target, recovery_point, recovery_target, path=None): """ Recover a backup target to a recovery point out-of-place :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param recovery_point: Backup target with the backup data :type recovery_point: Instance of :class:`BackupTarget` :param recovery_target: Backup target with to recover the data to :type recovery_target: Instance of :class:`BackupTarget` :param path: The part of the recovery point to recover (optional) :type path: ``str`` :rtype: Instance of :class:`BackupTargetJob` """ raise NotImplementedError( 'recover_target_out_of_place not implemented for this driver') def get_target_job(self, target, id): """ Get a specific backup job by ID :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param id: Backup target with the backup data :type id: Instance of :class:`BackupTarget` :rtype: :class:`BackupTargetJob` """ jobs = self.list_target_jobs(target) return list(filter(lambda x: x.id == id, jobs))[0] def list_target_jobs(self, target): """ List the backup jobs on a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :rtype: ``list`` of :class:`BackupTargetJob` """ raise NotImplementedError( 'list_target_jobs not implemented for this driver') def create_target_job(self, target, extra=None): """ Create a new backup job on a target :param target: Backup target with the backup data :type target: Instance of 
:class:`BackupTarget` :param extra: (optional) Extra attributes (driver specific). :type extra: ``dict`` :rtype: Instance of :class:`BackupTargetJob` """ raise NotImplementedError( 'create_target_job not implemented for this driver') def resume_target_job(self, target, job): """ Resume a suspended backup job on a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param job: Backup target job to resume :type job: Instance of :class:`BackupTargetJob` :rtype: ``bool`` """ raise NotImplementedError( 'resume_target_job not implemented for this driver') def suspend_target_job(self, target, job): """ Suspend a running backup job on a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` :param job: Backup target job to suspend :type job: Instance of :class:`BackupTargetJob` :rtype: ``bool`` """ raise NotImplementedError( 'suspend_target_job not implemented for this driver') def cancel_target_job(self, job, ex_client=None, ex_target=None): """ Cancel a backup job on a target :param job: Backup target job to cancel. If it is ``None`` ex_client and ex_target must be set :type job: Instance of :class:`BackupTargetJob` or ``None`` :param ex_client: Client of the job to cancel. Not necessary if job is specified. DimensionData only has 1 job per client :type ex_client: Instance of :class:`DimensionDataBackupClient` or ``str`` :param ex_target: Target to cancel a job from. Not necessary if job is specified. 
:type ex_target: Instance of :class:`BackupTarget` or ``str`` :rtype: ``bool`` """ if job is None: if ex_client is None or ex_target is None: raise ValueError("Either job or ex_client and " "ex_target have to be set") server_id = self._target_to_target_address(ex_target) client_id = self._client_to_client_id(ex_client) else: server_id = job.target.address client_id = job.extra['clientId'] response = self.connection.request_with_orgId_api_1( 'server/%s/backup/client/%s?cancelJob' % (server_id, client_id), method='GET').object response_code = findtext(response, 'result', GENERAL_NS) return response_code in ['IN_PROGRESS', 'SUCCESS'] def ex_get_target_by_id(self, id): """ Get a target by server id :param id: The id of the target you want to get :type id: ``str`` :rtype: :class:`BackupTarget` """ node = self.connection.request_with_orgId_api_2( 'server/server/%s' % id).object return self._to_target(node) def ex_add_client_to_target(self, target, client_type, storage_policy, schedule_policy, trigger, email): """ Add a client to a target :param target: Backup target with the backup data :type target: Instance of :class:`BackupTarget` or ``str`` :param client: Client to add to the target :type client: Instance of :class:`DimensionDataBackupClientType` or ``str`` :param storage_policy: The storage policy for the client :type storage_policy: Instance of :class:`DimensionDataBackupStoragePolicy` or ``str`` :param schedule_policy: The schedule policy for the client :type schedule_policy: Instance of :class:`DimensionDataBackupSchedulePolicy` or ``str`` :param trigger: The notify trigger for the client :type trigger: ``str`` :param email: The notify email for the client :type email: ``str`` :rtype: ``bool`` """ server_id = self._target_to_target_address(target) backup_elm = ET.Element('NewBackupClient', {'xmlns': BACKUP_NS}) if isinstance(client_type, DimensionDataBackupClientType): ET.SubElement(backup_elm, "type").text = client_type.type else: ET.SubElement(backup_elm, 
"type").text = client_type if isinstance(storage_policy, DimensionDataBackupStoragePolicy): ET.SubElement(backup_elm, "storagePolicyName").text = storage_policy.name else: ET.SubElement(backup_elm, "storagePolicyName").text = storage_policy if isinstance(schedule_policy, DimensionDataBackupSchedulePolicy): ET.SubElement(backup_elm, "schedulePolicyName").text = schedule_policy.name else: ET.SubElement(backup_elm, "schedulePolicyName").text = schedule_policy alerting_elm = ET.SubElement(backup_elm, "alerting") alerting_elm.set('trigger', trigger) ET.SubElement(alerting_elm, "emailAddress").text = email response = self.connection.request_with_orgId_api_1( 'server/%s/backup/client' % (server_id), method='POST', data=ET.tostring(backup_elm)).object response_code = findtext(response, 'result', GENERAL_NS) return response_code in ['IN_PROGRESS', 'SUCCESS'] def ex_remove_client_from_target(self, target, backup_client): """ Removes a client from a backup target :param target: The backup target to remove the client from :type target: :class:`BackupTarget` or ``str`` :param backup_client: The backup client to remove :type backup_client: :class:`DimensionDataBackupClient` or ``str`` :rtype: ``bool`` """ server_id = self._target_to_target_address(target) client_id = self._client_to_client_id(backup_client) response = self.connection.request_with_orgId_api_1( 'server/%s/backup/client/%s?disable' % (server_id, client_id), method='GET').object response_code = findtext(response, 'result', GENERAL_NS) return response_code in ['IN_PROGRESS', 'SUCCESS'] def ex_get_backup_details_for_target(self, target): """ Returns a backup details object for a target :param target: The backup target to get details for :type target: :class:`BackupTarget` or ``str`` :rtype: :class:`DimensionDataBackupDetails` """ if not isinstance(target, BackupTarget): target = self.ex_get_target_by_id(target) if target is None: return response = self.connection.request_with_orgId_api_1( 'server/%s/backup' % 
(target.address), method='GET').object return self._to_backup_details(response, target) def ex_list_available_client_types(self, target): """ Returns a list of available backup client types :param target: The backup target to list available types for :type target: :class:`BackupTarget` or ``str`` :rtype: ``list`` of :class:`DimensionDataBackupClientType` """ server_id = self._target_to_target_address(target) response = self.connection.request_with_orgId_api_1( 'server/%s/backup/client/type' % (server_id), method='GET').object return self._to_client_types(response) def ex_list_available_storage_policies(self, target): """ Returns a list of available backup storage policies :param target: The backup target to list available policies for :type target: :class:`BackupTarget` or ``str`` :rtype: ``list`` of :class:`DimensionDataBackupStoragePolicy` """ server_id = self._target_to_target_address(target) response = self.connection.request_with_orgId_api_1( 'server/%s/backup/client/storagePolicy' % (server_id), method='GET').object return self._to_storage_policies(response) def ex_list_available_schedule_policies(self, target): """ Returns a list of available backup schedule policies :param target: The backup target to list available policies for :type target: :class:`BackupTarget` or ``str`` :rtype: ``list`` of :class:`DimensionDataBackupSchedulePolicy` """ server_id = self._target_to_target_address(target) response = self.connection.request_with_orgId_api_1( 'server/%s/backup/client/schedulePolicy' % (server_id), method='GET').object return self._to_schedule_policies(response) def _to_storage_policies(self, object): elements = object.findall(fixxpath('storagePolicy', BACKUP_NS)) return [self._to_storage_policy(el) for el in elements] def _to_storage_policy(self, element): return DimensionDataBackupStoragePolicy( retention_period=int(element.get('retentionPeriodInDays')), name=element.get('name'), secondary_location=element.get('secondaryLocation') ) def 
_to_schedule_policies(self, object): elements = object.findall(fixxpath('schedulePolicy', BACKUP_NS)) return [self._to_schedule_policy(el) for el in elements] def _to_schedule_policy(self, element): return DimensionDataBackupSchedulePolicy( name=element.get('name'), description=element.get('description') ) def _to_client_types(self, object): elements = object.findall(fixxpath('backupClientType', BACKUP_NS)) return [self._to_client_type(el) for el in elements] def _to_client_type(self, element): description = element.get('description') if description is None: description = findtext(element, 'description', BACKUP_NS) return DimensionDataBackupClientType( type=element.get('type'), description=description, is_file_system=bool(element.get('isFileSystem') == 'true') ) def _to_backup_details(self, object, target): return DimensionDataBackupDetails( asset_id=object.get('assetId'), service_plan=object.get('servicePlan'), status=object.get('state'), clients=self._to_clients(object, target) ) def _to_clients(self, object, target): elements = object.findall(fixxpath('backupClient', BACKUP_NS)) return [self._to_client(el, target) for el in elements] def _to_client(self, element, target): client_id = element.get('id') return DimensionDataBackupClient( id=client_id, type=self._to_client_type(element), status=element.get('status'), schedule_policy=findtext(element, 'schedulePolicyName', BACKUP_NS), storage_policy=findtext(element, 'storagePolicyName', BACKUP_NS), download_url=findtext(element, 'downloadUrl', BACKUP_NS), running_job=self._to_backup_job(element, target, client_id), alert=self._to_alert(element) ) def _to_alert(self, element): alert = element.find(fixxpath('alerting', BACKUP_NS)) if alert is not None: notify_list = [ email_addr.text for email_addr in alert.findall(fixxpath('emailAddress', BACKUP_NS)) ] return DimensionDataBackupClientAlert( trigger=element.get('trigger'), notify_list=notify_list ) return None def _to_backup_job(self, element, target, client_id): 
running_job = element.find(fixxpath('runningJob', BACKUP_NS)) if running_job is not None: return BackupTargetJob( id=running_job.get('id'), status=running_job.get('status'), progress=int(running_job.get('percentageComplete')), driver=self.connection.driver, target=target, extra={'clientId': client_id} ) return None def _to_targets(self, object): node_elements = object.findall(fixxpath('server', TYPES_URN)) return [self._to_target(el) for el in node_elements] def _to_target(self, element): backup = findall(element, 'backup', TYPES_URN) if len(backup) == 0: return extra = { 'description': findtext(element, 'description', TYPES_URN), 'sourceImageId': findtext(element, 'sourceImageId', TYPES_URN), 'datacenterId': element.get('datacenterId'), 'deployedTime': findtext(element, 'createTime', TYPES_URN), 'servicePlan': backup[0].get('servicePlan') } n = BackupTarget(id=backup[0].get('assetId'), name=findtext(element, 'name', TYPES_URN), address=element.get('id'), driver=self.connection.driver, type=BackupTargetType.VIRTUAL, extra=extra) return n @staticmethod def _client_to_client_id(backup_client): return dd_object_to_id(backup_client, DimensionDataBackupClient) @staticmethod def _target_to_target_address(target): return dd_object_to_id(target, BackupTarget, id_value='address') apache-libcloud-2.2.1/libcloud/backup/base.py0000664000175000017500000003675312703467102020764 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.common.base import ConnectionUserAndKey, BaseDriver
from libcloud.backup.types import BackupTargetType

__all__ = [
    'BackupTarget',
    'BackupDriver',
    'BackupTargetJob',
    'BackupTargetRecoveryPoint'
]


class BackupTarget(object):
    """
    A backup target
    """

    def __init__(self, id, name, address, type, driver, extra=None):
        """
        :param id: Target id
        :type id: ``str``

        :param name: Name of the target
        :type name: ``str``

        :param address: Hostname, FQDN, IP, file path etc.
        :type address: ``str``

        :param type: Backup target type (Physical, Virtual, ...).
        :type type: :class:`.BackupTargetType`

        :param driver: BackupDriver instance.
        :type driver: :class:`.BackupDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        self.id = str(id) if id else None
        self.name = name
        self.address = address
        self.type = type
        self.driver = driver
        self.extra = extra or {}

    def update(self, name=None, address=None, extra=None):
        return self.driver.update_target(target=self,
                                         name=name,
                                         address=address,
                                         extra=extra)

    def delete(self):
        return self.driver.delete_target(target=self)

    def _get_numeric_id(self):
        # Convenience helper for drivers whose API uses integer ids.
        target_id = self.id

        if target_id.isdigit():
            target_id = int(target_id)

        return target_id

    def __repr__(self):
        # NOTE(review): the format string was lost during extraction of
        # this archive; reconstructed from the argument list.
        return ('<BackupTarget: id=%s, name=%s, address=%s, '
                'type=%s, provider=%s>' %
                (self.id, self.name, self.address, self.type,
                 self.driver.name))


class BackupTargetJob(object):
    """
    A backup target job
    """

    def __init__(self, id, status, progress, target, driver, extra=None):
        """
        :param id: Job id
        :type id: ``str``

        :param status: Status of the job
        :type status: :class:`BackupTargetJobStatusType`

        :param progress: Progress of the job, as a percentage
        :type progress: ``int``

        :param target: BackupTarget instance.
        :type target: :class:`.BackupTarget`

        :param driver: BackupDriver instance.
        :type driver: :class:`.BackupDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        self.id = str(id) if id else None
        self.status = status
        self.progress = progress
        self.target = target
        self.driver = driver
        self.extra = extra or {}

    def cancel(self):
        return self.driver.cancel_target_job(job=self)

    def suspend(self):
        return self.driver.suspend_target_job(job=self)

    def resume(self):
        return self.driver.resume_target_job(job=self)

    def __repr__(self):
        # NOTE(review): format string reconstructed (see BackupTarget).
        return ('<BackupTargetJob: id=%s, status=%s, progress=%s, '
                'target=%s, provider=%s>' %
                (self.id, self.status, self.progress, self.target.id,
                 self.driver.name))


class BackupTargetRecoveryPoint(object):
    """
    A backup target recovery point
    """

    def __init__(self, id, date, target, driver, extra=None):
        """
        :param id: Job id
        :type id: ``str``

        :param date: The date taken
        :type date: :class:`datetime.datetime`

        :param target: BackupTarget instance.
        :type target: :class:`.BackupTarget`

        :param driver: BackupDriver instance.
        :type driver: :class:`.BackupDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        self.id = str(id) if id else None
        self.date = date
        self.target = target
        self.driver = driver
        self.extra = extra or {}

    def recover(self, path=None):
        """
        Recover this recovery point

        :param path: The part of the recovery point to recover (optional)
        :type path: ``str``

        :rtype: Instance of :class:`.BackupTargetJob`
        """
        return self.driver.recover_target(target=self.target,
                                          recovery_point=self, path=path)

    def recover_to(self, recovery_target, path=None):
        """
        Recover this recovery point out of place

        :param recovery_target: Backup target with to recover the data to
        :type recovery_target: Instance of :class:`.BackupTarget`

        :param path: The part of the recovery point to recover (optional)
        :type path: ``str``

        :rtype: Instance of :class:`.BackupTargetJob`
        """
        return self.driver.recover_target_out_of_place(
            target=self.target,
            recovery_point=self,
            recovery_target=recovery_target,
            path=path)

    def __repr__(self):
        # NOTE(review): format string reconstructed (see BackupTarget).
        return ('<BackupTargetRecoveryPoint: id=%s, date=%s, '
                'target=%s, provider=%s>' %
                (self.id, self.date, self.target.id, self.driver.name))


class BackupDriver(BaseDriver):
    """
    A base BackupDriver class to derive from

    This class is always subclassed by a specific driver.
    """
    connectionCls = ConnectionUserAndKey
    name = None
    website = None

    def __init__(self, key, secret=None, secure=True, host=None,
                 port=None, **kwargs):
        """
        :param key: API key or username to used (required)
        :type key: ``str``

        :param secret: Secret password to be used (required)
        :type secret: ``str``

        :param secure: Whether to use HTTPS or HTTP. Note: Some providers
                only support HTTPS, and it is on by default.
        :type secure: ``bool``

        :param host: Override hostname used for connections.
        :type host: ``str``

        :param port: Override port used for connections.
        :type port: ``int``

        :return: ``None``
        """
        super(BackupDriver, self).__init__(key=key, secret=secret,
                                           secure=secure, host=host,
                                           port=port, **kwargs)

    def get_supported_target_types(self):
        """
        Get a list of backup target types this driver supports

        :return: ``list`` of :class:``BackupTargetType``
        """
        raise NotImplementedError(
            'get_supported_target_types not implemented for this driver')

    def list_targets(self):
        """
        List all backuptargets

        :rtype: ``list`` of :class:`.BackupTarget`
        """
        raise NotImplementedError(
            'list_targets not implemented for this driver')

    def create_target(self, name, address,
                      type=BackupTargetType.VIRTUAL, extra=None):
        """
        Creates a new backup target

        :param name: Name of the target
        :type name: ``str``

        :param address: Hostname, FQDN, IP, file path etc.
        :type address: ``str``

        :param type: Backup target type (Physical, Virtual, ...).
        :type type: :class:`BackupTargetType`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`.BackupTarget`
        """
        raise NotImplementedError(
            'create_target not implemented for this driver')

    def create_target_from_node(self, node,
                                type=BackupTargetType.VIRTUAL,
                                extra=None):
        """
        Creates a new backup target from an existing node.
        By default, this will use the first public IP of the node

        :param node: The Node to backup
        :type node: ``Node``

        :param type: Backup target type (Physical, Virtual, ...).
        :type type: :class:`BackupTargetType`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`.BackupTarget`
        """
        # Bug fix: previously passed extra=None, silently discarding the
        # caller-supplied extra attributes.
        return self.create_target(name=node.name,
                                  address=node.public_ips[0],
                                  type=type,
                                  extra=extra)

    def create_target_from_storage_container(
            self, container, type=BackupTargetType.OBJECT, extra=None):
        """
        Creates a new backup target from an existing storage container

        :param container: The Container to backup
        :type container: ``Container``

        :param type: Backup target type (Physical, Virtual, ...).
        :type type: :class:`BackupTargetType`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`.BackupTarget`
        """
        # Bug fix: previously passed extra=None, silently discarding the
        # caller-supplied extra attributes.
        return self.create_target(name=container.name,
                                  address=container.get_cdn_url(),
                                  type=type,
                                  extra=extra)

    def update_target(self, target, name, address, extra):
        """
        Update the properties of a backup target

        :param target: Backup target to update
        :type target: Instance of :class:`.BackupTarget`

        :param name: Name of the target
        :type name: ``str``

        :param address: Hostname, FQDN, IP, file path etc.
        :type address: ``str``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`.BackupTarget`
        """
        raise NotImplementedError(
            'update_target not implemented for this driver')

    def delete_target(self, target):
        """
        Delete a backup target

        :param target: Backup target to delete
        :type target: Instance of :class:`.BackupTarget`
        """
        raise NotImplementedError(
            'delete_target not implemented for this driver')

    def list_recovery_points(self, target, start_date=None, end_date=None):
        """
        List the recovery points available for a target

        :param target: Backup target to delete
        :type target: Instance of :class:`.BackupTarget`

        :param start_date: The start date to show jobs between (optional)
        :type start_date: :class:`datetime.datetime`

        :param end_date: The end date to show jobs between (optional)
        :type end_date: :class:`datetime.datetime``

        :rtype: ``list`` of :class:`.BackupTargetRecoveryPoint`
        """
        raise NotImplementedError(
            'list_recovery_points not implemented for this driver')

    def recover_target(self, target, recovery_point, path=None):
        """
        Recover a backup target to a recovery point

        :param target: Backup target to delete
        :type target: Instance of :class:`.BackupTarget`

        :param recovery_point: Backup target with the backup data
        :type recovery_point: Instance of :class:`.BackupTarget`

        :param path: The part of the recovery point to recover (optional)
        :type path: ``str``

        :rtype: Instance of :class:`.BackupTargetJob`
        """
        raise NotImplementedError(
            'recover_target not implemented for this driver')

    def recover_target_out_of_place(self, target, recovery_point,
                                    recovery_target, path=None):
        """
        Recover a backup target to a recovery point out-of-place

        :param target: Backup target with the backup data
        :type target: Instance of :class:`.BackupTarget`

        :param recovery_point: Backup target with the backup data
        :type recovery_point: Instance of :class:`.BackupTarget`

        :param recovery_target: Backup target with to recover the data to
        :type recovery_target: Instance of :class:`.BackupTarget`

        :param path: The part of the recovery point to recover (optional)
        :type path: ``str``

        :rtype: Instance of :class:`BackupTargetJob`
        """
        raise NotImplementedError(
            'recover_target_out_of_place not implemented for this driver')

    def get_target_job(self, target, id):
        """
        Get a specific backup job by ID

        :param target: Backup target with the backup data
        :type target: Instance of :class:`.BackupTarget`

        :param id: Backup target with the backup data
        :type id: Instance of :class:`.BackupTarget`

        :rtype: :class:`BackupTargetJob`
        """
        # NOTE: raises IndexError when no job matches the given id.
        jobs = self.list_target_jobs(target)
        return list(filter(lambda x: x.id == id, jobs))[0]

    def list_target_jobs(self, target):
        """
        List the backup jobs on a target

        :param target: Backup target with the backup data
        :type target: Instance of :class:`.BackupTarget`

        :rtype: ``list`` of :class:`.BackupTargetJob`
        """
        raise NotImplementedError(
            'list_target_jobs not implemented for this driver')

    def create_target_job(self, target, extra=None):
        """
        Create a new backup job on a target

        :param target: Backup target with the backup data
        :type target: Instance of :class:`.BackupTarget`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`BackupTargetJob`
        """
        raise NotImplementedError(
            'create_target_job not implemented for this driver')

    def resume_target_job(self, job):
        """
        Resume a suspended backup job on a target

        :param job: Backup target job to resume
        :type job: Instance of :class:`.BackupTargetJob`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'resume_target_job not implemented for this driver')

    def suspend_target_job(self, job):
        """
        Suspend a running backup job on a target

        :param job: Backup target job to suspend
        :type job: Instance of :class:`.BackupTargetJob`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'suspend_target_job not implemented for this driver')

    def cancel_target_job(self, job):
        """
        Cancel a backup job on a target

        :param job: Backup target job to cancel
        :type job: Instance of :class:`.BackupTargetJob`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'cancel_target_job not implemented for this driver')


# --- libcloud/test/conftest.py (next archive member) ---

import os.path

import pytest


def pytest_configure(config):
    """Check that secrets.py is valid"""
    this_dir = os.path.abspath(os.path.split(__file__)[0])
    secrets_current = os.path.join(this_dir, 'secrets.py')
    secrets_dist = os.path.join(this_dir, 'secrets.py-dist')

    if not os.path.isfile(secrets_current):
        print("Missing " + secrets_current)
        print("Maybe you forgot to copy it from -dist:")
        print("cp libcloud/test/secrets.py-dist libcloud/test/secrets.py")
        pytest.exit('')

    mtime_current = os.path.getmtime(secrets_current)
    mtime_dist = os.path.getmtime(secrets_dist)

    if mtime_dist > mtime_current:
        print("It looks like test/secrets.py file is out of date.")
        print("Please copy the new secrets.py-dist file over otherwise" +
              " tests might fail")
        pytest.exit('')
import random

import requests

from libcloud.common.base import Response
from libcloud.http import LibcloudConnection
from libcloud.utils.py3 import PY2

if PY2:
    from StringIO import StringIO
else:
    from io import StringIO

import requests_mock

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.utils.py3 import parse_qsl
from libcloud.utils.py3 import urlquote
from libcloud.utils.py3 import unittest2_required

if unittest2_required:
    import unittest2 as unittest
else:
    import unittest


XML_HEADERS = {'content-type': 'application/xml'}


class LibcloudTestCase(unittest.TestCase):
    """
    Base test case which records every mocked URL that was hit and every
    mock method that was executed, so tests can assert on them.
    """

    def __init__(self, *args, **kwargs):
        self._visited_urls = []
        self._executed_mock_methods = []
        super(LibcloudTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        self._visited_urls = []
        self._executed_mock_methods = []

    def _add_visited_url(self, url):
        self._visited_urls.append(url)

    def _add_executed_mock_method(self, method_name):
        self._executed_mock_methods.append(method_name)

    def assertExecutedMethodCount(self, expected):
        actual = len(self._executed_mock_methods)
        self.assertEqual(actual, expected,
                         'expected %d, but %d mock methods were executed'
                         % (expected, actual))


class multipleresponse(object):
    """
    A decorator that allows MockHttp objects to return multi responses
    """
    count = 0
    func = None

    def __init__(self, f):
        self.func = f

    def __call__(self, *args, **kwargs):
        ret = self.func(self.func.__class__, *args, **kwargs)
        response = ret[self.count]
        self.count = self.count + 1
        return response


class BodyStream(StringIO):
    """StringIO subclass that tolerates a chunk_size argument."""

    def next(self, chunk_size=None):
        return StringIO.next(self)

    def __next__(self, chunk_size=None):
        return StringIO.__next__(self)

    def read(self, chunk_size=None):
        return StringIO.read(self)


class MockHttp(LibcloudConnection):
    """
    A mock HTTP client/server suitable for testing purposes. This replaces
    `HTTPConnection` by implementing its API and returning
    a mock response.

    Define methods by request path, replacing slashes (/) with underscores
    (_).

    Each of these mock methods should return a tuple of:

        (int status, str body, dict headers, str reason)
    """
    type = None
    use_param = None  # will use this param to namespace the request function
    test = None  # TestCase instance which is using this mock
    proxy_url = None

    def __init__(self, *args, **kwargs):
        # Load assertion methods into the class, incase people want to
        # assert within a response
        if isinstance(self, unittest.TestCase):
            unittest.TestCase.__init__(self, '__init__')
        super(MockHttp, self).__init__(*args, **kwargs)

    def _get_request(self, method, url, body=None, headers=None):
        # Find a method we can use for this request
        parsed = urlparse.urlparse(url)
        _, _, path, _, query, _ = parsed
        qs = parse_qs(query)
        if path.endswith('/'):
            path = path[:-1]
        meth_name = self._get_method_name(type=self.type,
                                          use_param=self.use_param,
                                          qs=qs, path=path)
        meth = getattr(self, meth_name.replace('%', '_'))

        if self.test and isinstance(self.test, LibcloudTestCase):
            self.test._add_visited_url(url=url)
            self.test._add_executed_mock_method(method_name=meth_name)

        return meth(method, url, body, headers)

    def request(self, method, url, body=None, headers=None, raw=False,
                stream=False):
        headers = self._normalize_headers(headers=headers)
        r_status, r_body, r_headers, r_reason = self._get_request(
            method, url, body, headers)
        if r_body is None:
            r_body = ''
        # this is to catch any special chars e.g. ~ in the request. URL
        url = urlquote(url)

        with requests_mock.mock() as m:
            m.register_uri(method, url, text=r_body, reason=r_reason,
                           headers=r_headers, status_code=r_status)
            try:
                super(MockHttp, self).request(
                    method=method, url=url, body=body, headers=headers,
                    raw=raw, stream=stream)
            except requests_mock.exceptions.NoMockAddress as nma:
                raise AttributeError("Failed to mock out URL {0} - {1}"
                                     .format(url, nma.request.url))

    def prepared_request(self, method, url, body=None, headers=None,
                         raw=False, stream=False):
        headers = self._normalize_headers(headers=headers)
        r_status, r_body, r_headers, r_reason = self._get_request(
            method, url, body, headers)

        with requests_mock.mock() as m:
            m.register_uri(method, url, text=r_body, reason=r_reason,
                           headers=r_headers, status_code=r_status)
            super(MockHttp, self).prepared_request(
                method=method, url=url, body=body, headers=headers,
                raw=raw, stream=stream)

    # Mock request/response example
    def _example(self, method, url, body, headers):
        """
        Return a simple message and header, regardless of input.
        """
        return (httplib.OK, 'Hello World!', {'X-Foo': 'libcloud'},
                httplib.responses[httplib.OK])

    def _example_fail(self, method, url, body, headers):
        return (httplib.FORBIDDEN, 'Oh Noes!', {'X-Foo': 'fail'},
                httplib.responses[httplib.FORBIDDEN])

    def _get_method_name(self, type, use_param, qs, path):
        path = path.split('?')[0]
        meth_name = path.replace('/', '_').replace('.', '_') \
                        .replace('-', '_')

        if type:
            meth_name = '%s_%s' % (meth_name, self.type)

        if use_param and use_param in qs:
            param = qs[use_param][0].replace('.', '_').replace('-', '_')
            meth_name = '%s_%s' % (meth_name, param)

        if meth_name == '':
            meth_name = 'root'

        return meth_name

    def assertUrlContainsQueryParams(self, url, expected_params,
                                     strict=False):
        """
        Assert that provided url contains provided query parameters.

        :param url: URL to assert.
        :type url: ``str``

        :param expected_params: Dictionary of expected query parameters.
        :type expected_params: ``dict``

        :param strict: Assert that provided url contains only
                       expected_params. (defaults to ``False``)
        :type strict: ``bool``
        """
        question_mark_index = url.find('?')

        if question_mark_index != -1:
            url = url[question_mark_index + 1:]

        params = dict(parse_qsl(url))

        if strict:
            assert params == expected_params
        else:
            for key, value in expected_params.items():
                assert key in params
                assert params[key] == value


class MockConnection(object):
    def __init__(self, action):
        self.action = action


StorageMockHttp = MockHttp


def make_response(status=200, headers=None, connection=None):
    """
    Build a libcloud Response wrapping a requests.Response.

    Bug fix: ``headers`` previously defaulted to a shared mutable ``{}``
    which was then aliased as ``response.headers``, so mutations leaked
    between calls.  Use ``None`` and create a fresh dict per call.
    """
    response = requests.Response()
    response.status_code = status
    response.headers = {} if headers is None else headers
    return response


def generate_random_data(size):
    """
    Return a string of random decimal digits at least ``size`` chars long.
    """
    # Collect chunks and join once instead of quadratic str concatenation.
    chunks = []
    current_size = 0
    while current_size < size:
        value = str(random.randint(0, 9))
        current_size += len(value)
        chunks.append(value)
    return ''.join(chunks)


if __name__ == "__main__":
    import doctest
    doctest.testmod()
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apache-libcloud-2.2.1/libcloud/test/common/test_retry_limit.py0000664000175000017500000000434413153541406024445 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import socket import ssl from mock import Mock, patch, MagicMock from libcloud.utils.misc import TRANSIENT_SSL_ERROR from libcloud.common.base import Connection from libcloud.test import unittest CONFLICT_RESPONSE_STATUS = [ ('status', '429'), ('reason', 'CONFLICT'), ('retry_after', '3'), ('content-type', 'application/json')] SIMPLE_RESPONSE_STATUS = ('HTTP/1.1', 429, 'CONFLICT') @patch('os.environ', {'LIBCLOUD_RETRY_FAILED_HTTP_REQUESTS': True}) class FailedRequestRetryTestCase(unittest.TestCase): def test_retry_connection(self): con = Connection(timeout=1, retry_delay=0.1) con.connection = Mock() connect_method = 'libcloud.common.base.Connection.request' with patch(connect_method) as mock_connect: try: mock_connect.side_effect = socket.gaierror('') con.request('/') except socket.gaierror: pass def test_retry_connection_ssl_error(self): conn = Connection(timeout=1, retry_delay=0.1) with patch.object(conn, 'connect', Mock()): with patch.object(conn, 'connection') as connection: connection.request = MagicMock( __name__='request', side_effect=ssl.SSLError(TRANSIENT_SSL_ERROR)) self.assertRaises(ssl.SSLError, conn.request, '/') self.assertGreater(connection.request.call_count, 1) if __name__ == '__main__': unittest.main() apache-libcloud-2.2.1/libcloud/test/common/fixtures/0000775000175000017500000000000013160535107022334 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/common/fixtures/google/0000775000175000017500000000000013160535107023610 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/common/fixtures/google/pkey.pem0000664000175000017500000000156712701023453025270 0ustar kamikami00000000000000-----BEGIN RSA PRIVATE KEY----- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -----END RSA PRIVATE KEY----- apache-libcloud-2.2.1/libcloud/test/common/fixtures/google/pkey.json0000664000175000017500000000220612701023453025447 0ustar kamikami00000000000000{ "private_key_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "private_key": "-----BEGIN PRIVATE KEY-----\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxxxxxxxxxxxxxxxxxxxx\n-----END PRIVATE KEY-----\n", "client_email": "foo@developer.gserviceaccount.com", "client_id": "foo.apps.googleusercontent.com", "type": "service_account" } 
apache-libcloud-2.2.1/libcloud/test/common/fixtures/digitalocean/0000775000175000017500000000000013160535107024757 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/common/fixtures/digitalocean/_v1_events_12345670.json0000664000175000017500000000017112701023453030711 0ustar kamikami00000000000000{"status":"OK","event":{"id":12345670,"event_type_id":1,"percentage":"100","droplet_id":1234560,"action_status":"done"}} ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/common/fixtures/digitalocean/_v1_events_12345670_UNAUTHORIZED.jsonapache-libcloud-2.2.1/libcloud/test/common/fixtures/digitalocean/_v1_events_12345670_UNAUTHORIZED.js0000664000175000017500000000011512701023453032353 0ustar kamikami00000000000000{"status":"ERROR","error_message":"Access Denied","message":"Access Denied"} apache-libcloud-2.2.1/libcloud/test/common/fixtures/digitalocean/_v2_account_UNAUTHORIZED.json0000664000175000017500000000007612701023453032054 0ustar kamikami00000000000000{"id":"unauthorized","message":"Unable to authenticate you."} apache-libcloud-2.2.1/libcloud/test/common/fixtures/digitalocean/_v2_actions_page_2.json0000664000175000017500000000107012701023453031267 0ustar kamikami00000000000000{"actions":[{"id":12345670,"status":"completed","type":"create","started_at":"2015-04-10T14:09:12Z","completed_at":"2015-04-10T14:09:38Z","resource_id":1234560,"resource_type":"droplet","region":{"name":"Frankfurt 1","slug":"fra1","sizes":["512mb","1gb","2gb","4gb","8gb","16gb","32gb","48gb","64gb"],"features":["virtio","private_networking","backups","ipv6","metadata"],"available":true},"region_slug":"fra1"}],"links":{"pages":{"first":"https://api.digitalocean.com/v2/actions/?page=1","prev":"https://api.digitalocean.com/v2/actions/?page=1"}},"meta":{"total":2}} apache-libcloud-2.2.1/libcloud/test/common/fixtures/digitalocean/_v2_actions_page_1.json0000664000175000017500000000106712701023453031274 0ustar 
kamikami00000000000000{"actions":[{"id":12345671,"status":"completed","type":"create","started_at":"2015-04-10T14:09:37Z","completed_at":"2015-04-10T14:10:03Z","resource_id":1234561,"resource_type":"droplet","region":{"name":"Frankfurt 1","slug":"fra1","sizes":["512mb","1gb","2gb","4gb","8gb","16gb","32gb","48gb","64gb"],"features":["virtio","private_networking","backups","ipv6","metadata"],"available":true},"region_slug":"fra1"}],"links":{"pages":{"last":"https://api.digitalocean.com/v2/actions/?page=2","next":"https://api.digitalocean.com/v2/actions/?page=2"}},"meta":{"total":2}} apache-libcloud-2.2.1/libcloud/test/common/fixtures/digitalocean/_v2_actions.json0000664000175000017500000000005512701023453030054 0ustar kamikami00000000000000{"actions":[],"links":{},"meta":{"total":0}} apache-libcloud-2.2.1/libcloud/test/common/fixtures/digitalocean/_v2_account.json0000664000175000017500000000020212701023453030042 0ustar kamikami00000000000000{"account":{"droplet_limit":10,"email":"user@domain.tld","uuid":"a1234567890b1234567890c1234567890d12345","email_verified":true}} apache-libcloud-2.2.1/libcloud/test/common/fixtures/digitalocean/_v2_actions_12345670.json0000664000175000017500000000063412701023453031052 0ustar kamikami00000000000000{"action":{"id":12345670,"status":"completed","type":"power_on","started_at":"2015-03-28T10:57:40Z","completed_at":"2015-03-28T10:57:42Z","resource_id":1234560,"resource_type":"droplet","region":{"name":"New York 3","slug":"nyc3","sizes":["512mb","1gb","2gb","4gb","8gb","16gb","32gb","48gb","64gb"],"features":["virtio","private_networking","backups","ipv6","metadata"],"available":true},"region_slug":"nyc3"}} apache-libcloud-2.2.1/libcloud/test/common/test_ovh.py0000664000175000017500000000223213153541406022670 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from libcloud.test import MockHttp FORMAT_URL = re.compile(r'[./?=-]') class BaseOvhMockHttp(MockHttp): def _get_method_name(self, type, use_param, qs, path): return "_json" def _json(self, method, url, body, headers): meth_name = '_json%s_%s' % (FORMAT_URL.sub('_', url), method.lower()) return getattr(self, meth_name)(method, url, body, headers) apache-libcloud-2.2.1/libcloud/test/common/test_openstack_identity.py0000664000175000017500000010062513153541406026001 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys import datetime try: import simplejson as json except ImportError: import json from mock import Mock from libcloud.utils.py3 import httplib from libcloud.common.openstack import OpenStackBaseConnection from libcloud.common.openstack_identity import AUTH_TOKEN_EXPIRES_GRACE_SECONDS from libcloud.common.openstack_identity import get_class_for_auth_version from libcloud.common.openstack_identity import OpenStackServiceCatalog from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection_OIDC_access_token from libcloud.common.openstack_identity import OpenStackIdentityUser from libcloud.compute.drivers.openstack import OpenStack_1_0_NodeDriver from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection_VOMS from libcloud.test import unittest from libcloud.test import MockHttp from libcloud.test.secrets import OPENSTACK_PARAMS from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.compute.test_openstack import OpenStackMockHttp from libcloud.test.compute.test_openstack import OpenStack_2_0_MockHttp class OpenStackIdentityConnectionTestCase(unittest.TestCase): def setUp(self): OpenStackBaseConnection.auth_url = None OpenStackBaseConnection.conn_class = OpenStackMockHttp def test_auth_url_is_correctly_assembled(self): tuples = [ ('1.0', OpenStackMockHttp), ('1.1', OpenStackMockHttp), ('2.0', OpenStack_2_0_MockHttp), ('2.0_apikey', OpenStack_2_0_MockHttp), ('2.0_password', OpenStack_2_0_MockHttp) ] APPEND = 0 NOTAPPEND = 1 auth_urls = [ ('https://auth.api.example.com', APPEND, ''), ('https://auth.api.example.com/', NOTAPPEND, '/'), ('https://auth.api.example.com/foo/bar', NOTAPPEND, '/foo/bar'), ('https://auth.api.example.com/foo/bar/', NOTAPPEND, '/foo/bar/') ] actions = { '1.0': '/v1.0', '1.1': '/v1.1/auth', '2.0': '/v2.0/tokens', 
'2.0_apikey': '/v2.0/tokens', '2.0_password': '/v2.0/tokens' } user_id = OPENSTACK_PARAMS[0] key = OPENSTACK_PARAMS[1] for (auth_version, mock_http_class) in tuples: for (url, should_append_default_path, expected_path) in auth_urls: connection = \ self._get_mock_connection(mock_http_class=mock_http_class, auth_url=url) auth_url = connection.auth_url cls = get_class_for_auth_version(auth_version=auth_version) osa = cls(auth_url=auth_url, user_id=user_id, key=key, parent_conn=connection) try: osa = osa.authenticate() except: pass if (should_append_default_path == APPEND): expected_path = actions[auth_version] self.assertEqual(osa.action, expected_path) def test_basic_authentication(self): tuples = [ ('1.0', OpenStackMockHttp), ('1.1', OpenStackMockHttp), ('2.0', OpenStack_2_0_MockHttp), ('2.0_apikey', OpenStack_2_0_MockHttp), ('2.0_password', OpenStack_2_0_MockHttp) ] user_id = OPENSTACK_PARAMS[0] key = OPENSTACK_PARAMS[1] for (auth_version, mock_http_class) in tuples: connection = \ self._get_mock_connection(mock_http_class=mock_http_class) auth_url = connection.auth_url cls = get_class_for_auth_version(auth_version=auth_version) osa = cls(auth_url=auth_url, user_id=user_id, key=key, parent_conn=connection) self.assertEqual(osa.urls, {}) self.assertEqual(osa.auth_token, None) self.assertEqual(osa.auth_user_info, None) osa = osa.authenticate() self.assertTrue(len(osa.urls) >= 1) self.assertTrue(osa.auth_token is not None) if auth_version in ['1.1', '2.0', '2.0_apikey', '2.0_password']: self.assertTrue(osa.auth_token_expires is not None) if auth_version in ['2.0', '2.0_apikey', '2.0_password']: self.assertTrue(osa.auth_user_info is not None) def test_token_expiration_and_force_reauthentication(self): user_id = OPENSTACK_PARAMS[0] key = OPENSTACK_PARAMS[1] connection = self._get_mock_connection(OpenStack_2_0_MockHttp) auth_url = connection.auth_url yesterday = datetime.datetime.today() - datetime.timedelta(1) tomorrow = datetime.datetime.today() + datetime.timedelta(1) 
osa = OpenStackIdentity_2_0_Connection(auth_url=auth_url, user_id=user_id, key=key, parent_conn=connection) mocked_auth_method = Mock(wraps=osa._authenticate_2_0_with_body) osa._authenticate_2_0_with_body = mocked_auth_method # Force re-auth, expired token osa.auth_token = None osa.auth_token_expires = yesterday count = 5 for i in range(0, count): osa.authenticate(force=True) self.assertEqual(mocked_auth_method.call_count, count) # No force reauth, expired token osa.auth_token = None osa.auth_token_expires = yesterday mocked_auth_method.call_count = 0 self.assertEqual(mocked_auth_method.call_count, 0) for i in range(0, count): osa.authenticate(force=False) self.assertEqual(mocked_auth_method.call_count, 1) # No force reauth, valid / non-expired token osa.auth_token = None mocked_auth_method.call_count = 0 self.assertEqual(mocked_auth_method.call_count, 0) for i in range(0, count): osa.authenticate(force=False) if i == 0: osa.auth_token_expires = tomorrow self.assertEqual(mocked_auth_method.call_count, 1) # No force reauth, valid / non-expired token which is about to expire in # less than AUTH_TOKEN_EXPIRES_GRACE_SECONDS soon = datetime.datetime.utcnow() + \ datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS - 1) osa.auth_token = None mocked_auth_method.call_count = 0 self.assertEqual(mocked_auth_method.call_count, 0) for i in range(0, count): if i == 0: osa.auth_token_expires = soon osa.authenticate(force=False) self.assertEqual(mocked_auth_method.call_count, 1) def _get_mock_connection(self, mock_http_class, auth_url=None): OpenStackBaseConnection.conn_class = mock_http_class if auth_url is None: auth_url = "https://auth.api.example.com" OpenStackBaseConnection.auth_url = auth_url connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) connection._ex_force_base_url = "https://www.foo.com" connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) return connection class OpenStackIdentity_2_0_ConnectionTests(unittest.TestCase): def setUp(self): 
mock_cls = OpenStackIdentity_2_0_MockHttp mock_cls.type = None OpenStackIdentity_2_0_Connection.conn_class = mock_cls self.auth_instance = OpenStackIdentity_2_0_Connection(auth_url='http://none', user_id='test', key='test', tenant_name='test') self.auth_instance.auth_token = 'mock' def test_list_projects(self): result = self.auth_instance.list_projects() self.assertEqual(len(result), 2) self.assertEqual(result[0].id, 'a') self.assertEqual(result[0].name, 'test') self.assertEqual(result[0].description, 'test project') self.assertTrue(result[0].enabled) class OpenStackIdentity_3_0_ConnectionTests(unittest.TestCase): def setUp(self): mock_cls = OpenStackIdentity_3_0_MockHttp mock_cls.type = None OpenStackIdentity_3_0_Connection.conn_class = mock_cls self.auth_instance = OpenStackIdentity_3_0_Connection(auth_url='http://none', user_id='test', key='test', tenant_name='test') self.auth_instance.auth_token = 'mock' def test_token_scope_argument(self): # Invalid token_scope value expected_msg = 'Invalid value for "token_scope" argument: foo' self.assertRaisesRegexp(ValueError, expected_msg, OpenStackIdentity_3_0_Connection, auth_url='http://none', user_id='test', key='test', token_scope='foo') # Missing tenant_name expected_msg = 'Must provide tenant_name and domain_name argument' self.assertRaisesRegexp(ValueError, expected_msg, OpenStackIdentity_3_0_Connection, auth_url='http://none', user_id='test', key='test', token_scope='project') # Missing domain_name expected_msg = 'Must provide domain_name argument' self.assertRaisesRegexp(ValueError, expected_msg, OpenStackIdentity_3_0_Connection, auth_url='http://none', user_id='test', key='test', token_scope='domain', domain_name=None) # Scope to project all ok OpenStackIdentity_3_0_Connection(auth_url='http://none', user_id='test', key='test', token_scope='project', tenant_name='test', domain_name='Default') # Scope to domain OpenStackIdentity_3_0_Connection(auth_url='http://none', user_id='test', key='test', 
token_scope='domain', tenant_name=None, domain_name='Default') def test_authenticate(self): auth = OpenStackIdentity_3_0_Connection(auth_url='http://none', user_id='test_user_id', key='test_key', token_scope='project', tenant_name="test_tenant", domain_name='test_domain') auth.authenticate() def test_list_supported_versions(self): OpenStackIdentity_3_0_MockHttp.type = 'v3' versions = self.auth_instance.list_supported_versions() self.assertEqual(len(versions), 2) self.assertEqual(versions[0].version, 'v2.0') self.assertEqual(versions[0].url, 'http://192.168.18.100:5000/v2.0/') self.assertEqual(versions[1].version, 'v3.0') self.assertEqual(versions[1].url, 'http://192.168.18.100:5000/v3/') def test_list_domains(self): domains = self.auth_instance.list_domains() self.assertEqual(len(domains), 1) self.assertEqual(domains[0].id, 'default') self.assertEqual(domains[0].name, 'Default') self.assertTrue(domains[0].enabled) def test_list_projects(self): projects = self.auth_instance.list_projects() self.assertEqual(len(projects), 4) self.assertEqual(projects[0].id, 'a') self.assertEqual(projects[0].domain_id, 'default') self.assertTrue(projects[0].enabled) self.assertEqual(projects[0].description, 'Test project') def test_list_users(self): users = self.auth_instance.list_users() self.assertEqual(len(users), 12) self.assertEqual(users[0].id, 'a') self.assertEqual(users[0].domain_id, 'default') self.assertEqual(users[0].enabled, True) self.assertEqual(users[0].email, 'openstack-test@localhost') def test_list_roles(self): roles = self.auth_instance.list_roles() self.assertEqual(len(roles), 2) self.assertEqual(roles[1].id, 'b') self.assertEqual(roles[1].name, 'admin') def test_list_user_projects(self): user = self.auth_instance.list_users()[0] projects = self.auth_instance.list_user_projects(user=user) self.assertEqual(len(projects), 0) def test_list_user_domain_roles(self): user = self.auth_instance.list_users()[0] domain = self.auth_instance.list_domains()[0] roles = 
self.auth_instance.list_user_domain_roles(domain=domain, user=user) self.assertEqual(len(roles), 1) self.assertEqual(roles[0].name, 'admin') def test_get_domain(self): domain = self.auth_instance.get_domain(domain_id='default') self.assertEqual(domain.name, 'Default') def test_create_user(self): user = self.auth_instance.create_user(email='test2@localhost', password='test1', name='test2', domain_id='default') self.assertEqual(user.id, 'c') self.assertEqual(user.name, 'test2') def test_enable_user(self): user = self.auth_instance.list_users()[0] result = self.auth_instance.enable_user(user=user) self.assertTrue(isinstance(result, OpenStackIdentityUser)) def test_disable_user(self): user = self.auth_instance.list_users()[0] result = self.auth_instance.disable_user(user=user) self.assertTrue(isinstance(result, OpenStackIdentityUser)) def test_grant_domain_role_to_user(self): domain = self.auth_instance.list_domains()[0] role = self.auth_instance.list_roles()[0] user = self.auth_instance.list_users()[0] result = self.auth_instance.grant_domain_role_to_user(domain=domain, role=role, user=user) self.assertTrue(result) def test_revoke_domain_role_from_user(self): domain = self.auth_instance.list_domains()[0] role = self.auth_instance.list_roles()[0] user = self.auth_instance.list_users()[0] result = self.auth_instance.revoke_domain_role_from_user(domain=domain, role=role, user=user) self.assertTrue(result) def test_grant_project_role_to_user(self): project = self.auth_instance.list_projects()[0] role = self.auth_instance.list_roles()[0] user = self.auth_instance.list_users()[0] result = self.auth_instance.grant_project_role_to_user(project=project, role=role, user=user) self.assertTrue(result) def test_revoke_project_role_from_user(self): project = self.auth_instance.list_projects()[0] role = self.auth_instance.list_roles()[0] user = self.auth_instance.list_users()[0] result = self.auth_instance.revoke_project_role_from_user(project=project, role=role, user=user) 
self.assertTrue(result) class OpenStackIdentity_3_0_Connection_OIDC_access_tokenTests( unittest.TestCase): def setUp(self): mock_cls = OpenStackIdentity_3_0_MockHttp mock_cls.type = None OpenStackIdentity_3_0_Connection_OIDC_access_token.conn_class = mock_cls self.auth_instance = OpenStackIdentity_3_0_Connection_OIDC_access_token(auth_url='http://none', user_id='idp', key='token', tenant_name='oidc', domain_name='test_domain') self.auth_instance.auth_token = 'mock' def test_authenticate(self): auth = OpenStackIdentity_3_0_Connection_OIDC_access_token(auth_url='http://none', user_id='idp', key='token', token_scope='project', tenant_name="oidc", domain_name='test_domain') auth.authenticate() class OpenStackIdentity_2_0_Connection_VOMSTests(unittest.TestCase): def setUp(self): mock_cls = OpenStackIdentity_2_0_Connection_VOMSMockHttp mock_cls.type = None OpenStackIdentity_2_0_Connection_VOMS.conn_class = mock_cls self.auth_instance = OpenStackIdentity_2_0_Connection_VOMS(auth_url='http://none', user_id=None, key='/tmp/proxy.pem', tenant_name='VO') self.auth_instance.auth_token = 'mock' def test_authenticate(self): auth = OpenStackIdentity_2_0_Connection_VOMS(auth_url='http://none', user_id=None, key='/tmp/proxy.pem', token_scope='test', tenant_name="VO") auth.authenticate() class OpenStackServiceCatalogTestCase(unittest.TestCase): fixtures = ComputeFileFixtures('openstack') def test_parsing_auth_v1_1(self): data = self.fixtures.load('_v1_1__auth.json') data = json.loads(data) service_catalog = data['auth']['serviceCatalog'] catalog = OpenStackServiceCatalog(service_catalog=service_catalog, auth_version='1.0') entries = catalog.get_entries() self.assertEqual(len(entries), 3) entry = [e for e in entries if e.service_type == 'cloudFilesCDN'][0] self.assertEqual(entry.service_type, 'cloudFilesCDN') self.assertEqual(entry.service_name, None) self.assertEqual(len(entry.endpoints), 2) self.assertEqual(entry.endpoints[0].region, 'ORD') self.assertEqual(entry.endpoints[0].url, 
'https://cdn2.clouddrive.com/v1/MossoCloudFS') self.assertEqual(entry.endpoints[0].endpoint_type, 'external') self.assertEqual(entry.endpoints[1].region, 'LON') self.assertEqual(entry.endpoints[1].endpoint_type, 'external') def test_parsing_auth_v2(self): data = self.fixtures.load('_v2_0__auth.json') data = json.loads(data) service_catalog = data['access']['serviceCatalog'] catalog = OpenStackServiceCatalog(service_catalog=service_catalog, auth_version='2.0') entries = catalog.get_entries() self.assertEqual(len(entries), 6) entry = [e for e in entries if e.service_name == 'cloudServers'][0] self.assertEqual(entry.service_type, 'compute') self.assertEqual(entry.service_name, 'cloudServers') self.assertEqual(len(entry.endpoints), 1) self.assertEqual(entry.endpoints[0].region, None) self.assertEqual(entry.endpoints[0].url, 'https://servers.api.rackspacecloud.com/v1.0/1337') self.assertEqual(entry.endpoints[0].endpoint_type, 'external') def test_parsing_auth_v3(self): data = self.fixtures.load('_v3__auth.json') data = json.loads(data) service_catalog = data['token']['catalog'] catalog = OpenStackServiceCatalog(service_catalog=service_catalog, auth_version='3.x') entries = catalog.get_entries() self.assertEqual(len(entries), 6) entry = [e for e in entries if e.service_type == 'volume'][0] self.assertEqual(entry.service_type, 'volume') self.assertEqual(entry.service_name, None) self.assertEqual(len(entry.endpoints), 3) self.assertEqual(entry.endpoints[0].region, 'regionOne') self.assertEqual(entry.endpoints[0].endpoint_type, 'external') self.assertEqual(entry.endpoints[1].region, 'regionOne') self.assertEqual(entry.endpoints[1].endpoint_type, 'admin') self.assertEqual(entry.endpoints[2].region, 'regionOne') self.assertEqual(entry.endpoints[2].endpoint_type, 'internal') def test_get_public_urls(self): data = self.fixtures.load('_v2_0__auth.json') data = json.loads(data) service_catalog = data['access']['serviceCatalog'] catalog = 
OpenStackServiceCatalog(service_catalog=service_catalog, auth_version='2.0') public_urls = catalog.get_public_urls(service_type='object-store') expected_urls = ['https://storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111', 'https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111'] self.assertEqual(public_urls, expected_urls) def test_get_regions(self): data = self.fixtures.load('_v2_0__auth.json') data = json.loads(data) service_catalog = data['access']['serviceCatalog'] catalog = OpenStackServiceCatalog(service_catalog=service_catalog, auth_version='2.0') regions = catalog.get_regions(service_type='object-store') self.assertEqual(regions, ['LON', 'ORD']) regions = catalog.get_regions(service_type='invalid') self.assertEqual(regions, []) def test_get_service_types(self): data = self.fixtures.load('_v2_0__auth.json') data = json.loads(data) service_catalog = data['access']['serviceCatalog'] catalog = OpenStackServiceCatalog(service_catalog=service_catalog, auth_version='2.0') service_types = catalog.get_service_types() self.assertEqual(service_types, ['compute', 'object-store', 'rax:object-cdn']) service_types = catalog.get_service_types(region='ORD') self.assertEqual(service_types, ['rax:object-cdn']) def test_get_service_names(self): data = self.fixtures.load('_v2_0__auth.json') data = json.loads(data) service_catalog = data['access']['serviceCatalog'] catalog = OpenStackServiceCatalog(service_catalog=service_catalog, auth_version='2.0') service_names = catalog.get_service_names() self.assertEqual(service_names, ['cloudFiles', 'cloudFilesCDN', 'cloudServers', 'cloudServersOpenStack', 'cloudServersPreprod', 'nova']) service_names = catalog.get_service_names(service_type='compute') self.assertEqual(service_names, ['cloudServers', 'cloudServersOpenStack', 'cloudServersPreprod', 'nova']) class OpenStackIdentity_2_0_MockHttp(MockHttp): fixtures = ComputeFileFixtures('openstack_identity/v2') 
json_content_headers = {'content-type': 'application/json; charset=UTF-8'} def _v2_0_tenants(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('v2_0_tenants.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() class OpenStackIdentity_3_0_MockHttp(MockHttp): fixtures = ComputeFileFixtures('openstack_identity/v3') json_content_headers = {'content-type': 'application/json; charset=UTF-8'} def _v3(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('v3_versions.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v3_domains(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('v3_domains.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v3_projects(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('v3_projects.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v3_auth_tokens(self, method, url, body, headers): if method == 'POST': status = httplib.OK data = json.loads(body) if 'password' in data['auth']['identity']: if data['auth']['identity']['password']['user']['domain']['name'] != 'test_domain' or \ data['auth']['scope']['project']['domain']['name'] != 'test_domain': status = httplib.UNAUTHORIZED body = ComputeFileFixtures('openstack').load('_v3__auth.json') headers = self.json_content_headers.copy() headers['x-subject-token'] = '00000000000000000000000000000000' return (status, body, headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v3_users(self, method, url, body, headers): if method == 'GET': # list users body = self.fixtures.load('v3_users.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) elif method == 
'POST': # create user body = self.fixtures.load('v3_create_user.json') return (httplib.CREATED, body, self.json_content_headers, httplib.responses[httplib.CREATED]) raise NotImplementedError() def _v3_users_a(self, method, url, body, headers): if method == 'PATCH': # enable / disable user body = self.fixtures.load('v3_users_a.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v3_roles(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('v3_roles.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v3_domains_default_users_a_roles_a(self, method, url, body, headers): if method == 'PUT': # grant domain role body = '' return (httplib.NO_CONTENT, body, self.json_content_headers, httplib.responses[httplib.NO_CONTENT]) elif method == 'DELETE': # revoke domain role body = '' return (httplib.NO_CONTENT, body, self.json_content_headers, httplib.responses[httplib.NO_CONTENT]) raise NotImplementedError() def _v3_projects_a_users_a_roles_a(self, method, url, body, headers): if method == 'PUT': # grant project role body = '' return (httplib.NO_CONTENT, body, self.json_content_headers, httplib.responses[httplib.NO_CONTENT]) elif method == 'DELETE': # revoke project role body = '' return (httplib.NO_CONTENT, body, self.json_content_headers, httplib.responses[httplib.NO_CONTENT]) raise NotImplementedError() def _v3_domains_default(self, method, url, body, headers): if method == 'GET': # get domain body = self.fixtures.load('v3_domains_default.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v3_users_a_projects(self, method, url, body, headers): if method == 'GET': # get user projects body = self.fixtures.load('v3_users_a_projects.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise 
NotImplementedError() def _v3_domains_default_users_a_roles(self, method, url, body, headers): if method == 'GET': # get user domain roles body = self.fixtures.load('v3_domains_default_users_a_roles.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v3_OS_FEDERATION_identity_providers_idp_protocols_oidc_auth(self, method, url, body, headers): if method == 'GET': headers = self.json_content_headers.copy() headers['x-subject-token'] = '00000000000000000000000000000000' return (httplib.OK, body, headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v3_OS_FEDERATION_projects(self, method, url, body, headers): if method == 'GET': # get user projects body = json.dumps({"projects": [{"id": "project_id"}]}) return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() class OpenStackIdentity_2_0_Connection_VOMSMockHttp(MockHttp): fixtures = ComputeFileFixtures('openstack_identity/v2') json_content_headers = {'content-type': 'application/json; charset=UTF-8'} def _v2_0_tokens(self, method, url, body, headers): if method == 'POST': status = httplib.UNAUTHORIZED data = json.loads(body) if 'voms' in data['auth'] and data['auth']['voms'] is True: status = httplib.OK body = ComputeFileFixtures('openstack').load('_v2_0__auth.json') headers = self.json_content_headers.copy() headers['x-subject-token'] = '00000000000000000000000000000000' return (status, body, headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v2_0_tenants(self, method, url, body, headers): if method == 'GET': # get user projects body = json.dumps({"tenant": [{"name": "tenant_name"}]}) return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() if __name__ == '__main__': sys.exit(unittest.main()) 
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

try:
    import simplejson as json
except ImportError:
    import json

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import b
from libcloud.utils.py3 import parse_qsl

from libcloud.common.cloudstack import CloudStackConnection
from libcloud.common.types import MalformedResponseError

from libcloud.test import MockHttp

# Module-level counter used by the "delayed async job" scenario: the mock
# decrements it on each poll, and the test asserts it reached zero.
async_delay = 0


class CloudStackMockDriver(object):
    # Minimal stand-in for a CloudStack driver; the connection only reads
    # these attributes.  The per-test "path" selects the mock endpoint.
    host = 'nonexistent.'
    path = '/path'
    async_poll_frequency = 0
    name = 'fake'
    async_delay = 0


class CloudStackCommonTest(unittest.TestCase):
    """Tests for CloudStackConnection sync/async request handling and
    request signing."""

    def setUp(self):
        CloudStackConnection.conn_class = CloudStackMockHttp
        self.connection = CloudStackConnection(
            'apikey', 'secret', host=CloudStackMockDriver.host)
        # Disable polling delays so async tests run instantly.
        self.connection.poll_interval = 0.0
        self.driver = self.connection.driver = CloudStackMockDriver()

    def test_sync_request_bad_response(self):
        # A JSON body without the expected '<command>response' key must be
        # reported as a malformed response.
        self.driver.path = '/bad/response'
        try:
            self.connection._sync_request('fake')
        except Exception:
            e = sys.exc_info()[1]
            self.assertTrue(isinstance(e, MalformedResponseError))
            return
        self.assertTrue(False)

    def test_sync_request(self):
        self.driver.path = '/sync'
        self.connection._sync_request('fake')

    def test_async_request_successful(self):
        self.driver.path = '/async/success'
        result = self.connection._async_request('fake')
        self.assertEqual(result, {'fake': 'result'})

    def test_async_request_unsuccessful(self):
        # jobstatus 2 means the async job failed; errortext becomes the
        # exception message.
        self.driver.path = '/async/fail'
        try:
            self.connection._async_request('fake')
        except Exception:
            e = sys.exc_info()[1]
            self.assertEqual(CloudStackMockHttp.ERROR_TEXT, str(e))
            return
        self.assertFalse(True)

    def test_async_request_delayed(self):
        # The job stays pending (jobstatus 0) for two polls before
        # completing; the connection must keep polling until done.
        global async_delay
        self.driver.path = '/async/delayed'
        async_delay = 2
        self.connection._async_request('fake')
        self.assertEqual(async_delay, 0)

    def test_signature_algorithm(self):
        # Known-good HMAC-SHA1 signatures, including values that need URL
        # quoting ('+' in displayname).
        cases = [
            (
                {
                    'command': 'listVirtualMachines'
                }, 'z/a9Y7J52u48VpqIgiwaGUMCso0='
            ), (
                {
                    'command': 'deployVirtualMachine',
                    'name': 'fred',
                    'displayname': 'George',
                    'serviceofferingid': 5,
                    'templateid': 17,
                    'zoneid': 23,
                    'networkids': 42
                }, 'gHTo7mYmadZ+zluKHzlEKb1i/QU='
            ), (
                {
                    'command': 'deployVirtualMachine',
                    'name': 'fred',
                    'displayname': 'George+Ringo',
                    'serviceofferingid': 5,
                    'templateid': 17,
                    'zoneid': 23,
                    'networkids': 42
                }, 'tAgfrreI1ZvWlWLClD3gu4+aKv4='
            )
        ]

        connection = CloudStackConnection('fnord', 'abracadabra')
        for case in cases:
            params = connection.add_default_params(case[0])
            self.assertEqual(connection._make_signature(params), b(case[1]))


class CloudStackMockHttp(MockHttp, unittest.TestCase):
    # Mock endpoints keyed by driver path; each asserts the request is a
    # well-formed, signed CloudStack API call.
    ERROR_TEXT = 'ERROR TEXT'

    def _response(self, status, result, response):
        return (status, json.dumps(result), {}, response)

    def _check_request(self, url):
        url = urlparse.urlparse(url)
        query = dict(parse_qsl(url.query))

        self.assertTrue('apiKey' in query)
        self.assertTrue('command' in query)
        self.assertTrue('response' in query)
        self.assertTrue('signature' in query)

        self.assertTrue(query['response'] == 'json')

        return query

    def _bad_response(self, method, url, body, headers):
        # Valid JSON, but missing the '<command>response' wrapper key.
        self._check_request(url)
        result = {'success': True}
        return self._response(httplib.OK, result,
                              httplib.responses[httplib.OK])

    def _sync(self, method, url, body, headers):
        query = self._check_request(url)
        result = {query['command'].lower() + 'response': {}}
        return self._response(httplib.OK, result,
                              httplib.responses[httplib.OK])

    def _async_success(self, method, url, body, headers):
        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            result = {
                query['command'].lower() + 'response': {
                    'jobstatus': 1,
                    'jobresult': {'fake': 'result'}
                }
            }
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result,
                              httplib.responses[httplib.OK])

    def _async_fail(self, method, url, body, headers):
        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            result = {
                query['command'].lower() + 'response': {
                    'jobstatus': 2,
                    'jobresult': {'errortext': self.ERROR_TEXT}
                }
            }
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result,
                              httplib.responses[httplib.OK])

    def _async_delayed(self, method, url, body, headers):
        # Report the job as pending until the module-level async_delay
        # counter reaches zero, then report success.
        global async_delay
        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            if async_delay == 0:
                result = {
                    query['command'].lower() + 'response': {
                        'jobstatus': 1,
                        'jobresult': {'fake': 'result'}
                    }
                }
            else:
                result = {
                    query['command'].lower() + 'response': {
                        'jobstatus': 0,
                    }
                }
                async_delay -= 1
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result,
                              httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())

# ---- next archive member: libcloud/test/common/test_aws.py ----
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from datetime import datetime

import mock

from libcloud.common.aws import AWSRequestSignerAlgorithmV4
from libcloud.common.aws import SignedAWSConnection
from libcloud.common.aws import UNSIGNED_PAYLOAD
from libcloud.test import LibcloudTestCase


class EC2MockDriver(object):
    # Minimal driver stand-in; the signer only reads region_name.
    region_name = 'my_region'


class AWSRequestSignerAlgorithmV4TestCase(LibcloudTestCase):
    """Tests for the AWS Signature Version 4 request signer.

    All expected values are pre-computed against a fixed timestamp
    (2015-03-04T17:34:52Z), key, region and service name.
    """

    def setUp(self):
        SignedAWSConnection.driver = EC2MockDriver()
        SignedAWSConnection.service_name = 'my_service'
        SignedAWSConnection.version = '2013-10-15'
        self.connection = SignedAWSConnection('my_key', 'my_secret')
        self.signer = AWSRequestSignerAlgorithmV4(access_key='my_key',
                                                  access_secret='my_secret',
                                                  version='2013-10-15',
                                                  connection=self.connection)

        SignedAWSConnection.action = '/my_action/'
        SignedAWSConnection.driver = EC2MockDriver()

        # Fixed signing time so the expected signatures are deterministic.
        self.now = datetime(2015, 3, 4, hour=17, minute=34, second=52)

    def test_v4_signature(self):
        # End-to-end check of the full Authorization header.
        params = {
            'Action': 'DescribeInstances',
            'Version': '2013-10-15'
        }
        headers = {
            'Host': 'ec2.eu-west-1.amazonaws.com',
            'Accept-Encoding': 'gzip,deflate',
            'X-AMZ-Date': '20150304T173452Z',
            'User-Agent': 'libcloud/0.17.0 (Amazon EC2 (eu-central-1)) '
        }
        dt = self.now
        sig = self.signer._get_authorization_v4_header(params=params,
                                                       headers=headers,
                                                       dt=dt,
                                                       method='GET',
                                                       path='/my_action/')
        self.assertEqual(sig, 'AWS4-HMAC-SHA256 '
                              'Credential=my_key/20150304/my_region/my_service/aws4_request, '
                              'SignedHeaders=accept-encoding;host;user-agent;x-amz-date, '
                              'Signature=f9868f8414b3c3f856c7955019cc1691265541f5162b9b772d26044280d39bd3')

    def test_v4_signature_contains_user_id(self):
        sig = self.signer._get_authorization_v4_header(params={}, headers={},
                                                       dt=self.now)
        self.assertIn('Credential=my_key/', sig)

    def test_v4_signature_contains_credential_scope(self):
        with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_credential_scope') as mock_get_creds:
            mock_get_creds.return_value = 'my_credential_scope'
            sig = self.signer._get_authorization_v4_header(params={},
                                                           headers={},
                                                           dt=self.now)
        self.assertIn('Credential=my_key/my_credential_scope, ', sig)

    def test_v4_signature_contains_signed_headers(self):
        with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_signed_headers') as mock_get_headers:
            mock_get_headers.return_value = 'my_signed_headers'
            sig = self.signer._get_authorization_v4_header({}, {}, self.now,
                                                           method='GET',
                                                           path='/')
        self.assertIn('SignedHeaders=my_signed_headers, ', sig)

    def test_v4_signature_contains_signature(self):
        with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_signature') as mock_get_signature:
            mock_get_signature.return_value = 'my_signature'
            sig = self.signer._get_authorization_v4_header({}, {}, self.now)
        self.assertIn('Signature=my_signature', sig)

    def test_get_signature_(self):
        # The final signature is HMAC(signing_key, string_to_sign) in hex.
        def _sign(key, msg, hex=False):
            if hex:
                return 'H|%s|%s' % (key, msg)
            else:
                return '%s|%s' % (key, msg)

        with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_key_to_sign_with') as mock_get_key:
            with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_string_to_sign') as mock_get_string:
                with mock.patch('libcloud.common.aws._sign', new=_sign):
                    mock_get_key.return_value = 'my_signing_key'
                    mock_get_string.return_value = 'my_string_to_sign'
                    sig = self.signer._get_signature({}, {}, self.now,
                                                     method='GET', path='/',
                                                     data=None)

        self.assertEqual(sig, 'H|my_signing_key|my_string_to_sign')

    def test_get_string_to_sign(self):
        with mock.patch('hashlib.sha256') as mock_sha256:
            mock_sha256.return_value.hexdigest.return_value = \
                'chksum_of_canonical_request'
            to_sign = self.signer._get_string_to_sign({}, {}, self.now,
                                                      method='GET', path='/',
                                                      data=None)

        self.assertEqual(to_sign,
                         'AWS4-HMAC-SHA256\n'
                         '20150304T173452Z\n'
                         '20150304/my_region/my_service/aws4_request\n'
                         'chksum_of_canonical_request')

    def test_get_key_to_sign_with(self):
        # The signing key is a chained HMAC over date / region / service.
        def _sign(key, msg, hex=False):
            return '%s|%s' % (key, msg)

        with mock.patch('libcloud.common.aws._sign', new=_sign):
            key = self.signer._get_key_to_sign_with(self.now)

        self.assertEqual(key,
                         'AWS4my_secret|20150304|my_region|my_service|aws4_request')

    def test_get_signed_headers_contains_all_headers_lowercased(self):
        headers = {'Content-Type': 'text/plain', 'Host': 'my_host',
                   'X-Special-Header': ''}
        signed_headers = self.signer._get_signed_headers(headers)

        self.assertIn('content-type', signed_headers)
        self.assertIn('host', signed_headers)
        self.assertIn('x-special-header', signed_headers)

    def test_get_signed_headers_concats_headers_sorted_lexically(self):
        headers = {'Host': 'my_host', 'X-Special-Header': '',
                   '1St-Header': '2', 'Content-Type': 'text/plain'}
        signed_headers = self.signer._get_signed_headers(headers)

        self.assertEqual(signed_headers,
                         '1st-header;content-type;host;x-special-header')

    def test_get_credential_scope(self):
        scope = self.signer._get_credential_scope(self.now)
        self.assertEqual(scope, '20150304/my_region/my_service/aws4_request')

    def test_get_canonical_headers_joins_all_headers(self):
        headers = {
            'accept-encoding': 'gzip,deflate',
            'host': 'my_host',
        }
        self.assertEqual(self.signer._get_canonical_headers(headers),
                         'accept-encoding:gzip,deflate\n'
                         'host:my_host\n')

    def test_get_canonical_headers_sorts_headers_lexically(self):
        headers = {
            'accept-encoding': 'gzip,deflate',
            'host': 'my_host',
            '1st-header': '2',
            'x-amz-date': '20150304T173452Z',
            'user-agent': 'my-ua'
        }
        self.assertEqual(self.signer._get_canonical_headers(headers),
                         '1st-header:2\n'
                         'accept-encoding:gzip,deflate\n'
                         'host:my_host\n'
                         'user-agent:my-ua\n'
                         'x-amz-date:20150304T173452Z\n')

    def test_get_canonical_headers_lowercases_headers_names(self):
        headers = {
            'Accept-Encoding': 'GZIP,DEFLATE',
            'User-Agent': 'My-UA'
        }
        self.assertEqual(self.signer._get_canonical_headers(headers),
                         'accept-encoding:GZIP,DEFLATE\n'
                         'user-agent:My-UA\n')

    def test_get_canonical_headers_trims_header_values(self):
        # TODO: according to AWS spec (and RFC 2616 Section 4.2.) excess whitespace
        # from inside non-quoted strings should be stripped. Now we only strip the
        # start and end of the string. See
        # http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
        headers = {
            'accept-encoding': ' gzip,deflate',
            'user-agent': 'libcloud/0.17.0 '
        }
        self.assertEqual(self.signer._get_canonical_headers(headers),
                         'accept-encoding:gzip,deflate\n'
                         'user-agent:libcloud/0.17.0\n')

    def test_get_request_params_joins_params_sorted_lexically(self):
        self.assertEqual(self.signer._get_request_params({
            'Action': 'DescribeInstances',
            'Filter.1.Name': 'state',
            'Version': '2013-10-15'
        }),
            'Action=DescribeInstances&Filter.1.Name=state&Version=2013-10-15')

    def test_get_canonical_headers_allow_numeric_header_value(self):
        headers = {
            'Accept-Encoding': 'gzip,deflate',
            'Content-Length': 314
        }
        self.assertEqual(self.signer._get_canonical_headers(headers),
                         'accept-encoding:gzip,deflate\n'
                         'content-length:314\n')

    def test_get_request_params_allows_integers_as_value(self):
        self.assertEqual(
            self.signer._get_request_params({'Action': 'DescribeInstances',
                                             'Port': 22}),
            'Action=DescribeInstances&Port=22')

    def test_get_request_params_urlquotes_params_keys(self):
        self.assertEqual(
            self.signer._get_request_params({'Action+Reaction':
                                             'DescribeInstances'}),
            'Action%2BReaction=DescribeInstances')

    def test_get_request_params_urlquotes_params_values(self):
        self.assertEqual(self.signer._get_request_params({
            'Action': 'DescribeInstances&Addresses',
            'Port-Range': '2000 3000'
        }),
            'Action=DescribeInstances%26Addresses&Port-Range=2000%203000')

    def test_get_request_params_urlquotes_params_values_allows_safe_chars_in_value(self):
        # http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
        self.assertEqual(
            'Action=a~b.c_d-e',
            self.signer._get_request_params({'Action': 'a~b.c_d-e'}))

    def test_get_payload_hash_returns_digest_of_empty_string_for_GET_requests(self):
        # SHA-256 of the empty string.
        SignedAWSConnection.method = 'GET'
        self.assertEqual(
            self.signer._get_payload_hash(method='GET'),
            'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')

    def test_get_payload_hash_with_data_for_PUT_requests(self):
        SignedAWSConnection.method = 'PUT'
        self.assertEqual(
            self.signer._get_payload_hash(method='PUT', data='DUMMY'),
            'ceec12762e66397b56dad64fd270bb3d694c78fb9cd665354383c0626dbab013')

    def test_get_payload_hash_with_empty_data_for_POST_requests(self):
        SignedAWSConnection.method = 'POST'
        self.assertEqual(self.signer._get_payload_hash(method='POST'),
                         UNSIGNED_PAYLOAD)

    def test_get_canonical_request(self):
        req = self.signer._get_canonical_request(
            {'Action': 'DescribeInstances', 'Version': '2013-10-15'},
            {'Accept-Encoding': 'gzip,deflate', 'User-Agent': 'My-UA'},
            method='GET',
            path='/my_action/',
            data=None
        )
        self.assertEqual(req, 'GET\n'
                              '/my_action/\n'
                              'Action=DescribeInstances&Version=2013-10-15\n'
                              'accept-encoding:gzip,deflate\n'
                              'user-agent:My-UA\n'
                              '\n'
                              'accept-encoding;user-agent\n'
                              'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')

    def test_post_canonical_request(self):
        req = self.signer._get_canonical_request(
            {'Action': 'DescribeInstances', 'Version': '2013-10-15'},
            {'Accept-Encoding': 'gzip,deflate', 'User-Agent': 'My-UA'},
            method='POST',
            path='/my_action/',
            data='{}'
        )
        self.assertEqual(req, 'POST\n'
                              '/my_action/\n'
                              'Action=DescribeInstances&Version=2013-10-15\n'
                              'accept-encoding:gzip,deflate\n'
                              'user-agent:My-UA\n'
                              '\n'
                              'accept-encoding;user-agent\n'
                              '44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a')


if __name__ == '__main__':
    sys.exit(unittest.main())

# ---- next archive member: libcloud/test/common/test_digitalocean_v2.py ----
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

from libcloud.common.types import InvalidCredsError
from libcloud.common.digitalocean import DigitalOceanBaseDriver
from libcloud.test import LibcloudTestCase, MockHttp
from libcloud.test.file_fixtures import FileFixtures
from libcloud.test.secrets import DIGITALOCEAN_v2_PARAMS
from libcloud.utils.py3 import httplib


class DigitalOceanTests(LibcloudTestCase):
    """Tests for the DigitalOcean v2 base driver (account info, events,
    pagination)."""

    def setUp(self):
        DigitalOceanBaseDriver.connectionCls.conn_class = \
            DigitalOceanMockHttp
        DigitalOceanMockHttp.type = None
        self.driver = DigitalOceanBaseDriver(*DIGITALOCEAN_v2_PARAMS)

    def test_authentication(self):
        # A 401 response must surface as InvalidCredsError.
        DigitalOceanMockHttp.type = 'UNAUTHORIZED'
        self.assertRaises(InvalidCredsError, self.driver.ex_account_info)

    def test_ex_account_info(self):
        account_info = self.driver.ex_account_info()
        self.assertEqual(account_info['uuid'],
                         'a1234567890b1234567890c1234567890d12345')
        self.assertTrue(account_info['email_verified'])
        self.assertEqual(account_info['email'], 'user@domain.tld')
        self.assertEqual(account_info['droplet_limit'], 10)

    def test_ex_list_events(self):
        events = self.driver.ex_list_events()
        self.assertEqual(events, [])

    def test_ex_get_event(self):
        action = self.driver.ex_get_event('12345670')
        self.assertEqual(action["id"], 12345670)
        self.assertEqual(action["status"], "completed")
        self.assertEqual(action["type"], "power_on")

    def test__paginated_request(self):
        DigitalOceanMockHttp.type = 'page_1'
        actions = self.driver._paginated_request('/v2/actions', 'actions')
        self.assertEqual(actions[0]['id'], 12345671)
        self.assertEqual(actions[0]['status'], 'completed')


class DigitalOceanMockHttp(MockHttp):
    fixtures = FileFixtures('common', 'digitalocean')

    # Map of the per-test "type" attribute to the HTTP status returned.
    response = {
        None: httplib.OK,
        'CREATE': httplib.CREATED,
        'DELETE': httplib.NO_CONTENT,
        'EMPTY': httplib.OK,
        'NOT_FOUND': httplib.NOT_FOUND,
        'UNAUTHORIZED': httplib.UNAUTHORIZED,
        'UPDATE': httplib.OK
    }

    def _v2_account(self, method, url, body, headers):
        body = self.fixtures.load('_v2_account.json')
        return (self.response[self.type], body, {},
                httplib.responses[self.response[self.type]])

    def _v2_account_UNAUTHORIZED(self, method, url, body, headers):
        body = self.fixtures.load('_v2_account_UNAUTHORIZED.json')
        return (self.response[self.type], body, {},
                httplib.responses[self.response[self.type]])

    def _v2_actions(self, method, url, body, headers):
        body = self.fixtures.load('_v2_actions.json')
        return (self.response[self.type], body, {},
                httplib.responses[self.response[self.type]])

    def _v2_actions_12345670(self, method, url, body, headers):
        body = self.fixtures.load('_v2_actions_12345670.json')
        return (self.response[self.type], body, {},
                httplib.responses[self.response[self.type]])

    def _v2_actions_page_1(self, method, url, body, headers):
        body = self.fixtures.load('_v2_actions_page_1.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())

# ---- next archive member: libcloud/test/common/test_nfsn.py ----
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import string
import sys
import unittest

from mock import Mock, patch

from libcloud.common.nfsn import NFSNConnection
from libcloud.test import LibcloudTestCase, MockHttp
from libcloud.utils.py3 import httplib

# Frozen time and salt so the expected auth header digest below is
# deterministic.
mock_time = Mock()
mock_time.return_value = 1000000
mock_salt = Mock()
mock_salt.return_value = 'yumsalty1234'
# login;timestamp;salt;SHA1(login;timestamp;salt;api-key;request-uri;body-hash)
mock_header = 'testid;1000000;yumsalty1234;66dfb282a9532e5b8e6a9517764d5fbc001a4a2e'


class NFSNConnectionTestCase(LibcloudTestCase):
    """Tests for the NearlyFreeSpeech.NET connection's salted
    authentication header."""

    def setUp(self):
        NFSNConnection.conn_class = NFSNMockHttp
        NFSNMockHttp.type = None
        self.driver = NFSNConnection('testid', 'testsecret')

    def test_salt_length(self):
        self.assertEqual(16, len(self.driver._salt()))

    def test_salt_is_unique(self):
        s1 = self.driver._salt()
        s2 = self.driver._salt()
        self.assertNotEqual(s1, s2)

    def test_salt_characters(self):
        """ salt must be alphanumeric """
        salt_characters = string.ascii_letters + string.digits
        for c in self.driver._salt():
            self.assertIn(c, salt_characters)

    @patch('time.time', mock_time)
    def test_timestamp(self):
        """ Check that timestamp uses time.time """
        self.assertEqual('1000000', self.driver._timestamp())

    @patch('time.time', mock_time)
    @patch('libcloud.common.nfsn.NFSNConnection._salt', mock_salt)
    def test_auth_header(self):
        """ Check that X-NFSN-Authentication is set """
        response = self.driver.request(action='/testing')
        self.assertEqual(httplib.OK, response.status)


class NFSNMockHttp(MockHttp):

    def _testing(self, method, url, body, headers):
        # 200 only when the pre-computed auth header matches exactly.
        if headers['X-NFSN-Authentication'] == mock_header:
            return (httplib.OK, '', {}, httplib.responses[httplib.OK])
        else:
            return (httplib.UNAUTHORIZED, '', {},
                    httplib.responses[httplib.UNAUTHORIZED])


if __name__ == '__main__':
    sys.exit(unittest.main())

# ---- next archive member: libcloud/test/common/test_openstack.py ----
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest

from mock import Mock

from libcloud.common.base import LibcloudConnection
from libcloud.common.openstack import OpenStackBaseConnection
from libcloud.utils.py3 import PY25


class OpenStackBaseConnectionTest(unittest.TestCase):
    """Verify that a user supplied timeout is propagated to the
    underlying connection class."""

    def setUp(self):
        self.timeout = 10
        OpenStackBaseConnection.conn_class = Mock()
        self.connection = OpenStackBaseConnection(
            'foo', 'bar', timeout=self.timeout,
            ex_force_auth_url='https://127.0.0.1')
        driver = Mock()
        driver.name = 'OpenStackDriver'
        self.connection.driver = driver

    def tearDown(self):
        # Restore the real connection class so other test modules are
        # not affected by the Mock installed in setUp.
        OpenStackBaseConnection.conn_class = LibcloudConnection

    def test_base_connection_timeout(self):
        self.connection.connect()
        self.assertEqual(self.connection.timeout, self.timeout)

        # Python 2.5's httplib does not accept the extra keyword
        # arguments, so fewer kwargs are expected there.
        expected_kwargs = {'host': '127.0.0.1', 'port': 443}
        if not PY25:
            expected_kwargs['secure'] = 1
            expected_kwargs['timeout'] = 10
        self.connection.conn_class.assert_called_with(**expected_kwargs)


if __name__ == '__main__':
    sys.exit(unittest.main())

# ---- next archive member: libcloud/test/common/test_base.py ----
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys

import mock

from libcloud.common.base import LazyObject
from libcloud.test import LibcloudTestCase


class LazyObjectTest(LibcloudTestCase):
    """Tests for LazyObject: construction is deferred until the wrapped
    object is first needed."""

    class A(LazyObject):
        def __init__(self, x, y=None):
            self.x = x
            self.y = y

    def test_lazy_init(self):
        # Eager construction behaves like a normal class.
        eager = self.A(1, y=2)
        self.assertTrue(isinstance(eager, self.A))

        # Lazy construction returns a proxy that is still an instance of
        # A, but whose __init__ has not run yet.
        with mock.patch.object(self.A, '__init__',
                               return_value=None) as mock_init:
            lazy = self.A.lazy(3, y=4)
            self.assertTrue(isinstance(lazy, self.A))
            mock_init.assert_not_called()

            # Accessing __dict__ forces initialization of the wrapped
            # object.  Because __init__ is mocked out here, no actual A
            # object is built, so the dict stays empty — but the lazy
            # machinery must have invoked __init__ exactly once with the
            # deferred arguments.
            self.assertEqual(lazy.__dict__, {})
            mock_init.assert_called_once_with(3, y=4)

    def test_setattr(self):
        lazy = self.A.lazy('foo', y='bar')
        lazy.z = 'baz'
        wrapped = object.__getattribute__(lazy, '_lazy_obj')
        # Writes through the proxy must land on the wrapped object too.
        self.assertEqual(lazy.z, 'baz')
        self.assertEqual(wrapped.z, 'baz')


if __name__ == '__main__':
    sys.exit(unittest.main())

# ---- next archive member: libcloud/test/common/test_aliyun.py ----
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest

from libcloud.common import aliyun
from libcloud.common.aliyun import AliyunRequestSignerAlgorithmV1_0
from libcloud.test import LibcloudTestCase


class AliyunRequestSignerAlgorithmV1_0TestCase(LibcloudTestCase):
    """Tests for the Aliyun (ECS) HMAC-SHA1 request signer."""

    def setUp(self):
        self.signer = AliyunRequestSignerAlgorithmV1_0('testid',
                                                       'testsecret',
                                                       '1.0')

    def test_sign_request(self):
        # Known-good signature for a fixed parameter set and nonce.
        params = {'TimeStamp': '2012-12-26T10:33:56Z',
                  'Format': 'XML',
                  'AccessKeyId': 'testid',
                  'Action': 'DescribeRegions',
                  'SignatureMethod': 'HMAC-SHA1',
                  'RegionId': 'region1',
                  'SignatureNonce': 'NwDAxvLU6tFE0DVb',
                  'Version': '2014-05-26',
                  'SignatureVersion': '1.0'}
        method = 'GET'
        path = '/'

        expected = 'K9fCVP6Jrklpd3rLYKh1pfrrFNo='
        self.assertEqual(expected,
                         self.signer._sign_request(params, method, path))


class AliyunCommonTestCase(LibcloudTestCase):

    def test_percent_encode(self):
        # Input -> expected percent-encoded output.
        data = {
            'abc': 'abc',
            ' *~': '%20%2A~'
        }
        for key in data:
            self.assertEqual(data[key], aliyun._percent_encode(key))


if __name__ == '__main__':
    sys.exit(unittest.main())

# ---- next archive member: libcloud/test/common/test_base_driver.py ----
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys from mock import Mock from libcloud.common.base import BaseDriver from libcloud.test import unittest class BaseDriverTestCase(unittest.TestCase): def test_timeout_argument_propagation_and_preservation(self): class DummyDriver1(BaseDriver): pass # 1. No timeout provided DummyDriver1.connectionCls = Mock() DummyDriver1(key='foo') call_kwargs = DummyDriver1.connectionCls.call_args[1] self.assertEqual(call_kwargs['timeout'], None) self.assertEqual(call_kwargs['retry_delay'], None) # 2. Timeout provided as constructor argument class DummyDriver1(BaseDriver): pass DummyDriver1.connectionCls = Mock() DummyDriver1(key='foo', timeout=12) call_kwargs = DummyDriver1.connectionCls.call_args[1] self.assertEqual(call_kwargs['timeout'], 12) self.assertEqual(call_kwargs['retry_delay'], None) # 3. timeout provided via "_ex_connection_class_kwargs" method class DummyDriver2(BaseDriver): def _ex_connection_class_kwargs(self): result = {} result['timeout'] = 13 return result DummyDriver2.connectionCls = Mock() DummyDriver2(key='foo') call_kwargs = DummyDriver2.connectionCls.call_args[1] # 4. Value provided via "_ex_connection_class_kwargs" and constructor, # constructor should win DummyDriver2.connectionCls = Mock() DummyDriver2(key='foo', timeout=14, retry_delay=10) call_kwargs = DummyDriver2.connectionCls.call_args[1] self.assertEqual(call_kwargs['timeout'], 14) self.assertEqual(call_kwargs['retry_delay'], 10) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/common/test_gandi.py0000664000175000017500000000241512701023453023154 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.utils.py3 import xmlrpclib from libcloud.test import MockHttp class BaseGandiMockHttp(MockHttp): def _get_method_name(self, type, use_param, qs, path): return "_xmlrpc" def _xmlrpc(self, method, url, body, headers): params, methodName = xmlrpclib.loads(body) meth_name = '_xmlrpc__' + methodName.replace('.', '_') if self.type: meth_name = '%s_%s' % (meth_name, self.type) return getattr(self, meth_name)(method, url, body, headers) apache-libcloud-2.2.1/libcloud/test/common/test_google.py0000664000175000017500000003667413153541406023371 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tests for Google Connection classes. 
""" import datetime import mock import os import sys import unittest try: import simplejson as json except ImportError: import json from libcloud.common.google import (GoogleAuthError, GoogleAuthType, GoogleBaseAuthConnection, GoogleInstalledAppAuthConnection, GoogleServiceAcctAuthConnection, GoogleGCEServiceAcctAuthConnection, GoogleOAuth2Credential, GoogleBaseConnection, _utcnow, _utc_timestamp) from libcloud.test import MockHttp, LibcloudTestCase from libcloud.utils.py3 import httplib # Skip some tests if PyCrypto is unavailable try: from Crypto.Hash import SHA256 except ImportError: SHA256 = None SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__)) PEM_KEY = os.path.join(SCRIPT_PATH, 'fixtures', 'google', 'pkey.pem') JSON_KEY = os.path.join(SCRIPT_PATH, 'fixtures', 'google', 'pkey.json') with open(JSON_KEY, 'r') as f: KEY_STR = json.loads(f.read())['private_key'] GCE_PARAMS = ('email@developer.gserviceaccount.com', 'key') GCE_PARAMS_PEM_KEY = ('email@developer.gserviceaccount.com', PEM_KEY) GCE_PARAMS_JSON_KEY = ('email@developer.gserviceaccount.com', JSON_KEY) GCE_PARAMS_KEY = ('email@developer.gserviceaccount.com', KEY_STR) GCE_PARAMS_IA = ('client_id', 'client_secret') GCE_PARAMS_GCE = ('foo', 'bar') GCS_S3_PARAMS = ('GOOG0123456789ABCXYZ', # GOOG + 16 alphanumeric chars '0102030405060708091011121314151617181920') # 40 base64 chars STUB_UTCNOW = _utcnow() STUB_TOKEN = { 'access_token': 'tokentoken', 'token_type': 'Bearer', 'expires_in': 3600 } STUB_IA_TOKEN = { 'access_token': 'installedapp', 'token_type': 'Bearer', 'expires_in': 3600, 'refresh_token': 'refreshrefresh' } STUB_REFRESH_TOKEN = { 'access_token': 'refreshrefresh', 'token_type': 'Bearer', 'expires_in': 3600 } STUB_TOKEN_FROM_FILE = { 'access_token': 'token_from_file', 'token_type': 'Bearer', 'expire_time': _utc_timestamp(STUB_UTCNOW + datetime.timedelta(seconds=3600)), 'expires_in': 3600 } class MockJsonResponse(object): def __init__(self, body): self.object = body class 
GoogleTestCase(LibcloudTestCase): """ Assists in making Google tests hermetic and deterministic. Add anything that needs to be mocked here. Create a patcher with the suffix '_patcher'. e.g. _foo_patcher = mock.patch('module.submodule.class.foo', ...) Patchers are started at setUpClass and stopped at tearDownClass. Ideally, you should make a note in the thing being mocked, for clarity. """ PATCHER_SUFFIX = '_patcher' _utcnow_patcher = mock.patch( 'libcloud.common.google._utcnow', return_value=STUB_UTCNOW) _authtype_is_gce_patcher = mock.patch( 'libcloud.common.google.GoogleAuthType._is_gce', return_value=False) _read_token_file_patcher = mock.patch( 'libcloud.common.google.GoogleOAuth2Credential._get_token_from_file', return_value=STUB_TOKEN_FROM_FILE ) _write_token_file_patcher = mock.patch( 'libcloud.common.google.GoogleOAuth2Credential._write_token_to_file') _ia_get_code_patcher = mock.patch( 'libcloud.common.google.GoogleInstalledAppAuthConnection.get_code', return_value=1234 ) @classmethod def setUpClass(cls): super(GoogleTestCase, cls).setUpClass() for patcher in [a for a in dir(cls) if a.endswith(cls.PATCHER_SUFFIX)]: getattr(cls, patcher).start() @classmethod def tearDownClass(cls): super(GoogleTestCase, cls).tearDownClass() for patcher in [a for a in dir(cls) if a.endswith(cls.PATCHER_SUFFIX)]: getattr(cls, patcher).stop() class GoogleBaseAuthConnectionTest(GoogleTestCase): """ Tests for GoogleBaseAuthConnection """ def setUp(self): GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp self.mock_scopes = ['foo', 'bar'] kwargs = {'scopes': self.mock_scopes} self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS, **kwargs) def test_scopes(self): self.assertEqual(self.conn.scopes, 'foo bar') def test_add_default_headers(self): old_headers = {} expected_headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Host': 'accounts.google.com'} new_headers = self.conn.add_default_headers(old_headers) self.assertEqual(new_headers, expected_headers) def 
test_token_request(self): request_body = {'code': 'asdf', 'client_id': self.conn.user_id, 'client_secret': self.conn.key, 'redirect_uri': self.conn.redirect_uri, 'grant_type': 'authorization_code'} new_token = self.conn._token_request(request_body) self.assertEqual(new_token['access_token'], STUB_IA_TOKEN['access_token']) exp = STUB_UTCNOW + datetime.timedelta( seconds=STUB_IA_TOKEN['expires_in']) self.assertEqual(new_token['expire_time'], _utc_timestamp(exp)) class GoogleInstalledAppAuthConnectionTest(GoogleTestCase): """ Tests for GoogleInstalledAppAuthConnection """ def setUp(self): GoogleInstalledAppAuthConnection.conn_class = GoogleAuthMockHttp self.mock_scopes = ['https://www.googleapis.com/auth/foo'] kwargs = {'scopes': self.mock_scopes} self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS, **kwargs) def test_refresh_token(self): # This token info doesn't have a refresh token, so a new token will be # requested token_info1 = {'access_token': 'tokentoken', 'token_type': 'Bearer', 'expires_in': 3600} new_token1 = self.conn.refresh_token(token_info1) self.assertEqual(new_token1['access_token'], STUB_IA_TOKEN['access_token']) # This token info has a refresh token, so it will be able to be # refreshed. token_info2 = {'access_token': 'tokentoken', 'token_type': 'Bearer', 'expires_in': 3600, 'refresh_token': 'refreshrefresh'} new_token2 = self.conn.refresh_token(token_info2) self.assertEqual(new_token2['access_token'], STUB_REFRESH_TOKEN['access_token']) # Both sets should have refresh info self.assertTrue('refresh_token' in new_token1) self.assertTrue('refresh_token' in new_token2) class GoogleAuthTypeTest(GoogleTestCase): def test_guess(self): self.assertEqual(GoogleAuthType.guess_type(GCE_PARAMS_IA[0]), GoogleAuthType.IA) with mock.patch.object(GoogleAuthType, '_is_gce', return_value=True): # Since _is_gce currently depends on the environment, not on # parameters, other auths should override GCE. 
It does not make # sense for IA auth to happen on GCE, which is why it's left out. self.assertEqual(GoogleAuthType.guess_type(GCE_PARAMS[0]), GoogleAuthType.SA) self.assertEqual( GoogleAuthType.guess_type(GCS_S3_PARAMS[0]), GoogleAuthType.GCS_S3) self.assertEqual(GoogleAuthType.guess_type(GCE_PARAMS_GCE[0]), GoogleAuthType.GCE) class GoogleOAuth2CredentialTest(GoogleTestCase): def test_init_oauth2(self): kwargs = {'auth_type': GoogleAuthType.IA} cred = GoogleOAuth2Credential(*GCE_PARAMS, **kwargs) # If there is a viable token file, this gets used first self.assertEqual(cred.token, STUB_TOKEN_FROM_FILE) # No token file, get a new token. Check that it gets written to file. with mock.patch.object(GoogleOAuth2Credential, '_get_token_from_file', return_value=None): cred = GoogleOAuth2Credential(*GCE_PARAMS, **kwargs) expected = STUB_IA_TOKEN expected['expire_time'] = cred.token['expire_time'] self.assertEqual(cred.token, expected) cred._write_token_to_file.assert_called_once_with() def test_refresh(self): args = list(GCE_PARAMS) + [GoogleAuthType.GCE] cred = GoogleOAuth2Credential(*args) cred._refresh_token = mock.Mock() # Test getting an unexpired access token. tomorrow = datetime.datetime.now() + datetime.timedelta(days=1) cred.token = {'access_token': 'Access Token!', 'expire_time': _utc_timestamp(tomorrow)} cred.access_token self.assertFalse(cred._refresh_token.called) # Test getting an expired access token. yesterday = datetime.datetime.now() - datetime.timedelta(days=1) cred.token = {'access_token': 'Access Token!', 'expire_time': _utc_timestamp(yesterday)} cred.access_token self.assertTrue(cred._refresh_token.called) def test_auth_connection(self): # Test a bogus auth type self.assertRaises(GoogleAuthError, GoogleOAuth2Credential, *GCE_PARAMS, **{'auth_type': 'XX'}) # Try to create an OAuth2 credential when dealing with a GCS S3 # interoperability auth type. 
self.assertRaises(GoogleAuthError, GoogleOAuth2Credential, *GCE_PARAMS, **{'auth_type': GoogleAuthType.GCS_S3}) kwargs = {} if SHA256: kwargs['auth_type'] = GoogleAuthType.SA cred1 = GoogleOAuth2Credential(*GCE_PARAMS_PEM_KEY, **kwargs) self.assertTrue(isinstance(cred1.oauth2_conn, GoogleServiceAcctAuthConnection)) cred1 = GoogleOAuth2Credential(*GCE_PARAMS_JSON_KEY, **kwargs) self.assertTrue(isinstance(cred1.oauth2_conn, GoogleServiceAcctAuthConnection)) cred1 = GoogleOAuth2Credential(*GCE_PARAMS_KEY, **kwargs) self.assertTrue(isinstance(cred1.oauth2_conn, GoogleServiceAcctAuthConnection)) kwargs['auth_type'] = GoogleAuthType.IA cred2 = GoogleOAuth2Credential(*GCE_PARAMS_IA, **kwargs) self.assertTrue(isinstance(cred2.oauth2_conn, GoogleInstalledAppAuthConnection)) kwargs['auth_type'] = GoogleAuthType.GCE cred3 = GoogleOAuth2Credential(*GCE_PARAMS_GCE, **kwargs) self.assertTrue(isinstance(cred3.oauth2_conn, GoogleGCEServiceAcctAuthConnection)) class GoogleBaseConnectionTest(GoogleTestCase): """ Tests for GoogleBaseConnection """ def setUp(self): GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp self.mock_scopes = ['https://www.googleapis.com/auth/foo'] kwargs = {'scopes': self.mock_scopes, 'auth_type': GoogleAuthType.IA} self.conn = GoogleBaseConnection(*GCE_PARAMS, **kwargs) def test_add_default_headers(self): old_headers = {} new_expected_headers = {'Content-Type': 'application/json', 'Host': 'www.googleapis.com'} new_headers = self.conn.add_default_headers(old_headers) self.assertEqual(new_headers, new_expected_headers) def test_pre_connect_hook(self): old_params = {} old_headers = {} auth_str = '%s %s' % (STUB_TOKEN_FROM_FILE['token_type'], STUB_TOKEN_FROM_FILE['access_token']) new_expected_params = {} new_expected_headers = {'Authorization': auth_str} new_params, new_headers = self.conn.pre_connect_hook(old_params, old_headers) self.assertEqual(new_params, new_expected_params) self.assertEqual(new_headers, new_expected_headers) def 
test_encode_data(self): data = {'key': 'value'} json_data = '{"key": "value"}' encoded_data = self.conn.encode_data(data) self.assertEqual(encoded_data, json_data) def test_has_completed(self): body1 = {"endTime": "2013-06-26T10:05:07.630-07:00", "id": "3681664092089171723", "kind": "compute#operation", "status": "DONE", "targetId": "16211908079305042870"} body2 = {"endTime": "2013-06-26T10:05:07.630-07:00", "id": "3681664092089171723", "kind": "compute#operation", "status": "RUNNING", "targetId": "16211908079305042870"} response1 = MockJsonResponse(body1) response2 = MockJsonResponse(body2) self.assertTrue(self.conn.has_completed(response1)) self.assertFalse(self.conn.has_completed(response2)) def test_get_poll_request_kwargs(self): body = {"endTime": "2013-06-26T10:05:07.630-07:00", "id": "3681664092089171723", "kind": "compute#operation", "selfLink": "https://www.googleapis.com/operations-test"} response = MockJsonResponse(body) expected_kwargs = {'action': 'https://www.googleapis.com/operations-test'} kwargs = self.conn.get_poll_request_kwargs(response, None, {}) self.assertEqual(kwargs, expected_kwargs) def test_morph_action_hook(self): self.conn.request_path = '/compute/apiver/project/project-name' action1 = ('https://www.googleapis.com/compute/apiver/project' '/project-name/instances') action2 = '/instances' expected_request = '/compute/apiver/project/project-name/instances' request1 = self.conn.morph_action_hook(action1) request2 = self.conn.morph_action_hook(action2) self.assertEqual(request1, expected_request) self.assertEqual(request2, expected_request) class GoogleAuthMockHttp(MockHttp): """ Mock HTTP Class for Google Auth Connections. 
""" json_hdr = {'content-type': 'application/json; charset=UTF-8'} def _o_oauth2_token(self, method, url, body, headers): if 'code' in body: body = json.dumps(STUB_IA_TOKEN) elif 'refresh_token' in body: body = json.dumps(STUB_REFRESH_TOKEN) else: body = json.dumps(STUB_TOKEN) return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/secrets.py-dist0000664000175000017500000001051513153541406022161 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Make a copy of this file named 'secrets.py' and add your credentials there. # Note you can run unit tests without setting your credentials. 
BLUEBOX_PARAMS = ('customer_id', 'api_key') BRIGHTBOX_PARAMS = ('client_id', 'client_secret') EC2_PARAMS = ('access_id', 'secret') ECP_PARAMS = ('user_name', 'password') GANDI_PARAMS = ('user',) GCE_PARAMS = ('email@developer.gserviceaccount.com', 'key') # Service Account Authentication # GCE_PARAMS = ('client_id', 'client_secret') # Installed App Authentication GCE_KEYWORD_PARAMS = {'project': 'project_name'} GKE_PARAMS = ('email@developer.gserviceaccount.com', 'key') # Service Account Authentication # GCE_PARAMS = ('client_id', 'client_secret') # Installed App Authentication GKE_KEYWORD_PARAMS = {'project': 'project_name'} HOSTINGCOM_PARAMS = ('user', 'secret') IBM_PARAMS = ('user', 'secret') ONAPP_PARAMS = ('key') # OPENSTACK_PARAMS = ('user_name', 'api_key', secure_bool, 'host', port_int) ONEANDONE_PARAMS = ('token') OPENSTACK_PARAMS = ('user_name', 'api_key', False, 'host', 8774) OPENNEBULA_PARAMS = ('user', 'key') DIMENSIONDATA_PARAMS = ('user', 'password') OPSOURCE_PARAMS = ('user', 'password') OVH_PARAMS = ('application_key', 'application_secret', 'project_id', 'consumer_key') RACKSPACE_PARAMS = ('user', 'key') RACKSPACE_NOVA_PARAMS = ('user_name', 'api_key', False, 'host', 8774) SLICEHOST_PARAMS = ('key',) SOFTLAYER_PARAMS = ('user', 'api_key') VCLOUD_PARAMS = ('user', 'secret') VOXEL_PARAMS = ('key', 'secret') VPSNET_PARAMS = ('user', 'key') JOYENT_PARAMS = ('user', 'key') VCL_PARAMS = ('user', 'pass', True, 'foo.bar.com') GRIDSPOT_PARAMS = ('key',) HOSTVIRTUAL_PARAMS = ('key',) DIGITALOCEAN_v1_PARAMS = ('user', 'key') DIGITALOCEAN_v2_PARAMS = ('token',) CLOUDFRAMES_PARAMS = ('key', 'secret', False, 'host', 8888) PROFIT_BRICKS_PARAMS = ('user', 'key') VULTR_PARAMS = ('key') PACKET_PARAMS = ('api_key') ECS_PARAMS = ('access_key', 'access_secret') CLOUDSCALE_PARAMS = ('token',) # Storage STORAGE_S3_PARAMS = ('key', 'secret') STORAGE_OSS_PARAMS = ('key', 'secret') # Google key = 20 char alphanumeric string starting with GOOG STORAGE_GOOGLE_STORAGE_PARAMS = 
('GOOG0123456789ABCXYZ', 'secret') # Azure key is b64 encoded and must be decoded before signing requests STORAGE_AZURE_BLOBS_PARAMS = ('account', 'cGFzc3dvcmQ=') # Loadbalancer LB_BRIGHTBOX_PARAMS = ('user', 'key') LB_ELB_PARAMS = ('access_id', 'secret', 'region') LB_ALB_PARAMS = ('access_id', 'secret', 'region') LB_SLB_PARAMS = ('access_id', 'secret', 'region') # DNS DNS_PARAMS_LINODE = ('key') DNS_PARAMS_ZERIGO = ('email', 'api token') DNS_PARAMS_RACKSPACE = ('user', 'key') DNS_PARAMS_HOSTVIRTUAL = ('key',) DNS_PARAMS_ROUTE53 = ('access_id', 'secret') DNS_GANDI = ('user', ) DNS_PARAMS_GOOGLE = ('email_address', 'key') DNS_KEYWORD_PARAMS_GOOGLE = {'project': 'project_name'} DNS_PARAMS_WORLDWIDEDNS = ('user', 'key') DNS_PARAMS_DNSIMPLE = ('user', 'key') DNS_PARAMS_POINTDNS = ('user', 'key') DNS_PARAMS_LIQUIDWEB = ('user', 'key') DNS_PARAMS_ZONOMI = ('key') DNS_PARAMS_DURABLEDNS = ('api_user', 'api_key') DNS_PARAMS_GODADDY = ('customer-id', 'api_user', 'api_key') DNS_PARAMS_CLOUDFLARE = ('user@example.com', 'key') DNS_PARAMS_AURORADNS = ('apikey', 'secretkey') DNS_PARAMS_NSONE = ('key', ) DNS_PARAMS_LUADNS = ('user', 'key') DNS_PARAMS_BUDDYNS = ('key', ) DNS_PARAMS_DNSPOD = ('key', ) DNS_PARAMS_ONAPP = ('key', 'secret') # Container CONTAINER_PARAMS_DOCKER = ('user', 'password') CONTAINER_PARAMS_ECS = ('user', 'password', 'region') CONTAINER_PARAMS_KUBERNETES = ('user', 'password') CONTAINER_PARAMS_RANCHER = ('user', 'password') CONTAINER_PARAMS_GKE = ('user', 'password') apache-libcloud-2.2.1/libcloud/test/dns/0000775000175000017500000000000013160535110017751 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/__init__.py0000664000175000017500000000000012701023453022052 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/test_powerdns.py0000664000175000017500000001720613153541406023240 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import sys import unittest import json from libcloud.utils.py3 import httplib from libcloud.dns.base import Record, Zone from libcloud.dns.drivers.powerdns import PowerDNSDriver from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError from libcloud.dns.types import RecordType from libcloud.test import LibcloudTestCase, MockHttp from libcloud.test.file_fixtures import DNSFileFixtures class PowerDNSTestCase(LibcloudTestCase): def setUp(self): PowerDNSDriver.connectionCls.conn_class = PowerDNSMockHttp PowerDNSMockHttp.type = None self.driver = PowerDNSDriver('testsecret') self.test_zone = Zone(id='example.com.', domain='example.com', driver=self.driver, type='master', ttl=None, extra={}) self.test_record = Record(id=None, name='', data='192.0.2.1', type=RecordType.A, zone=self.test_zone, driver=self.driver, extra={}) def test_create_record(self): record = self.test_zone.create_record(name='newrecord.example.com', type=RecordType.A, data='192.0.5.4', extra={'ttl': 86400}) self.assertEqual(record.id, None) self.assertEqual(record.name, 'newrecord.example.com') self.assertEqual(record.data, '192.0.5.4') self.assertEqual(record.type, RecordType.A) self.assertEqual(record.ttl, 86400) def test_create_zone(self): extra = {'nameservers': ['ns1.example.org', 'ns2.example.org']} zone = self.driver.create_zone('example.org', 
extra=extra) self.assertEqual(zone.id, 'example.org.') self.assertEqual(zone.domain, 'example.org') self.assertEqual(zone.type, None) self.assertEqual(zone.ttl, None) def test_delete_record(self): self.assertTrue(self.test_record.delete()) def test_delete_zone(self): self.assertTrue(self.test_zone.delete()) def test_get_record(self): with self.assertRaises(NotImplementedError): self.driver.get_record('example.com.', '12345') def test_get_zone(self): zone = self.driver.get_zone('example.com.') self.assertEqual(zone.id, 'example.com.') self.assertEqual(zone.domain, 'example.com') self.assertEqual(zone.type, None) self.assertEqual(zone.ttl, None) def test_list_record_types(self): result = self.driver.list_record_types() self.assertEqual(len(result), 23) def test_list_records(self): records = self.driver.list_records(self.test_zone) self.assertEqual(len(records), 4) def test_list_zones(self): zones = self.driver.list_zones() self.assertEqual(zones[0].id, 'example.com.') self.assertEqual(zones[0].domain, 'example.com') self.assertEqual(zones[0].type, None) self.assertEqual(zones[0].ttl, None) self.assertEqual(zones[1].id, 'example.net.') self.assertEqual(zones[1].domain, 'example.net') self.assertEqual(zones[1].type, None) self.assertEqual(zones[1].ttl, None) def test_update_record(self): record = self.driver.update_record(self.test_record, name='newrecord.example.com', type=RecordType.A, data='127.0.0.1', extra={'ttl': 300}) self.assertEqual(record.id, None) self.assertEqual(record.name, 'newrecord.example.com') self.assertEqual(record.data, '127.0.0.1') self.assertEqual(record.type, RecordType.A) self.assertEqual(record.ttl, 300) def test_update_zone(self): with self.assertRaises(NotImplementedError): self.driver.update_zone(self.test_zone, 'example.net') # Test some error conditions def test_create_existing_zone(self): PowerDNSMockHttp.type = 'EXISTS' extra = {'nameservers': ['ns1.example.com', 'ns2.example.com']} with self.assertRaises(ZoneAlreadyExistsError): 
self.driver.create_zone('example.com', extra=extra) def test_get_missing_zone(self): PowerDNSMockHttp.type = 'MISSING' with self.assertRaises(ZoneDoesNotExistError): self.driver.get_zone('example.com.') def test_delete_missing_record(self): PowerDNSMockHttp.type = 'MISSING' self.assertFalse(self.test_record.delete()) def test_delete_missing_zone(self): PowerDNSMockHttp.type = 'MISSING' self.assertFalse(self.test_zone.delete()) class PowerDNSMockHttp(MockHttp): fixtures = DNSFileFixtures('powerdns') base_headers = {'content-type': 'application/json'} def _servers_localhost_zones(self, method, url, body, headers): if method == 'GET': # list_zones() body = self.fixtures.load('list_zones.json') elif method == 'POST': # create_zone() # Don't bother with a fixture for this operation, because we do # nothing with the parsed body anyway. body = '' else: raise NotImplementedError('Unexpected method: %s' % method) return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) def _servers_localhost_zones_example_com_(self, method, *args, **kwargs): if method == 'GET': # list_records() body = self.fixtures.load('list_records.json') elif method == 'PATCH': # create/update/delete_record() # Don't bother with a fixture for these operations, because we do # nothing with the parsed body anyway. body = '' elif method == 'DELETE': # delete_zone() return (httplib.NO_CONTENT, '', self.base_headers, httplib.responses[httplib.NO_CONTENT]) else: raise NotImplementedError('Unexpected method: %s' % method) return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) def _servers_localhost_zones_EXISTS(self, method, url, body, headers): # create_zone() is a POST. Raise on all other operations to be safe. 
if method != 'POST': raise NotImplementedError('Unexpected method: %s' % method) payload = json.loads(body) domain = payload['name'] body = json.dumps({'error': "Domain '%s' already exists" % domain}) return (httplib.UNPROCESSABLE_ENTITY, body, self.base_headers, 'Unprocessable Entity') def _servers_localhost_zones_example_com__MISSING(self, *args, **kwargs): return (httplib.UNPROCESSABLE_ENTITY, 'Could not find domain', self.base_headers, 'Unprocessable Entity') if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/dns/test_buddyns.py0000664000175000017500000001300713153541406023042 0ustar kamikami00000000000000import sys import unittest from libcloud.test import MockHttp from libcloud.test.file_fixtures import DNSFileFixtures from libcloud.test.secrets import DNS_PARAMS_BUDDYNS from libcloud.dns.drivers.buddyns import BuddyNSDNSDriver from libcloud.utils.py3 import httplib from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError from libcloud.dns.base import Zone class BuddyNSDNSTests(unittest.TestCase): def setUp(self): BuddyNSMockHttp.type = None BuddyNSDNSDriver.connectionCls.conn_class = BuddyNSMockHttp self.driver = BuddyNSDNSDriver(*DNS_PARAMS_BUDDYNS) self.test_zone = Zone(id='test.com', type='master', ttl=None, domain='test.com', extra={}, driver=self) def test_list_zones_empty(self): BuddyNSMockHttp.type = 'EMPTY_ZONES_LIST' zones = self.driver.list_zones() self.assertEqual(zones, []) def test_list_zones_success(self): BuddyNSMockHttp.type = 'LIST_ZONES' zones = self.driver.list_zones() self.assertEqual(len(zones), 2) zone = zones[0] self.assertEqual(zone.id, 'microsoft.com') self.assertEqual(zone.type, None) self.assertEqual(zone.domain, 'microsoft.com') self.assertEqual(zone.ttl, None) zone = zones[1] self.assertEqual(zone.id, 'google.de') self.assertEqual(zone.type, None) self.assertEqual(zone.domain, 'google.de') self.assertEqual(zone.ttl, None) def test_delete_zone_zone_does_not_exist(self): 
BuddyNSMockHttp.type = 'DELETE_ZONE_ZONE_DOES_NOT_EXIST' try: self.driver.delete_zone(zone=self.test_zone) except ZoneDoesNotExistError: e = sys.exc_info()[1] self.assertEqual(e.zone_id, self.test_zone.id) else: self.fail('Exception was not thrown') def test_delete_zone_success(self): BuddyNSMockHttp.type = 'DELETE_ZONE_SUCCESS' status = self.driver.delete_zone(zone=self.test_zone) self.assertTrue(status) def test_get_zone_zone_does_not_exist(self): BuddyNSMockHttp.type = 'GET_ZONE_ZONE_DOES_NOT_EXIST' try: self.driver.get_zone(zone_id='zonedoesnotexist.com') except ZoneDoesNotExistError: e = sys.exc_info()[1] self.assertEqual(e.zone_id, 'zonedoesnotexist.com') else: self.fail('Exception was not thrown') def test_get_zone_success(self): BuddyNSMockHttp.type = 'GET_ZONE_SUCCESS' zone = self.driver.get_zone(zone_id='myexample.com') self.assertEqual(zone.id, 'myexample.com') self.assertEqual(zone.domain, 'myexample.com') self.assertEqual(zone.type, None) self.assertEqual(zone.ttl, None) self.assertEqual(zone.driver, self.driver) def test_create_zone_success(self): BuddyNSMockHttp.type = 'CREATE_ZONE_SUCCESS' zone = self.driver.create_zone(domain='microsoft.com') self.assertEqual(zone.id, 'microsoft.com') self.assertEqual(zone.domain, 'microsoft.com') self.assertEqual(zone.type, None), self.assertEqual(zone.ttl, None) def test_create_zone_zone_already_exists(self): BuddyNSMockHttp.type = 'CREATE_ZONE_ZONE_ALREADY_EXISTS' try: self.driver.create_zone(domain='newzone.com', extra={'master': '13.0.0.1'}) except ZoneAlreadyExistsError: e = sys.exc_info()[1] self.assertEqual(e.zone_id, 'newzone.com') else: self.fail('Exception was not thrown') class BuddyNSMockHttp(MockHttp): fixtures = DNSFileFixtures('buddyns') def _api_v2_zone_EMPTY_ZONES_LIST(self, method, url, body, headers): body = self.fixtures.load('empty_zones_list.json') return httplib.OK, body, {}, httplib.responses[httplib.OK] def _api_v2_zone_LIST_ZONES(self, method, url, body, headers): body = 
self.fixtures.load('list_zones.json') return httplib.OK, body, {}, httplib.responses[httplib.OK] def _api_v2_zone_zonedoesnotexist_com_GET_ZONE_ZONE_DOES_NOT_EXIST( self, method, url, body, headers): body = self.fixtures.load('zone_does_not_exist.json') return 404, body, {}, httplib.responses[httplib.OK] def _api_v2_zone_myexample_com_GET_ZONE_SUCCESS(self, method, url, body, headers): body = self.fixtures.load('get_zone_success.json') return httplib.OK, body, {}, httplib.responses[httplib.OK] def _api_v2_zone_test_com_DELETE_ZONE_SUCCESS( self, method, url, body, headers): body = self.fixtures.load('delete_zone_success.json') return httplib.OK, body, {}, httplib.responses[httplib.OK] def _api_v2_zone_test_com_DELETE_ZONE_ZONE_DOES_NOT_EXIST( self, method, url, body, headers): body = self.fixtures.load('zone_does_not_exist.json') return httplib.OK, body, {}, httplib.responses[httplib.OK] def _api_v2_zone_CREATE_ZONE_SUCCESS(self, method, url, body, headers): body = self.fixtures.load('create_zone_success.json') return httplib.OK, body, {}, httplib.responses[httplib.OK] def _api_v2_zone_CREATE_ZONE_ZONE_ALREADY_EXISTS( self, method, url, body, headers): body = self.fixtures.load('zone_already_exists.json') return httplib.OK, body, {}, httplib.responses[httplib.OK] if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/dns/fixtures/0000775000175000017500000000000013160535107021630 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zerigo/0000775000175000017500000000000013160535110023121 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zerigo/create_record.xml0000664000175000017500000000065612701023453026455 0ustar kamikami00000000000000 2008-12-07T02:51:13Z 127.0.0.1 www.example.com A www 23456780 2008-12-07T02:51:13Z 12345678 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zerigo/get_zone.xml0000664000175000017500000000216112701023453025457 0ustar kamikami00000000000000 
2008-12-07T02:40:02Z ns1.example.com,ns2.example.com true 600 example.com dnsadmin@example.com 12345678 pri_sec one two 2008-12-07T02:40:02Z 1 2008-12-07T02:51:13Z 172.16.16.1 example.com A 23456789 2008-12-07T02:51:13Z 12345678 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zerigo/create_zone.xml0000664000175000017500000000120312701023453026137 0ustar kamikami00000000000000 2008-12-07T02:40:02Z ns1.example.com,ns2.example.com true 600 foo.bar.com dnsadmin@example.com 12345679 pri_sec 2008-12-07T02:40:02Z 0 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zerigo/list_zones.xml0000664000175000017500000000106112701023453026034 0ustar kamikami00000000000000 2008-12-07T02:40:02Z false 600 example.com 12345678 test foo bar pri_sec 2008-12-07T02:40:02Z apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zerigo/list_records_no_results.xml0000664000175000017500000000002712701023453030615 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zerigo/list_records.xml0000664000175000017500000000344712701023453026351 0ustar kamikami00000000000000 2008-12-07T02:51:13Z 172.16.16.1 www.example.com A www 23456789 2008-12-07T02:51:13Z 12345678 2008-12-07T02:51:13Z 172.16.16.2 test.example.com A test 23456789 3600 2008-12-07T02:51:13Z 12345678 2008-12-07T02:51:13Z 172.16.16.3 test2.example.com A 23456789 3600 2008-12-07T02:51:13Z 12345678 2008-12-07T02:51:13Z 172.16.16.4 test4.example.com A 23456789 3600 2008-12-07T02:51:13Z 12345678 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zerigo/get_record.xml0000664000175000017500000000065412701023453025767 0ustar kamikami00000000000000 2008-12-07T02:51:13Z 172.16.16.1 example.com A www 23456789 2008-12-07T02:51:13Z 12345678 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zerigo/create_zone_validation_error.xml0000664000175000017500000000021112701023453031560 0ustar kamikami00000000000000 Ns type is not included in the list Default ttl must be greater than or equal to 60 
apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zerigo/list_zones_no_results.xml0000664000175000017500000000002712701023453030312 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/0000775000175000017500000000000013160535110023135 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/couldnt_convert.xml0000664000175000017500000000013412701023453027067 0ustar kamikami00000000000000 ERROR: Could not find a 'nonexistentzone.com' zone. apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/create_record.xml0000664000175000017500000000070312701023453026462 0ustar kamikami00000000000000 OK: \n \n \n\n \n \t apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/converted_to_slave.xml0000664000175000017500000000036312701023453027550 0ustar kamikami00000000000000 OK: \n zone.com successfully converted to a slave zone. Our name servers will pick up DNS changes from the master server you specified. apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/create_record_already_exists.xml0000664000175000017500000000071612701023453031566 0ustar kamikami00000000000000 OK: \n \n \n\n \n \t apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/create_zone_already_exists.xml0000664000175000017500000000012612701023453031256 0ustar kamikami00000000000000 ERROR: This zone is already in your zone list. 
apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/create_zone.xml0000664000175000017500000000021712701023453026157 0ustar kamikami00000000000000 OK: \n myzone.com added successfully apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/list_zones.xml0000664000175000017500000000101312701023453026045 0ustar kamikami00000000000000 OK: \n \n \n\n \n \n \n apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/delete_zone.xml0000664000175000017500000000057312701023453026163 0ustar kamikami00000000000000 OK: \n \n \n\n \n apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/list_records.xml0000664000175000017500000000137712701023453026365 0ustar kamikami00000000000000 OK: \n \n \n\n \n \t\n \t\n \t\n \t apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/delete_record_does_not_exist.xml0000664000175000017500000000042612701023453031571 0ustar kamikami00000000000000 OK: \n \n \n\n apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/delete_record.xml0000664000175000017500000000065212701023453026464 0ustar kamikami00000000000000 OK: \n \n \n\n \n \t apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/delete_zone_does_not_exist.xml0000664000175000017500000000010512701023453031260 0ustar kamikami00000000000000 ERROR: No zone found for zone apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/empty_zones_list.xml0000664000175000017500000000037112701023453027271 0ustar kamikami00000000000000 OK: \n \n \n\n apache-libcloud-2.2.1/libcloud/test/dns/fixtures/zonomi/converted_to_master.xml0000664000175000017500000000032212701023453027724 0ustar kamikami00000000000000 OK: \n This service is now the master for this zone. It will no longer listen for changes from 1.2.3.4. 
apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/0000775000175000017500000000000013160535110023556 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/list_records_no_results.json0000664000175000017500000000065212701023453031427 0ustar kamikami00000000000000{ "name":"foo4.bar.com", "id":2946063, "comment":"wazaaa", "accountId":11111, "updated":"2011-10-29T18:42:28.000+0000", "ttl":855, "recordsList":{ "records":[], "totalEntries":0 }, "emailAddress":"kami@kami.si", "nameservers":[ { "name":"ns.rackspace.com" }, { "name":"ns2.rackspace.com" } ], "created":"2011-10-29T14:47:09.000+0000" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/unauthorized.json0000664000175000017500000000011112701023453027165 0ustar kamikami00000000000000{"unauthorized":{"message":"Username or api key is invalid","code":401}} apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/auth_2_0.json0000664000175000017500000001437112701023453026062 0ustar kamikami00000000000000{ "access": { "token": { "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", "expires": "2031-11-23T21:00:14.000-06:00" }, "serviceCatalog": [ { "endpoints": [ { "region": "ORD", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://cdn.clouddrive.com/v1/MossoCloudFS", "version": { "versionInfo": "https://cdn2.clouddrive.com/v1/", "versionList": "https://cdn2.clouddrive.com/", "versionId": "1" } }, { "region": "LON", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://cdn.clouddrive.com/v1/MossoCloudFS", "version": { "versionInfo": "https://cdn2.clouddrive.com/v1/", "versionList": "https://cdn2.clouddrive.com/", "versionId": "1" } } ], "name": "cloudFilesCDN", "type": "rax:object-cdn" }, { "endpoints": [ { "region": "ORD", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://storage4.ord1.clouddrive.com/v1/MossoCloudFS", "internalURL": 
"https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" }, { "region": "LON", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://storage4.lon1.clouddrive.com/v1/MossoCloudFS", "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" } ], "name": "cloudFiles", "type": "object-store" }, { "endpoints": [ { "tenantId": "1337", "publicURL": "https://servers.api.rackspacecloud.com/v1.0/1337", "version": { "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", "versionList": "https://servers.api.rackspacecloud.com/", "versionId": "1.0" } } ], "name": "cloudServers", "type": "compute" }, { "endpoints": [ { "region": "RegionOne", "tenantId": "1337", "publicURL": "https://127.0.0.1/v2/1337", "versionInfo": "https://127.0.0.1/v2/", "versionList": "https://127.0.0.1/", "versionId": "2" } ], "name": "nova", "type": "compute" }, { "endpoints": [ { "region": "DFW", "tenantId": "613469", "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/1337", "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", "versionList": "https://dfw.servers.api.rackspacecloud.com/", "versionId": "2" }, { "region": "ORD", "tenantId": "613469", "publicURL": "https://ord.servers.api.rackspacecloud.com/v2/1337", "versionInfo": "https://ord.servers.api.rackspacecloud.com/v2/", "versionList": "https://ord.servers.api.rackspacecloud.com/", "versionId": "2" } ], "name": "cloudServersOpenStack", "type": "compute" }, { "endpoints": [ { "region": "DFW", "tenantId": "1337", "publicURL": "https://preprod.dfw.servers.api.rackspacecloud.com/v2/1337" } ], "name": "cloudServersPreprod", "type": "compute" }, { "name": "cloudDNS", "endpoints": [ { "tenantId": "11111", "publicURL": "https://dns.api.rackspacecloud.com/v1.0/11111" } ], "type": "rax:dns" }, { "name": "cloudLoadBalancers", "endpoints": [ { "region": "SYD", "tenantId": "11111", "publicURL": 
"https://syd.loadbalancers.api.rackspacecloud.com/v1.0/11111" }, { "region": "DFW", "tenantId": "11111", "publicURL": "https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/11111" }, { "region": "ORD", "tenantId": "11111", "publicURL": "https://ord.loadbalancers.api.rackspacecloud.com/v1.0/11111" } ], "type": "rax:load-balancer" } ], "user": { "id": "7", "roles": [ { "id": "identity:default", "description": "Default Role.", "name": "identity:default" } ], "name": "testuser" } } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/update_record_success.json0000664000175000017500000000064112701023453031024 0ustar kamikami00000000000000{ "request":"{\"comment\": \"lulz\", \"data\": \"127.3.3.3\", \"name\": \"www.bar.foo1.com\", \"ttl\": 777}", "status":"COMPLETED", "verb":"PUT", "jobId":"251c0d0c-95bc-4e09-b99f-4b8748b66246", "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/546514/status/251c0d0c-95bc-4e09-b99f-4b8748b66246", "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/546514/domains/2946173/records/A-7423317" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/create_ptr_record_success.json0000664000175000017500000000163012701023453031671 0ustar kamikami00000000000000{ "request":"{\"recordsList\": {\"records\": [{\"data\": \"127.1.1.1\", \"type\": \"PTR\", \"name\": \"www.foo4.bar.com\"}]}, \"link\": {\"content\": \"\", \"href\": \"https://ord.servers.api.rackspacecloud.com/v2/905546514/servers/370b0ff8-3f57-4e10-ac84-e9145ce00584\", \"rel\": \"cloudServersOpenStack\"}}", "response":{ "records":[ { "name":"www.foo4.bar.com", "id":"PTR-7423317", "type":"PTR", "data":"127.1.1.1", "updated":"2011-10-29T20:50:41.000+0000", "ttl":3600, "created":"2011-10-29T20:50:41.000+0000" } ] }, "status":"COMPLETED", "verb":"POST", "jobId":"12345678-5739-43fb-8939-f3a2c4c0e99c", "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/546514/status/12345678-5739-43fb-8939-f3a2c4c0e99c", "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/546514/rdns" } 
apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/create_zone_success.json0000664000175000017500000000167012701023453030505 0ustar kamikami00000000000000{ "request":"{\"domains\": [{\"recordsList\": {\"records\": []}, \"emailAddress\": \"test@test.com\", \"name\": \"bar.foo1.com\"}]}", "response":{ "domains":[ { "name":"bar.foo1.com", "id":2946173, "accountId":11111, "updated":"2011-10-29T20:28:59.000+0000", "ttl":3600, "emailAddress":"test@test.com", "nameservers":[ { "name":"ns.rackspace.com" }, { "name":"ns2.rackspace.com" } ], "created":"2011-10-29T20:28:59.000+0000" } ] }, "status":"COMPLETED", "verb":"POST", "jobId":"288795f9-e74d-48be-880b-a9e36e0de61e", "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/288795f9-e74d-48be-880b-a9e36e0de61e", "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/create_zone_validation_error.json0000664000175000017500000000016112701023453032372 0ustar kamikami00000000000000{"validationErrors":{"messages":["Domain TTL is required and must be greater than or equal to 300"]},"code":400} apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/does_not_exist.json0000664000175000017500000000007012701023453027476 0ustar kamikami00000000000000{"message":"Object not Found.","code":404,"details":""} apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/list_zones_success.json0000664000175000017500000000260112701023453030373 0ustar kamikami00000000000000{ "domains":[ { "name":"foo4.bar.com", "id":2946063, "comment":"wazaaa", "accountId":11111, "updated":"2011-10-29T18:42:28.000+0000", "created":"2011-10-29T14:47:09.000+0000" }, { "name":"foo5.bar.com", "id":2946065, "comment":"fuu", "accountId":11111, "updated":"2011-10-29T14:48:39.000+0000", "created":"2011-10-29T14:48:39.000+0000" }, { "name":"foo6.bar.com", "id":2946066, "comment":"fuu", "accountId":11111, "updated":"2011-10-29T14:48:59.000+0000", 
"created":"2011-10-29T14:48:58.000+0000" }, { "name":"foo7.bar.com", "id":2946068, "comment":"fuu", "accountId":11111, "updated":"2011-10-29T14:49:14.000+0000", "created":"2011-10-29T14:49:13.000+0000" }, { "name":"foo8.bar.com", "id":2946069, "comment":"fuu", "accountId":11111, "updated":"2011-10-29T14:49:44.000+0000", "created":"2011-10-29T14:49:43.000+0000" }, { "name":"foo9.bar.com", "id":2946071, "comment":"fuu", "accountId":11111, "updated":"2011-10-29T14:54:45.000+0000", "created":"2011-10-29T14:54:45.000+0000" } ], "totalEntries":6 } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/list_ptr_records_success.json0000664000175000017500000000046312701023453031567 0ustar kamikami00000000000000{ "records":[ { "name":"test3.foo4.bar.com", "id":"PTR-7423034", "type":"PTR", "comment":"lulz", "data":"127.7.7.7", "updated":"2011-10-29T18:42:28.000+0000", "ttl":777, "created":"2011-10-29T15:29:29.000+0000" } ] } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/get_zone_success.json0000664000175000017500000000245312701023453030021 0ustar kamikami00000000000000{ "name":"foo4.bar.com", "id":2946063, "comment":"wazaaa", "accountId":11111, "updated":"2011-10-29T18:42:28.000+0000", "ttl":855, "recordsList":{ "records":[ { "name":"test3.foo4.bar.com", "id":"A-7423034", "type":"A", "comment":"lulz", "data":"127.7.7.7", "updated":"2011-10-29T18:42:28.000+0000", "ttl":777, "created":"2011-10-29T15:29:29.000+0000" }, { "name":"foo4.bar.com", "id":"NS-6717885", "type":"NS", "data":"dns1.stabletransit.com", "updated":"2011-10-29T14:47:09.000+0000", "ttl":3600, "created":"2011-10-29T14:47:09.000+0000" }, { "name":"foo4.bar.com", "id":"NS-6717886", "type":"NS", "data":"dns2.stabletransit.com", "updated":"2011-10-29T14:47:09.000+0000", "ttl":3600, "created":"2011-10-29T14:47:09.000+0000" } ], "totalEntries":3 }, "emailAddress":"test@test.com", "nameservers":[ { "name":"ns.rackspace.com" }, { "name":"ns2.rackspace.com" } ], "created":"2011-10-29T14:47:09.000+0000" } 
apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/delete_ptr_record_success.json0000664000175000017500000000045512701023453031674 0ustar kamikami00000000000000{ "status":"COMPLETED", "verb":"DELETE", "jobId":"12345678-2e5d-490f-bb6e-fdc65d1118a9", "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/12345678-2e5d-490f-bb6e-fdc65d1118a9", "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/rdns/cloudServersOpenStack" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/delete_record_success.json0000664000175000017500000000045712701023453031011 0ustar kamikami00000000000000{ "status":"COMPLETED", "verb":"DELETE", "jobId":"0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains/2946181/records/2346" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/create_record_success.json0000664000175000017500000000133312701023453031004 0ustar kamikami00000000000000{ "request":"{\"records\": [{\"data\": \"127.1.1.1\", \"type\": \"A\", \"name\": \"www.foo4.bar.com\"}]}", "response":{ "records":[ { "name":"www.foo4.bar.com", "id":"A-7423317", "type":"A", "data":"127.1.1.1", "updated":"2011-10-29T20:50:41.000+0000", "ttl":3600, "created":"2011-10-29T20:50:41.000+0000" } ] }, "status":"COMPLETED", "verb":"POST", "jobId":"586605c8-5739-43fb-8939-f3a2c4c0e99c", "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/546514/status/586605c8-5739-43fb-8939-f3a2c4c0e99c", "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/546514/domains/2946173/records" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/auth_1_1.json0000664000175000017500000000165512701023453026063 0ustar kamikami00000000000000{ "auth":{ "token":{ "id":"fooo-bar-fooo-bar-fooo-bar", "expires":"2031-10-29T17:39:28.000-05:00" }, "serviceCatalog":{ "cloudFilesCDN":[ { "region":"ORD", 
"publicURL":"https:\/\/cdn2.clouddrive.com\/v1\/MossoCloudFS_f66473fb-2e1e-4a44-barr-foooooo", "v1Default":true } ], "cloudFiles":[ { "region":"ORD", "publicURL":"https:\/\/storage101.ord1.clouddrive.com\/v1\/MossoCloudFS_fbarr-foooo-barr", "v1Default":true, "internalURL":"https:\/\/snet-storage101.ord1.clouddrive.com\/v1\/MossoCloudFS_fbarr-foooo-barr" } ], "cloudServers":[ { "publicURL":"https:\/\/servers.api.rackspacecloud.com\/v1.0\/11111", "v1Default":true } ] } } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/update_zone_success.json0000664000175000017500000000046112701023453030521 0ustar kamikami00000000000000{ "request":"{}", "status":"COMPLETED", "verb":"PUT", "jobId":"116a8f17-38ac-4862-827c-506cd04800d5", "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/116a8f17-38ac-4862-827c-506cd04800d5", "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains/2946173" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/get_record_success.json0000664000175000017500000000033312701023453030317 0ustar kamikami00000000000000{ "name":"test3.foo4.bar.com", "id":"A-7423034", "type":"A", "comment":"lulz", "data":"127.7.7.7", "updated":"2011-10-29T18:42:28.000+0000", "ttl":777, "created":"2011-10-29T15:29:29.000+0000" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/list_records_success.json0000664000175000017500000000245312701023453030703 0ustar kamikami00000000000000{ "name":"foo4.bar.com", "id":2946063, "comment":"wazaaa", "accountId":11111, "updated":"2011-10-29T18:42:28.000+0000", "ttl":855, "recordsList":{ "records":[ { "name":"test3.foo4.bar.com", "id":"A-7423034", "type":"A", "comment":"lulz", "data":"127.7.7.7", "updated":"2011-10-29T18:42:28.000+0000", "ttl":777, "created":"2011-10-29T15:29:29.000+0000" }, { "name":"foo4.bar.com", "id":"NS-6717885", "type":"NS", "data":"dns1.stabletransit.com", "updated":"2011-10-29T14:47:09.000+0000", "ttl":3600, "created":"2011-10-29T14:47:09.000+0000" }, { 
"name":"foo4.bar.com", "id":"NS-6717886", "type":"NS", "data":"dns2.stabletransit.com", "updated":"2011-10-29T14:47:09.000+0000", "ttl":3600, "created":"2011-10-29T14:47:09.000+0000" } ], "totalEntries":3 }, "emailAddress":"test@test.com", "nameservers":[ { "name":"ns.rackspace.com" }, { "name":"ns2.rackspace.com" } ], "created":"2011-10-29T14:47:09.000+0000" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/delete_zone_success.json0000664000175000017500000000044112701023453030477 0ustar kamikami00000000000000{ "status":"COMPLETED", "verb":"DELETE", "jobId":"0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains/2946181" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/rackspace/list_zones_no_results.json0000664000175000017500000000005212701023453031116 0ustar kamikami00000000000000{ "domains":[], "totalEntries":0 } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/0000775000175000017500000000000013160535110023074 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/update_resource.json0000664000175000017500000000014712701023453027164 0ustar kamikami00000000000000{ "ERRORARRAY": [], "DATA": { "ResourceID": 3585100 }, "ACTION": "domain.resource.update" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/update_domain.json0000664000175000017500000000013112701023453026575 0ustar kamikami00000000000000{ "ERRORARRAY": [], "DATA": { "DomainID": 5093 }, "ACTION": "domain.update" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/create_resource.json0000664000175000017500000000014712701023453027145 0ustar kamikami00000000000000{ "ERRORARRAY": [], "DATA": { "ResourceID": 3585100 }, "ACTION": "domain.resource.create" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/create_domain.json0000664000175000017500000000013112701023453026556 0ustar 
kamikami00000000000000{ "ERRORARRAY": [], "ACTION": "domain.create", "DATA": { "DomainID": 5094 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/delete_domain_does_not_exist.json0000664000175000017500000000021712701023453031670 0ustar kamikami00000000000000{ "ERRORARRAY": [ { "ERRORCODE": 5, "ERRORMESSAGE": "Object not found" } ], "DATA": {}, "ACTION": "domain.delete" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/delete_resource_does_not_exist.json0000664000175000017500000000023012701023453032243 0ustar kamikami00000000000000{ "ERRORARRAY": [ { "ERRORCODE": 5, "ERRORMESSAGE": "Object not found" } ], "DATA": {}, "ACTION": "domain.resource.delete" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/delete_resource.json0000664000175000017500000000014712701023453027144 0ustar kamikami00000000000000{ "ERRORARRAY": [], "DATA": { "ResourceID": 3585141 }, "ACTION": "domain.resource.delete" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/delete_domain.json0000664000175000017500000000013112701023453026555 0ustar kamikami00000000000000{ "ERRORARRAY": [], "ACTION": "domain.delete", "DATA": { "DomainID": 5123 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/get_record_does_not_exist.json0000664000175000017500000000022612701023453031214 0ustar kamikami00000000000000{ "ERRORARRAY": [ { "ERRORCODE": 5, "ERRORMESSAGE": "Object not found" } ], "DATA": {}, "ACTION": "domain.resource.list" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/get_record.json0000664000175000017500000000046512701023453026113 0ustar kamikami00000000000000{ "ERRORARRAY": [], "DATA": [ { "DOMAINID": 5093, "PORT": 80, "RESOURCEID": 3585100, "NAME": "www", "WEIGHT": 5, "TTL_SEC": 0, "TARGET": "127.0.0.1", "PRIORITY": 10, "PROTOCOL": "", "TYPE": "a" } ], "ACTION": "domain.resource.list" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/get_zone_does_not_exist.json0000664000175000017500000000021512701023453030707 0ustar kamikami00000000000000{ 
"ERRORARRAY": [ { "ERRORCODE": 5, "ERRORMESSAGE": "Object not found" } ], "DATA": {}, "ACTION": "domain.list" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/domain_list.json0000664000175000017500000000152412701023453026275 0ustar kamikami00000000000000{ "ERRORARRAY": [], "ACTION": "domain.list", "DATA": [ { "DOMAINID": 5093, "DESCRIPTION": "", "EXPIRE_SEC": 0, "RETRY_SEC": 0, "STATUS": 1, "LPM_DISPLAYGROUP": "thing", "MASTER_IPS": "", "REFRESH_SEC": 0, "SOA_EMAIL": "dns@example.com", "TTL_SEC": 0, "DOMAIN": "linode.com", "AXFR_IPS": "none", "TYPE": "master" }, { "DOMAINID": 5094, "DESCRIPTION": "", "EXPIRE_SEC": 0, "RETRY_SEC": 0, "STATUS": 1, "LPM_DISPLAYGROUP": "", "MASTER_IPS": "2600:3c03::f03c:91ff:feae:e071;66.228.43.47;", "REFRESH_SEC": 0, "SOA_EMAIL": "", "TTL_SEC": 0, "DOMAIN": "0.c.d.7.0.6.0.f.1.0.7.4.0.1.0.0.2.ip6.arpa", "AXFR_IPS": "2600:3c03::f03c:91ff:feae:e071;66.228.43.47;", "TYPE": "slave" } ] }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/get_zone.json0000664000175000017500000000063312701023453025605 0ustar kamikami00000000000000{ "ERRORARRAY": [], "DATA": [ { "DOMAINID": 5093, "DESCRIPTION": "", "EXPIRE_SEC": 0, "RETRY_SEC": 0, "STATUS": 1, "LPM_DISPLAYGROUP": "thing", "MASTER_IPS": "", "REFRESH_SEC": 0, "SOA_EMAIL": "dns@example.com", "TTL_SEC": 0, "DOMAIN": "linode.com", "AXFR_IPS": "none", "TYPE": "master" } ], "ACTION": "domain.list" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/resource_list_does_not_exist.json0000664000175000017500000000022612701023453031761 0ustar kamikami00000000000000{ "ERRORARRAY": [ { "ERRORCODE": 5, "ERRORMESSAGE": "Object not found" } ], "DATA": {}, "ACTION": "domain.resource.list" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/resource_list.json0000664000175000017500000000106712701023453026657 0ustar kamikami00000000000000{ "ERRORARRAY": [], "DATA": [ { "DOMAINID": 5093, "PORT": 80, "RESOURCEID": 3585100, "NAME": "mc", "WEIGHT": 5, "TTL_SEC": 0, "TARGET": "127.0.0.1", 
"PRIORITY": 10, "PROTOCOL": "", "TYPE": "a" }, { "DOMAINID": 5093, "PORT": 25565, "RESOURCEID": 3585141, "NAME": "_minecraft._udp", "WEIGHT": 5, "TTL_SEC": 0, "TARGET": "mc.linode.com", "PRIORITY": 10, "PROTOCOL": "udp", "TYPE": "srv" } ], "ACTION": "domain.resource.list" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/linode/create_domain_validation_error.json0000664000175000017500000000035312701023453032207 0ustar kamikami00000000000000{ "ERRORARRAY": [ { "ERRORCODE": 8, "ERRORMESSAGE": "The domain 'linode.com' already exists in our database. Please open a ticket if you think this is in error." } ], "DATA": {}, "ACTION": "domain.create" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/hostvirtual/0000775000175000017500000000000013160535110024206 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/hostvirtual/zone_does_not_exist.json0000664000175000017500000000013612701023453031164 0ustar kamikami00000000000000{ "error": { "code": 404, "message": "Not Found: id, validate_dns_zone_owner" } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/hostvirtual/get_record.json0000664000175000017500000000015712701023453027223 0ustar kamikami00000000000000{ "id": "300377", "name": "*.t.com", "type": "CNAME", "content": "t.com", "ttl": "86400", "prio": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/hostvirtual/list_records_none.json0000664000175000017500000000013712701023453030617 0ustar kamikami00000000000000{ "error": { "code": 404, "message": "Not Found: No Records Found" } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/hostvirtual/list_zones.json0000664000175000017500000000072712701023453027302 0ustar kamikami00000000000000[ { "id": "47234", "name": "t.com", "type": "NATIVE", "ttl": "3600" }, { "id": "48170", "name": "newbug.net", "type": "NATIVE", "ttl": "3600" }, { "id": "48017", "name": "newblah.com", "type": "NATIVE", "ttl": "3600" }, { "id": "47288", "name": "fromapi.com", "type": "NATIVE", "ttl": "3600" }, { "id": "48008", 
"name": "blahnew.com", "type": "NATIVE", "ttl": "3600" } ] apache-libcloud-2.2.1/libcloud/test/dns/fixtures/hostvirtual/get_zone.json0000664000175000017500000000157412701023453026724 0ustar kamikami00000000000000{ "id": "47234", "name": "t.com", "type": "NATIVE", "ttl": "3600", "soa": { "primary": "ns1.hostvirtual.com", "hostmaster": "support@HOSTVIRTUAL.COM", "serial": "2012100901", "refresh": "10800", "retry": "3600", "expire": "604800", "default_ttl": "3600" }, "ns": [ "ns4.hostvirtual.com", "ns3.hostvirtual.com", "ns2.hostvirtual.com", "ns1.hostvirtual.com" ], "records": [ { "id": "300377", "name": "*.t.com", "type": "CNAME", "content": "t.com", "ttl": "86400", "prio": null }, { "id": "300719", "name": "blah.com.", "type": "A", "content": "0.0.0.0", "ttl": null, "prio": null }, { "id": "300728", "name": "blahblah.com.t.com", "type": "A", "content": "1.1.1.1", "ttl": null, "prio": "10" } ] } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/hostvirtual/list_records.json0000664000175000017500000000064212701023453027601 0ustar kamikami00000000000000[ { "id": "300377", "name": "*.t.com", "type": "CNAME", "content": "t.com", "ttl": "86400", "prio": null }, { "id": "300719", "name": "www.t.com", "type": "A", "content": "208.111.35.173", "ttl": null, "prio": null }, { "id": "300728", "name": "blahblah.t.com", "type": "A", "content": "208.111.35.173", "ttl": null, "prio": "10" } ] apache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/0000775000175000017500000000000013160535110024327 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_UPDATE_ZONE0000664000175000017500000000065112701023453033015 0ustar kamikami00000000000000mail.niteowebsponsoredthisone.com 21600 10800 604800 3800 wwwA0.0.0.0 @A0.0.0.0 @MX10 niteowebsponsoredthisone.com NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE 
NONE NONE NONE NONE././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/_api_dns_modify_asp_CREATE_RECORD_MAX_ENTRIES_WITH_ENTRYapache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/_api_dns_modify_asp_CREATE_RECORD_MAX_0000664000175000017500000000153712701023453033175 0ustar kamikami00000000000000hostmaster.niteowebsponsoredthisone.com 21600 10800 604800 43200 wwwA0.0.0.0 domain2A0.0.0.2 @MX10 niteowebsponsoredthisone.com domain4A0.0.0.4 domain5A0.0.0.5 domain6A0.0.0.6 domain7A0.0.0.7 domain8A0.0.0.8 domain9A0.0.0.9 domain10A0.0.0.10 domain11A0.0.0.11 domain12A0.0.0.12 domain13A0.0.0.13 domain14A0.0.0.14 domain15A0.0.0.15 domain16A0.0.0.16 domain17A0.0.0.17 domain18A0.0.0.18 domain19A0.0.0.19 domain20A0.0.0.20 domain21A0.0.0.21 domain22A0.0.0.22 domain23bA0.0.0.41 domain24A0.0.0.24 domain25A0.0.0.25 domain26A0.0.0.26 domain27A0.0.0.27 domain28A0.0.0.28 domain29A0.0.0.29 domain30A0.0.0.30 domain31A0.0.0.31 domain32A0.0.0.32 domain33A0.0.0.33 domain34A0.0.0.34 domain35A0.0.0.35 domain36A0.0.0.36 domain37A0.0.0.37 domain38A0.0.0.38 domain39A0.0.0.39 domain40A0.0.0.40././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_CREATE_SECOND_RECORDapache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_CREATE_SECOND_0000664000175000017500000000063712701023453033341 0ustar kamikami00000000000000hostmaster.niteowebsponsoredthisone.com 21600 10800 604800 43200 wwwA0.0.0.0 domain2A0.0.0.2 @MX10 niteowebsponsoredthisone.com domain4A0.0.0.4 domain1A0.0.0.1 NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE././@LongLink0000000000000000000000000000016000000000000011212 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_CREATE_RECORD_MAX_ENTRIESapache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_CREATE_RECORD_0000664000175000017500000000153612701023453033343 0ustar kamikami00000000000000hostmaster.niteowebsponsoredthisone.com 21600 10800 604800 43200 wwwA0.0.0.0 domain2A0.0.0.2 @MX10 niteowebsponsoredthisone.com domain4A0.0.0.4 domain5A0.0.0.5 domain6A0.0.0.6 domain7A0.0.0.7 domain8A0.0.0.8 domain9A0.0.0.9 domain10A0.0.0.10 domain11A0.0.0.11 domain12A0.0.0.12 domain13A0.0.0.13 domain14A0.0.0.14 domain15A0.0.0.15 domain16A0.0.0.16 domain17A0.0.0.17 domain18A0.0.0.18 domain19A0.0.0.19 domain20A0.0.0.20 domain21A0.0.0.21 domain22A0.0.0.22 domain23A0.0.0.23 domain24A0.0.0.24 domain25A0.0.0.25 domain26A0.0.0.26 domain27A0.0.0.27 domain28A0.0.0.28 domain29A0.0.0.29 domain30A0.0.0.30 domain31A0.0.0.31 domain32A0.0.0.32 domain33A0.0.0.33 domain34A0.0.0.34 domain35A0.0.0.35 domain36A0.0.0.36 domain37A0.0.0.37 domain38A0.0.0.38 domain39A0.0.0.39 domain40A0.0.0.40apache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_CREATE_RECORD0000664000175000017500000000070112701023453033175 0ustar kamikami00000000000000hostmaster.niteowebsponsoredthisone.com 21600 10800 604800 43200 wwwA0.0.0.0 domain2A0.0.0.2 @MX10 niteowebsponsoredthisone.com domain4A0.0.0.4 NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONEapache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_UPDATE_RECORD0000664000175000017500000000066412701023453033224 0ustar kamikami00000000000000hostmaster.niteowebsponsoredthisone.com 21600 10800 604800 43200 domain1A0.0.0.1 @A0.0.0.0 @MX10 niteowebsponsoredthisone.com NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE 
NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONEapache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_DELETE_RECORD0000664000175000017500000000065312701023453033202 0ustar kamikami00000000000000hostmaster.niteowebsponsoredthisone.com 21600 10800 604800 43200 wwwA0.0.0.0 NONE @MX10 niteowebsponsoredthisone.com NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONEapache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp0000664000175000017500000000066012701023453031120 0ustar kamikami00000000000000hostmaster.niteowebsponsoredthisone.com 21600 10800 604800 43200 wwwA0.0.0.0 @A0.0.0.0 @MX10 niteowebsponsoredthisone.com NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONE NONEapache-libcloud-2.2.1/libcloud/test/dns/fixtures/worldwidedns/api_dns_list0000664000175000017500000000003612701023453026723 0ustar kamikami00000000000000niteowebsponsoredthisone.comPapache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/0000775000175000017500000000000013160535110023110 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/empty_zones_list.json0000664000175000017500000000000212701023453027404 0ustar kamikami00000000000000[]apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/create_zone_success.json0000664000175000017500000000025212701023453030032 0ustar kamikami00000000000000{ "id": 3, "name": "example.org", "synced": false, "queries_count": 0, "records_count": 0, "aliases_count": 0, "redirects_count": 0, "forwards_count": 0 }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/empty_records_list.json0000664000175000017500000000000212701023453027707 0ustar 
kamikami00000000000000[]apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/zone_already_exists.json0000664000175000017500000000017412701023453030062 0ustar kamikami00000000000000{ "status":"Forbidden", "request_id":"a75744f55cabe0411e02fa97e1a5d91b", "message":"Zone 'test.com' is taken already." }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/zone_does_not_exist.json0000664000175000017500000000014212701023453030063 0ustar kamikami00000000000000{"status":"Not Found","request_id":"be9cd5fd857254a4059357b2354edb92","message":"Zone not found."}apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/delete_record_success.json0000664000175000017500000000000012701023453030323 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/create_record_success.json0000664000175000017500000000031712701023453030337 0ustar kamikami00000000000000{ "id": 31, "name": "test.com.", "type": "A", "content": "127.0.0.1", "ttl": 13, "zone_id": 1, "created_at": "2015-01-17T14:04:35.251785849Z", "updated_at": "2015-01-17T14:04:35.251785972Z" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/get_record.json0000664000175000017500000000033712701023453026125 0ustar kamikami00000000000000{ "id": 31, "name": "example.com.", "type": "MX", "content": "10 mail.example.com.", "ttl": 300, "zone_id": 1, "created_at": "2015-01-17T14:04:35.251785849Z", "updated_at": "2015-01-17T14:04:35.251785972Z" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/records_list.json0000664000175000017500000000074012701023453026502 0ustar kamikami00000000000000[ { "id": 6683, "name": "example.org.", "type": "NS", "content": "b.ns.luadns.net.", "ttl": 86400, "zone_id": 3, "created_at": "2015-01-17T13:08:37.522452Z", "updated_at": "2015-01-17T13:08:37.522452Z" }, { "id": 6684, "name": "example.org.", "type": "NS", "content": "a.ns.luadns.net.", "ttl": 86400, "zone_id": 3, "created_at": "2015-01-17T13:08:37.520623Z", "updated_at": "2015-01-17T13:08:37.520623Z" } 
]apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/delete_zone_success.json0000664000175000017500000000000012701023453030020 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/record_does_not_exist.json0000664000175000017500000000014412701023453030370 0ustar kamikami00000000000000{"status":"Not Found","request_id":"be9cd5fd857254a4059357b2354edb92","message":"Record not found."}apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/get_zone.json0000664000175000017500000000177512701023453025631 0ustar kamikami00000000000000{ "id": 31, "name": "example.org", "synced": false, "queries_count": 0, "records_count": 3, "aliases_count": 0, "redirects_count": 0, "forwards_count": 0, "records": [ { "id": 6683, "name": "example.org.", "type": "SOA", "content": "a.ns.luadns.net. hostmaster.luadns.com. 1421501178 1200 120 604800 3600", "ttl": 3600, "zone_id": 3, "created_at": "2015-01-17T13:26:17.52747Z", "updated_at": "2015-01-17T13:26:17.527471Z" }, { "id": 6684, "name": "example.org.", "type": "NS", "content": "a.ns.luadns.net.", "ttl": 86400, "zone_id": 3, "created_at": "2015-01-17T13:26:17.529741Z", "updated_at": "2015-01-17T13:26:17.529741Z" }, { "id": 6685, "name": "example.org.", "type": "NS", "content": "b.ns.luadns.net.", "ttl": 86400, "zone_id": 3, "created_at": "2015-01-17T13:26:17.531911Z", "updated_at": "2015-01-17T13:26:17.531911Z" } ] }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/luadns/zones_list.json0000664000175000017500000000060212701023453026174 0ustar kamikami00000000000000[ { "id": 1, "name": "example.com", "synced": false, "queries_count": 0, "records_count": 3, "aliases_count": 0, "redirects_count": 0, "forwards_count": 0 }, { "id": 2, "name": "example.net", "synced": false, "queries_count": 0, "records_count": 3, "aliases_count": 0, "redirects_count": 0, "forwards_count": 0 } ]apache-libcloud-2.2.1/libcloud/test/dns/fixtures/google/0000775000175000017500000000000013160535110023076 5ustar 
kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/google/managed_zones_1.json0000664000175000017500000000045112701023453027025 0ustar kamikami00000000000000{"kind": "dns#managedZone", "name": "example-com", "nameServers": ["ns-cloud1.googledomains.com.", "ns-cloud2.googledomains.com.", "ns-cloud3.googledomains.com.", "ns-cloud4.googledomains.com."], "creationTime": "2014-03-29T23:06:00.921Z", "dnsName": "example.com.", "id": "1", "description": ""} apache-libcloud-2.2.1/libcloud/test/dns/fixtures/google/zone_list.json0000664000175000017500000000123712701023453026004 0ustar kamikami00000000000000{"kind": "dns#managedZonesListResponse", "managedZones": [{"kind": "dns#managedZone", "name": "example-com", "nameServers": ["ns-cloud-e1.googledomains.com.", "ns-cloud-e2.googledomains.com.", "ns-cloud-e3.googledomains.com.", "ns-cloud-e4.googledomains.com."], "creationTime": "2014-03-29T22:45:47.618Z", "dnsName": "example.com.", "id": "1", "description": ""}, {"kind": "dns#managedZone", "name": "example-net", "nameServers": ["ns-cloud-d1.googledomains.com.", "ns-cloud-d2.googledomains.com.", "ns-cloud-d3.googledomains.com.", "ns-cloud-d4.googledomains.com."], "creationTime": "2014-03-29T22:45:46.990Z", "dnsName": "example.net.", "id": "2", "description": ""}]} apache-libcloud-2.2.1/libcloud/test/dns/fixtures/google/records_list.json0000664000175000017500000000111612701023453026466 0ustar kamikami00000000000000{"rrsets": [{"rrdatas": ["ns-cloud-d1.googledomains.com.", "ns-cloud-d2.googledomains.com.", "ns-cloud-d3.googledomains.com.", "ns-cloud-d4.googledomains.com."], "kind": "dns#resourceRecordSet", "type": "NS", "name": "example.com.", "ttl": 21600}, {"rrdatas": ["ns-cloud-d1.googledomains.com. dns-admin.google.com. 
0 21600 3600 1209600 300"], "kind": "dns#resourceRecordSet", "type": "SOA", "name": "example.com.", "ttl": 21600}, {"rrdatas": ["1.2.3.4"], "kind": "dns#resourceRecordSet", "type": "A", "name": "foo.example.com.", "ttl": 3600}], "kind": "dns#resourceRecordSetsListResponse"} apache-libcloud-2.2.1/libcloud/test/dns/fixtures/google/zone.json0000664000175000017500000000046112701023453024747 0ustar kamikami00000000000000{"kind": "dns#managedZone", "name": "example-com", "nameServers": ["ns-cloud-e1.googledomains.com.", "ns-cloud-e2.googledomains.com.", "ns-cloud-e3.googledomains.com.", "ns-cloud-e4.googledomains.com."], "creationTime": "2014-03-29T22:45:47.618Z", "dnsName": "example.com.", "id": "1", "description": ""} apache-libcloud-2.2.1/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json0000664000175000017500000000044512701023453031101 0ustar kamikami00000000000000{ "error": { "errors": [ { "domain": "global", "reason": "notFound", "message": "The 'parameters.managedZone' resource named 'example-com' does not exist." } ], "code": 404, "message": "The 'parameters.managedZone' resource named 'example-com' does not exist." 
} } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/google/zone_create.json0000664000175000017500000000051312701023453026270 0ustar kamikami00000000000000{"kind": "dns#managedZone", "name": "example-org", "nameServers": ["ns-cloud-b1.googledomains.com.", "ns-cloud-b2.googledomains.com.", "ns-cloud-b3.googledomains.com.", "ns-cloud-b4.googledomains.com."], "creationTime": "2014-03-30T04:44:20.834Z", "dnsName": "example.org.", "id": "3", "description": "new domain for example.org"} apache-libcloud-2.2.1/libcloud/test/dns/fixtures/google/record_changes.json0000664000175000017500000000065512701023453026747 0ustar kamikami00000000000000{ "kind": "dns#change", "additions": [ { "kind": "dns#resourceRecordSet", "name": "foo.example.com.", "type": "A", "ttl": 300, "rrdatas": [ "127.0.0.1" ] } ], "deletions": [ { "kind": "dns#resourceRecordSet", "name": "bar.example.com.", "type": "A", "ttl": 300, "rrdatas": [ "127.0.0.1" ] } ], "startTime": "2015-05-20T19:49:16.974Z", "id": "37", "status": "pending" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/google/record.json0000664000175000017500000000053312701023453025252 0ustar kamikami00000000000000{ "kind": "dns#resourceRecordSetsListResponse", "rrsets": [ { "kind": "dns#resourceRecordSet", "name": "foo.example.com.", "type": "A", "ttl": 21600, "rrdatas": [ "ns-cloud-c1.googledomains.com.", "ns-cloud-c2.googledomains.com.", "ns-cloud-c3.googledomains.com.", "ns-cloud-c4.googledomains.com." 
] } ] } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/google/no_record.json0000664000175000017500000000010312701023453025737 0ustar kamikami00000000000000{ "kind": "dns#resourceRecordSetsListResponse", "rrsets": [ ] } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/powerdns/0000775000175000017500000000000013160535110023463 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/powerdns/list_zones.json0000664000175000017500000000107012705460761026562 0ustar kamikami00000000000000[ { "id":"example.com.", "url":"/servers/localhost/zones/example.com.", "name":"example.com", "kind":"Native", "dnssec":false, "account":"", "masters":[ ], "serial":1, "notified_serial":0, "last_check":0 }, { "id":"example.net.", "url":"/servers/localhost/zones/example.net.", "name":"example.net", "kind":"Native", "dnssec":false, "account":"", "masters":[ ], "serial":2016041501, "notified_serial":0, "last_check":0 } ] apache-libcloud-2.2.1/libcloud/test/dns/fixtures/powerdns/list_records.json0000664000175000017500000000176612705460761027101 0ustar kamikami00000000000000{ "id":"example.com.", "url":"/servers/localhost/zones/example.com.", "name":"example.com", "kind":"Native", "dnssec":false, "account":"", "masters":[ ], "serial":2016041501, "notified_serial":0, "last_check":0, "soa_edit_api":"", "soa_edit":"", "records":[ { "name":"example.com", "type":"NS", "ttl":3600, "disabled":false, "content":"ns1.example.com" }, { "name":"example.com", "type":"SOA", "ttl":3600, "disabled":false, "content":"a.misconfigured.powerdns.server hostmaster.example.com 2016041501 10800 3600 604800 3600" }, { "name":"www.example.com", "type":"A", "ttl":86400, "disabled":false, "content":"192.0.5.1" }, { "name":"example.com", "type":"A", "ttl":300, "disabled":false, "content":"192.0.5.1" } ], "comments":[ ] } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/onapp/0000775000175000017500000000000013160535110022737 5ustar 
kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/onapp/create_record.json0000664000175000017500000000017413153541406026444 0ustar kamikami00000000000000{ "dns_record": { "name": "blog", "id": 111227, "ttl": 3600, "ip": "123.156.189.2", "type": "A" } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/onapp/dns_zone_not_found.json0000664000175000017500000000005613153541406027534 0ustar kamikami00000000000000{ "errors": [ "DnsZone not found" ] } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/onapp/get_record.json0000664000175000017500000000016613153541406025761 0ustar kamikami00000000000000{ "dns_record": { "name": "@", "id": 123, "ttl": 3600, "ip": "123.156.189.1", "type": "A" } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/onapp/get_record_after_update.json0000664000175000017500000000016613153541406030504 0ustar kamikami00000000000000{ "dns_record": { "name": "@", "id": 123, "ttl": 4500, "ip": "123.156.189.2", "type": "A" } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/onapp/create_zone.json0000664000175000017500000000033013153541406026133 0ustar kamikami00000000000000{ "dns_zone": { "id": 1, "name": "example.com", "user_id": 123, "created_at": "2017-03-24T16:07:05.000+05:30", "updated_at": "2017-03-24T16:07:05.000+05:30", "cdn_reference": 12345678 } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/onapp/list_zones.json0000664000175000017500000000073513153541406026037 0ustar kamikami00000000000000[ { "dns_zone": { "id": 1, "name": "example.com", "user_id": 123, "created_at": "2017-03-24T16:07:05.000+05:30", "updated_at": "2017-03-24T16:07:05.000+05:30", "cdn_reference": 12345678 } }, { "dns_zone": { "id": 2, "name": "example.net", "user_id": 124, "created_at": "2017-03-24T16:07:05.000+05:30", "updated_at": "2017-03-24T16:07:05.000+05:30", "cdn_reference": 12345679 } } ] apache-libcloud-2.2.1/libcloud/test/dns/fixtures/onapp/get_zone.json0000664000175000017500000000033013153541406025447 0ustar kamikami00000000000000{ "dns_zone": { 
"id": 1, "name": "example.com", "user_id": 123, "created_at": "2017-03-24T16:07:05.000+05:30", "updated_at": "2017-03-24T16:07:05.000+05:30", "cdn_reference": 12345678 } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/onapp/list_records.json0000664000175000017500000000245313153541406026341 0ustar kamikami00000000000000{ "dns_zone": { "id": 1, "name": "example.com", "user_id": 123, "created_at": "2017-03-24T16:07:05.000+05:30", "updated_at": "2017-03-24T16:07:05.000+05:30", "cdn_reference": 12345678, "records": { "A": [ { "dns_record": { "name": "@", "id": 111222, "ttl": 3600, "ip": "123.156.189.1", "type": "A" } }, { "dns_record": { "name": "www", "id": 111223, "ttl": 3600, "ip": "123.156.189.1", "type": "A" } } ], "CNAME": [ { "dns_record": { "name": "mail", "id": 111224, "ttl": 3600, "hostname": "examplemail.com", "type": "CNAME" } } ], "MX": [ { "dns_record": { "priority": 20, "name": "@", "id": 111225, "ttl": 3600, "hostname": "mx.examplemail.com", "type": "MX" } }, { "dns_record": { "priority": 10, "name": "@", "id": 111226, "ttl": 3600, "hostname": "mx2.examplemail.com", "type": "MX" } } ] } } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/route53/0000775000175000017500000000000013160535110023130 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/route53/get_zone.xml0000664000175000017500000000130113153541406025466 0ustar kamikami00000000000000 /hostedzone/47234 t.com some unique reference some comment 0 ns1.example.com ns2.example.com ns3.example.com ns4.example.com apache-libcloud-2.2.1/libcloud/test/dns/fixtures/route53/zone_does_not_exist.xml0000664000175000017500000000046412701023453027741 0ustar kamikami00000000000000 Sender NoSuchHostedZone No hosted zone found with ID: 47234 376c64a6-6194-11e1-847f-ddaa49e4c811 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/route53/record_does_not_exist.xml0000664000175000017500000000101712701023453030237 0ustar kamikami00000000000000 definitely.not.what.you.askedfor.t.com CNAME 
86400 t.com apache-libcloud-2.2.1/libcloud/test/dns/fixtures/route53/create_zone.xml0000664000175000017500000000130612701023453026152 0ustar kamikami00000000000000 /hostedzone/47234 t.com some unique reference some comment 0 ns1.example.com ns2.example.com ns3.example.com ns4.example.com apache-libcloud-2.2.1/libcloud/test/dns/fixtures/route53/list_zones.xml0000664000175000017500000000332012701023453026043 0ustar kamikami00000000000000 /hostedzone/47234 t.com unique description some comment 0 /hostedzone/48170 newbug.net unique description some comment 0 /hostedzone/48017 newblah.com unique description some comment 0 /hostedzone/47288 fromapi.com unique description some comment 0 /hostedzone/48008 blahnew.com unique description some comment 0 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/route53/list_records.xml0000664000175000017500000000432612701023453026355 0ustar kamikami00000000000000 wibble.t.com CNAME 86400 t.com www.t.com A 86400 208.111.35.173 blahblah.t.com A 86400 208.111.35.173 testdomain.com MX 3600 1 ASPMX.L.GOOGLE.COM. 5 ALT1.ASPMX.L.GOOGLE.COM. 5 ALT2.ASPMX.L.GOOGLE.COM. 10 ASPMX2.GOOGLEMAIL.COM. 10 ASPMX3.GOOGLEMAIL.COM. foo.test.com. SRV 300 1 10 5269 xmpp-server.example.com. 2 12 5060 sip-server.example.com. 
apache-libcloud-2.2.1/libcloud/test/dns/fixtures/route53/invalid_change_batch.xml0000664000175000017500000000044112701023453027747 0ustar kamikami00000000000000 Sender InvalidChangeBatch Invalid change 376c64a6-6194-11e1-847f-ddaa49e4c811 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/auroradns/0000775000175000017500000000000013160535110023620 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/auroradns/zone_example_com_health_check.json0000664000175000017500000000047112701023453032525 0ustar kamikami00000000000000{ "created": "2015-08-07T13:56:59Z", "enabled": true, "health": true, "hostname": "www.pcextreme.nl", "id": "9990ec60-d592-4673-9e7e-9220ed42ee0b", "interval": 10, "ipaddress": "109.72.87.252", "next_run": "2015-08-10T14:22:32Z", "path": "/", "port": 8080, "threshold": 3, "type": "HTTP" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/auroradns/zone_list.json0000664000175000017500000000125412701023453026525 0ustar kamikami00000000000000[ { "created":"2015-05-21T13:51:12Z", "id":"ffb62570-8414-4578-a346-526b44e320b7", "account_id": "7be65324-6e97-4b73-9427-0f4abcf7d216", "cluster_id": "734f21f4-765e-4fea-b571-eedef73b20c5", "name":"auroradns1.eu", "servers":[ "ns1.auroradns.eu", "ns2.auroradns.info" ] }, { "created":"2015-07-07T08:15:14Z", "id":"e3c012266-72e5-48a1-ad3d-ffa7daa06dc5", "account_id": "78971987-f0a0-4714-8926-5d9206601aa9", "cluster_id": "d842505b-8dd9-4597-a34d-93fde6b61818", "name":"auroradns2.nl", "servers":[ "ns1.auroradns.eu", "ns2.auroradns.info" ] } ] apache-libcloud-2.2.1/libcloud/test/dns/fixtures/auroradns/zone_example_com_records.json0000664000175000017500000000165412701023453031570 0ustar kamikami00000000000000[ { "content":"ns1.auroradns.eu admin.auroradns.eu 2015021901 86400 7200 604800 300", "created":"2015-08-07T13:56:51Z", "disabled":false, "health_check_id":null, "id":"8f4a27b2-0b15-4ed7-b652-813b23c2027f", "modified":"2015-08-07T14:11:40Z", "name":"", "prio":null, "ttl":3600, "type":"SOA" }, { 
"content":"ns1.auroradns.eu", "created":"2015-08-07T13:56:50Z", "disabled":false, "health_check_id":null, "id":"4d8aaffc-2e43-49c6-82cc-a26d01a159bc", "modified":"2015-08-07T14:11:40Z", "name":"", "prio":null, "ttl":3600, "type":"NS" }, { "content":"109.72.87.137", "created":"2015-08-07T13:56:50Z", "disabled":false, "health_check_id":"aecad772-9234-4722-9a05-66ad5ff6a5b0", "id":"30245a1a-5569-4136-900f-864f088c60ee", "modified":"2015-08-10T14:10:17Z", "name":"smtp", "prio":null, "ttl":3600, "type":"A" } ] apache-libcloud-2.2.1/libcloud/test/dns/fixtures/auroradns/zone_example_com_record_localhost.json0000664000175000017500000000036012701023453033446 0ustar kamikami00000000000000{ "id": "5592f1ff", "content": "127.0.0.1", "disabled": false, "health_check_id": null, "name": "localhost", "prio": null, "ttl": 900, "type": "A", "created": "2015-08-07T13:56:51Z", "modified": "2015-08-07T14:11:40Z" } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/auroradns/zone_example_com.json0000664000175000017500000000051612701023453030043 0ustar kamikami00000000000000 { "created":"2015-05-21T13:51:12Z", "id":"ffb62570-8414-4578-a346-526b44e320b7", "account_id": "7be65324-6e97-4b73-9427-0f4abcf7d216", "cluster_id": "734f21f4-765e-4fea-b571-eedef73b20c5", "name":"example.com", "servers":[ "ns1.auroradns.eu", "ns2.auroradns.info" ] } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/auroradns/zone_example_com_health_checks.json0000664000175000017500000000177412701023453032717 0ustar kamikami00000000000000[ { "created": "2015-08-07T13:56:59Z", "enabled": true, "health": true, "hostname": "www.pcextreme.nl", "id": "9990ec60-d592-4673-9e7e-9220ed42ee0b", "interval": 60, "ipaddress": "109.72.87.252", "next_run": "2015-08-10T14:22:32Z", "path": "/", "port": 80, "threshold": 3, "type": "HTTP" }, { "created": "2015-08-07T13:56:59Z", "enabled": true, "health": true, "hostname": "www.pcextreme.nl", "id": "3f29a813-6a25-41c5-a45e-f771347de526", "interval": 60, "ipaddress": "2a00:f10:101::3eb:1", 
"next_run": "2015-08-10T14:22:32Z", "path": "/", "port": 80, "threshold": 3, "type": "HTTP" }, { "created": "2015-08-07T13:56:59Z", "enabled": true, "health": true, "hostname": "www.pcextreme.nl", "id": "7719a4c5-b319-46e7-a917-3dc57bdab1d4", "interval": 60, "ipaddress": null, "next_run": "2015-08-10T14:22:32Z", "path": "/", "port": 80, "threshold": 3, "type": "HTTP" } ]apache-libcloud-2.2.1/libcloud/test/dns/fixtures/auroradns/zone_create.json0000664000175000017500000000051612701023453027015 0ustar kamikami00000000000000 { "created":"2015-05-21T13:51:12Z", "id":"ffb62570-8414-4578-a346-526b44e320b7", "account_id": "7be65324-6e97-4b73-9427-0f4abcf7d216", "cluster_id": "734f21f4-765e-4fea-b571-eedef73b20c5", "name":"example.com", "servers":[ "ns1.auroradns.eu", "ns2.auroradns.info" ] } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/0000775000175000017500000000000013160535110023460 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_CREATE.json0000664000175000017500000000022312701023453026672 0ustar kamikami00000000000000{ "zone": { "name": "example.com", "id": 2, "group": "Default Group", "user-id": 4, "ttl": 3600 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_DELETE.json0000664000175000017500000000005612701023453027115 0ustar kamikami00000000000000{ "zone": { "status": "OK" } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_mail_redirects_DELETE.json0000664000175000017500000000007412701023453032163 0ustar kamikami00000000000000{ "zone_mail_redirect": { "status": "OK" } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_ZONE_UPDATE.json0000664000175000017500000000022112701023453027762 0ustar kamikami00000000000000{ "zone": { "id": 1, "name": "example.com", "group": "Other Group", "user-id": 6, "ttl": 3600 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_records_141_UPDATE.json0000664000175000017500000000030612701023453031241 0ustar 
kamikami00000000000000{ "zone_record": { "name": "updated.com", "data": "1.2.3.5", "id": 141, "aux": null, "record_type": "A", "ttl": 4500, "zone_id": 1 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_GET_1.json0000664000175000017500000000022312701023453026566 0ustar kamikami00000000000000{ "zone": { "name": "example.com", "id": 1, "group": "Default Group", "user-id": 3, "ttl": 3600 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_GET.json0000664000175000017500000000056512701023453026357 0ustar kamikami00000000000000[ { "zone": { "name": "example.com", "id": 1, "group": "Default Group", "user-id": 3, "ttl": 3600 } }, { "zone": { "name": "example2.com", "id": 2, "group": "Default Group", "user-id": 4, "ttl": 3600 } } ]apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_redirects_UPDATE.json0000664000175000017500000000041512701023453031200 0ustar kamikami00000000000000{ "zone_redirect": { "name": "redirect3.domain1.com.", "redirect_to": "http://updatedother.com", "id": 36843229, "redirect_type": 302, "iframe_title": null, "redirect_query_string": false, "zone_id": 1 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_redirects_LIST.json0000664000175000017500000000115612701023453030774 0ustar kamikami00000000000000[ { "zone_redirect": { "name": "redirect2.domain1.com.", "redirect_to": "http://other.com", "id": 36843229, "redirect_type": 302, "iframe_title": null, "redirect_query_string": false, "zone_id": 1 } }, { "zone_redirect": { "name": "redirect1.domain1.com.", "redirect_to": "http://someother.com", "id": 36843497, "redirect_type": 302, "iframe_title": null, "redirect_query_string": false, "zone_id": 1 } } ]apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_records_150_DELETE.json0000664000175000017500000000006512701023453031223 0ustar kamikami00000000000000{ "zone_record": { "status": "OK" } 
}apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_mail_redirects_LIST.json0000664000175000017500000000063512701023453031777 0ustar kamikami00000000000000[ { "zone_mail_redirect": { "source_address": "admin", "destination_address": "user@example-site.com", "id": 5, "zone_id": 1 } }, { "zone_mail_redirect": { "source_address": "new_admin", "destination_address": "second.user@example-site.com", "id": 7, "zone_id": 1 } } ]apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_mail_redirects_CREATE.json0000664000175000017500000000024612701023453032165 0ustar kamikami00000000000000{ "zone_mail_redirect": { "source_address": "admin", "destination_address": "user@example-site.com", "id": 5, "zone_id": 1 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_redirects_DELETE.json0000664000175000017500000000006712701023453031163 0ustar kamikami00000000000000{ "zone_redirect": { "status": "OK" } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_records_GET.json0000664000175000017500000000063312701023453030314 0ustar kamikami00000000000000[{ "zone_record": { "name": "site.example.com", "data": "1.2.3.4", "id": 141, "aux": null, "record_type": "A", "ttl": 3600, "zone_id": 1 } }, { "zone_record": { "name": "site.example1.com", "data": "1.2.3.6", "id": 150, "aux": null, "record_type": "A", "ttl": 3600, "zone_id": 1 } }]apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_example_com_records_CREATE.json0000664000175000017500000000031312701023453032764 0ustar kamikami00000000000000{ "zone_record": { "name": "site.example.com", "data": "1.2.3.4", "id": 143, "aux": null, "record_type": "A", "ttl": 3600, "zone_id": 1 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_mail_redirects_UPDATE.json0000664000175000017500000000025612701023453032205 0ustar kamikami00000000000000{ "zone_mail_redirect": { "source_address": "new_admin", "destination_address": "new_user@example-site.com", "id": 5, 
"zone_id": 1 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_example_com_UPDATE.json0000664000175000017500000000022112701023453031260 0ustar kamikami00000000000000{ "zone": { "name": "example.com", "id": 1, "group": "Other Group", "user-id": 3, "ttl": 3600 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/not_found.json0000664000175000017500000000001112701023453026340 0ustar kamikami00000000000000Not foundapache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_mail_redirects_GET.json0000664000175000017500000000024612701023453031641 0ustar kamikami00000000000000{ "zone_mail_redirect": { "source_address": "admin", "destination_address": "user@example-site.com", "id": 5, "zone_id": 1 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_redirects_CREATE.json0000664000175000017500000000041412701023453031160 0ustar kamikami00000000000000{ "zone_redirect": { "name": "redirect2.domain1.com.", "redirect_to": "http://other.com", "id": 36843229, "redirect_type": 302, "iframe_title": "An Iframe", "redirect_query_string": true, "zone_id": 1 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_records_141_GET.json0000664000175000017500000000031312701023453030674 0ustar kamikami00000000000000{ "zone_record": { "name": "site.example.com", "data": "1.2.3.4", "id": 141, "aux": null, "record_type": "A", "ttl": 3600, "zone_id": 1 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/error.json0000664000175000017500000000020712701023453025505 0ustar kamikami00000000000000{ "zone": { "base": [ "You have reached domains limit. Please upgrade your plan to add more." ] } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/redirect_error.json0000664000175000017500000000036112701023453027367 0ustar kamikami00000000000000{ "zone_redirect": { "redirect_type": [ "is not included in the list" ], "name": [ "There is already A type record or redirect for this subdomain. Please remove it first." 
] } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/pointdns/_zones_1_redirects_GET.json0000664000175000017500000000041312701023453030633 0ustar kamikami00000000000000{ "zone_redirect": { "name": "redirect2.domain1.com.", "redirect_to": "http://other.com", "id": 36843229, "redirect_type": 302, "iframe_title": null, "redirect_query_string": false, "zone_id": 162949 } }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/buddyns/0000775000175000017500000000000013160535110023272 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/buddyns/empty_zones_list.json0000664000175000017500000000000312703467102027574 0ustar kamikami00000000000000[] apache-libcloud-2.2.1/libcloud/test/dns/fixtures/buddyns/create_zone_success.json0000664000175000017500000000043012703467102030217 0ustar kamikami00000000000000{ "name": "microsoft.com", "name_idn": "microsoft.com", "serial": null, "master": "65.55.37.62", "creation_ts": null, "status": "https://www.buddyns.com/api/v2/zone/anexample.com/status/", "delegation": "https://www.buddyns.com/api/v2/zone/anexample.com/delegation/" }apache-libcloud-2.2.1/libcloud/test/dns/fixtures/buddyns/zone_already_exists.json0000664000175000017500000000017112703467102030246 0ustar kamikami00000000000000 {"errors": {"name": ["Zone with this Domain already exists."]}, "detail": "Invalid zone submitted for addition."} apache-libcloud-2.2.1/libcloud/test/dns/fixtures/buddyns/zone_does_not_exist.json0000664000175000017500000000003012703467102030246 0ustar kamikami00000000000000{"detail": "Not found"} apache-libcloud-2.2.1/libcloud/test/dns/fixtures/buddyns/get_zone_success.json0000664000175000017500000000045412703467102027541 0ustar kamikami00000000000000{ "name": "myexample.com", "name_idn": "myexample.com", "serial": null, "master": "65.55.37.62", "creation_ts": "2016-04-09T06:20:05.140", "status": "https://www.buddyns.com/api/v2/zone/myexample.com/status/", "delegation": 
"https://www.buddyns.com/api/v2/zone/myexample.com/delegation/"}apache-libcloud-2.2.1/libcloud/test/dns/fixtures/buddyns/delete_zone_success.json0000664000175000017500000000000012703467102030207 0ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/buddyns/list_zones.json0000664000175000017500000000103713153541406026366 0ustar kamikami00000000000000[ { "name" : "microsoft.com", "name_idn" : "microsoft.com", "creation_ts" : "2013-11-06T19:39:38.205", "master" : "65.55.37.62", "serial" : 2013110601, "status": "/api/v2/zone/microsoft.com/status/", "delegation": "/api/v2/zone/microsoft.com/delegation/" }, { "name" : "google.de", "name_idn" : "google.de", "creation_ts" : "2012-06-06T19:53:07.269", "master" : "154.15.200.6", "serial" : 1383743519, "status": "google.de/status/", "delegation": "/api/v2/zone/b?cher.de/delegation/" } ]apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/0000775000175000017500000000000013160535110023632 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_deleteObject.xml0000664000175000017500000000016512701023453033444 0ustar kamikami00000000000000 1 apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/not_found.xml0000664000175000017500000000063712701023453026357 0ustar kamikami00000000000000 faultCode SoftLayer_Exception_ObjectNotFound faultString Unable to find object with id of \'333\'. 
././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_createObject.xmlapache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_cr0000664000175000017500000001614712701023453033675 0ustar kamikami00000000000000 data 127.0.0.1 domainId 1752717 expire host www id 50772870 minimum mxPriority refresh retry ttl 86400 type A domain id 1752717 name bar.com serial 2014120804 updateDate 2014-12-08T11:36:55-06:00 resourceRecords data ns1.softlayer.com. domainId 123 expire 1728000 host @ id 50772366 minimum 43200 mxPriority refresh 7200 responsiblePerson support.softlayer.com. retry 600 ttl 86400 type soa data ns1.softlayer.com. domainId 1752717 expire host @ id 50772367 minimum mxPriority refresh retry ttl 86400 type ns data ns2.softlayer.com. domainId 1752717 expire host @ id 50772368 minimum mxPriority refresh retry ttl 86400 type ns apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_getObject.xml0000664000175000017500000000121112701023453032752 0ustar kamikami00000000000000 id 123 name bar.com serial 2014120802 updateDate 2014-12-08T14:00:50-06:00 managedResourceFlag 0 ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_editObject.xmlapache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_ed0000664000175000017500000000016512701023453033652 0ustar kamikami00000000000000 1 ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_getByDomainName.xmlapache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_getByDomainName.x0000664000175000017500000000125112701023453033522 0ustar 
kamikami00000000000000 id 123 name bar.com serial 2014120802 updateDate 2014-12-08T14:00:50-06:00 ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_getResourceRecords.xmlapache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_getResourceRecord0000664000175000017500000001252712701023453033707 0ustar kamikami00000000000000 data ns1.softlayer.com. domainId 123 expire 604800 host @ id 50772366 minimum 3600 mxPriority refresh 3600 responsiblePerson root.bar.com. retry 300 ttl 86400 type soa data ns1.softlayer.com. domainId 1752717 expire host @ id 50772367 minimum mxPriority refresh retry ttl 86400 type ns data ns2.softlayer.com. domainId 123 expire host @ id 50772368 minimum mxPriority refresh retry ttl 86400 type ns data 127.0.0.1 domainId 123 expire host @ id 50772365 minimum mxPriority refresh retry ttl 86400 type a apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_createObject.xml0000664000175000017500000002440412701023453033447 0ustar kamikami00000000000000 id 123 name bar.com serial 2014120802 updateDate 2014-12-08T08:00:41-06:00 account accountManagedResourcesFlag 0 accountStatusId 1111 address1 Test 1 allowedPptpVpnQuantity 1 brandId 1 city World claimedTaxExemptTxFlag 0 companyName Test country SI createDate 2014-11-27T12:19:34-06:00 email foo@bar.com firstName foo id 11111 isReseller 0 lastName Bar lateFeeProtectionFlag modifyDate officePhone 1111111111 postalCode 1111 state OT statusDate attributes brand catalogId 14 id 21 keyName SOFTLAYER_EU longName SoftLayer Dutch Holdings B.V. name SoftLayer EU resourceRecords data 127.0.0.1 domainId 123 expire host @ id 50771583 minimum mxPriority refresh retry ttl 86400 type A data ns1.softlayer.com. domainId 123 expire 604800 host @ id 111111 minimum 3600 mxPriority refresh 3600 responsiblePerson root.bar.com. 
retry 300 ttl 86400 type SOA data ns1.softlayer.com. domainId 1752657 expire host @ id 111111 minimum mxPriority refresh retry ttl 86400 type NS data ns2.softlayer.com. domainId 1111111 expire host @ id 50771586 minimum mxPriority refresh retry ttl 86400 type NS ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_getObject.xmlapache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_ge0000664000175000017500000000244312701023453033656 0ustar kamikami00000000000000 data ns1.softlayer.com. domainId 123 expire 1728000 host @ id 50772366 minimum 43200 mxPriority refresh 7200 responsiblePerson support.softlayer.com. retry 600 ttl 86400 type soa ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_deleteObject.xmlapache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_de0000664000175000017500000000016512701023453033652 0ustar kamikami00000000000000 1 ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_getObject_changed.xmlapache-libcloud-2.2.1/libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_ge0000664000175000017500000000242012701023453033651 0ustar kamikami00000000000000 data 1.1.1.1 domainId 123 expire 1728000 host www id 123 minimum 43200 mxPriority refresh 7200 responsiblePerson support.softlayer.com. 
retry 600 ttl 30 type a apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/0000775000175000017500000000000013160535110023742 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/rec_delete.json0000664000175000017500000000032512701023453026732 0ustar kamikami00000000000000{ "request": { "act": "rec_delete", "tkn": "maybeno", "a": "rec_delete", "z": "example.com", "id": "412563484", "email": "example@example.com" }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/rec_new.json0000664000175000017500000000205012701023453026256 0ustar kamikami00000000000000{ "request": { "act": "rec_new", "a": "rec_new", "name": "test5", "tkn": "maybeno", "content": "127.0.0.3", "ttl": "120", "z": "example.com", "type": "A", "email": "example@example.com" }, "response": { "rec": { "obj": { "rec_id": "412561327", "rec_hash": "ed23e38bca17007e026d2da517adf10a", "zone_name": "example.com", "name": "test5.example.com", "display_name": "test5", "type": "A", "prio": null, "content": "127.0.0.3", "display_content": "127.0.0.3", "ttl": "120", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 0, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } } } }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/zone_load_multi.json0000664000175000017500000000367012701023453030031 0ustar kamikami00000000000000{ "request": { "act": "zone_load_multi", "tkn": "maybeno", "a": "zone_load_multi", "email": "example@example.com" }, "response": { "zones": { "has_more": false, "count": 1, "objs": [{ "zone_id": "1234", "user_id": "54321", "zone_name": "example.com", "display_name": "example.com", "zone_status": "V", "zone_mode": "1", "host_id": null, "zone_type": "F", "host_pubname": null, "host_website": null, "vtxt": null, 
"fqdns": ["kara.ns.cloudflare.com", "noah.ns.cloudflare.com"], "step": "4", "zone_status_class": "status-ac_api_json_html_zone_load_multitive", "zone_status_desc": "CloudFlare powered, this website will be accelerated and protected (info<\/a>)", "ns_vanity_map": [], "orig_registrar": "godaddy", "orig_dnshost": null, "orig_ns_names": "{dns1.stabletransit.com,dns2.stabletransit.com}", "props": { "dns_cname": 0, "dns_partner": 0, "dns_anon_partner": 0, "plan": "FREE_ZONE", "pro": 0, "expired_pro": 0, "pro_sub": 0, "plan_sub": 0, "ssl": 1, "expired_ssl": 0, "expired_rs_pro": 0, "reseller_pro": 0, "reseller_plans": [], "force_interal": 0, "ssl_needed": 0, "alexa_rank": 1307220, "has_vanity": 0 }, "confirm_code": { "zone_delete": "maybeno", "zone_deactivate": "maybeno", "zone_dev_mode1": "maybeno" }, "allow": ["analytics", "threat_control", "zone_delete", "cf_apps", "dns_editor", "cf_settings", "page_rules", "zone_deactivate", "zone_dev_mode1"] }] } }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/sec_lvl.json0000664000175000017500000000005112701023453026262 0ustar kamikami00000000000000{ "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/zone_file_purge.json0000664000175000017500000000050312701023453030011 0ustar kamikami00000000000000{ "request": { "act": "zone_file_purge", "url": "https:\/\/www.example.com\/aaaaa.html", "tkn": "maybeno", "z": "example.com", "a": "zone_file_purge", "email": "example@example.com" }, "response": { "url": "https:\/\/www.example.com\/test.html" }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/devmode.json0000664000175000017500000000005112701023453026256 0ustar kamikami00000000000000{ "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/ip_lkup.json0000664000175000017500000000012512701023453026300 0ustar kamikami00000000000000{ "response": { "127.0.0.1": false 
}, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/fpurge_ts.json0000664000175000017500000000020412701023453026631 0ustar kamikami00000000000000{ "response": { "fpurge_ts": 1449381662 }, "result": "success", "msg": null, "attributes": { "cooldown": 20 } } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/zone_check.json0000664000175000017500000000016012701023453026744 0ustar kamikami00000000000000{ "response": { "zones": { "example.com": 4025956 } }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/wl.json0000664000175000017500000000020212701023453025253 0ustar kamikami00000000000000{ "response": { "result": { "ip": "127.0.0.1", "action": "WL" } }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/zone_settings.json0000664000175000017500000000206612701023453027536 0ustar kamikami00000000000000{ "request": { "act": "zone_settings", "tkn": "maybeno", "a": "zone_settings", "z": "example.com", "email": "example@example.com" }, "response": { "result": { "objs": [{ "userSecuritySetting": "Medium", "dev_mode": 0, "ipv46": 3, "ob": 1, "cache_lvl": "agg", "outboundLinks": "disabled", "bic": "1", "chl_ttl": "3600", "comodo_vc": "a.b", "dnssec": "", "exp_ttl": "86400", "fpurge_ts": "1448144070", "minify": "7", "preload": "0", "sec_lvl": "med", "secureheader_settings": "{\"*\":{\"strict_transport_security\":{\"enable\":1,\"max_age\":2592000},\"content_type_options\":\"nosniff\"}}", "ssl": "1", "outlink": "0", "geoloc": "1", "host_spf": "0", "waf_profile": "off", "email_filter": "1", "sse": "1", "cache_ttl": "14400", "lazy": "0", "async": "0", "ddos": "Off" }] } }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/ban.json0000664000175000017500000000020312701023453025372 0ustar kamikami00000000000000{ "response": { "result": { "ip": "127.0.0.1", "action": "BAN" } }, "result": "success", 
"msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/rec_load_all.json0000664000175000017500000003525412701023453027250 0ustar kamikami00000000000000{ "request": { "act": "rec_load_all", "tkn": "maybeno", "a": "rec_load_all", "z": "example.com", "email": "example@example.com" }, "response": { "recs": { "has_more": false, "count": 18, "objs": [{ "rec_id": "364797364", "rec_hash": "479a684c73e0e75c433675e86957660a", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "A", "prio": null, "content": "192.30.252.153", "display_content": "192.30.252.153", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "1", "props": { "proxiable": 1, "cloud_on": 1, "cf_open": 0, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } }, { "rec_id": "364797367", "rec_hash": "acaee7d8c6e256f7bcc5dab4410489cf", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "A", "prio": null, "content": "192.30.252.154", "display_content": "192.30.252.154", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "1", "props": { "proxiable": 1, "cloud_on": 1, "cf_open": 0, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } }, { "rec_id": "403456915", "rec_hash": "77cabc2ba4a75814eeda78daabce0859", "zone_name": "example.com", "name": "test2.example.com", "display_name": "test2", "type": "A", "prio": null, "content": "127.0.0.2", "display_content": "127.0.0.2", "ttl": "120", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 0, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } }, { "rec_id": "403456984", "rec_hash": "edd95959936d20583e38352831d1978b", 
"zone_name": "example.com", "name": "test3.example.com", "display_name": "test3", "type": "A", "prio": null, "content": "127.0.0.1", "display_content": "127.0.0.1", "ttl": "120", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 0, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } }, { "rec_id": "364982413", "rec_hash": "fa43ee2e176f9be7b8e464167bdff440", "zone_name": "example.com", "name": "yesyes.example.com", "display_name": "yesyes", "type": "CNAME", "prio": null, "content": "verify.bing.com", "display_content": "verify.bing.com", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 1, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } }, { "rec_id": "364982461", "rec_hash": "4dfb7dd8d8a895174600fe9fb0f48380", "zone_name": "example.com", "name": "google.example.com", "display_name": "google", "type": "CNAME", "prio": null, "content": "google.com", "display_content": "google.com", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 1, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } }, { "rec_id": "364797370", "rec_hash": "537311442890afedc06e5febf7171023", "zone_name": "example.com", "name": "www.example.com", "display_name": "www", "type": "CNAME", "prio": null, "content": "kami.github.io", "display_content": "kami.github.io", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "1", "props": { "proxiable": 1, "cloud_on": 1, "cf_open": 0, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, 
"pending_ssl": 0 } }, { "rec_id": "364797388", "rec_hash": "a55e09423fa23c33906131aa678e86ba", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "MX", "prio": "20", "content": "alt1.aspmx.l.google.com", "display_content": "alt1.aspmx.l.google.com", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 }, "mx": { "auto": false } }, { "rec_id": "364797382", "rec_hash": "c1bc38b678f1c07af3d506a0b4f57da4", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "MX", "prio": "20", "content": "alt2.aspmx.l.google.com", "display_content": "alt2.aspmx.l.google.com", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 }, "mx": { "auto": false } }, { "rec_id": "364797391", "rec_hash": "90b3f10c8cbe2164b87302de99cf7760", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "MX", "prio": "30", "content": "aspmx2.googlemail.com", "display_content": "aspmx2.googlemail.com", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 }, "mx": { "auto": false } }, { "rec_id": "364797385", "rec_hash": "99cad8b4ae97d559716c838276d1bf43", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "MX", "prio": "30", "content": "aspmx3.googlemail.com", "display_content": "aspmx3.googlemail.com", "ttl": "1", 
"ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 }, "mx": { "auto": false } }, { "rec_id": "364797379", "rec_hash": "79e354b6d036b5eec0fe75974e0ffa98", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "MX", "prio": "30", "content": "aspmx4.googlemail.com", "display_content": "aspmx4.googlemail.com", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 }, "mx": { "auto": false } }, { "rec_id": "364797376", "rec_hash": "ad08cc1813d4061763aecfe0d984bea3", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "MX", "prio": "30", "content": "aspmx5.googlemail.com", "display_content": "aspmx5.googlemail.com", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 }, "mx": { "auto": false } }, { "rec_id": "364797373", "rec_hash": "b38ed0e1e250e12f2c1b3a4a163e5526", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "MX", "prio": "10", "content": "aspmx.l.google.com", "display_content": "aspmx.l.google.com", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 }, "mx": { "auto": false } }, { "rec_id": "364982359", 
"rec_hash": "2f4037b298fb88b77a901725824287c0", "zone_name": "example.com", "name": "google._domainkey.example.com", "display_name": "google._domainkey", "type": "TXT", "prio": null, "content": "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDNCHa8VeffMv+X\/fRkPgHC9MN2Eh5vQqMkWy4e\/YnFbWgF1JilL1Yn9nN54A5WV7lZpCTIvuOC2CrQrIcaBpfr+8SjYsjGO91dz8cwgqZkl7mAjKs7nz8U0PsstuI9i4V3LsHC4NVGOirAgnKA4HXVhxGRuyE94+tuNJ6XDLJoNQIDAQAB", "display_content": "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDNCHa8VeffMv+X\/fRkPgHC9MN2Eh5vQqMkWy4e\/YnFbWgF1JilL1Yn9nN54A5WV7lZpCTIvuOC2CrQrIcaBpfr+8SjYsjGO91dz8cwgqZkl7mAjKs7nz8U0PsstuI9i4V3LsHC4NVGOirAgnKA4HXVhxGRuyE94+tuNJ6XDLJoNQIDAQAB", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } }, { "rec_id": "364797400", "rec_hash": "75788bfdfe314c339e444bfc1c6cfd2e", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "TXT", "prio": null, "content": "google-site-verification=Rgex8ShgIRWUlb9j0Ivw5uHllb0p9skEdJqkSMqvX_o", "display_content": "google-site-verification=Rgex8ShgIRWUlb9j0Ivw5uHllb0p9skEdJqkSMqvX_o", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } }, { "rec_id": "364797394", "rec_hash": "c588426dbf098b57cfcee86815b482d0", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "TXT", "prio": null, "content": "keybase-site-verification=L_shC4yrIjo-yM4qBDKmro9kOH8devqvHrlQtgFa2Us", "display_content": "keybase-site-verification=L_shC4yrIjo-yM4qBDKmro9kOH8devqvHrlQtgFa2Us", "ttl": "1", "ttl_ceil": 86400, "ssl_id": 
"2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } }, { "rec_id": "364797397", "rec_hash": "e20556627e805701c5d0a383458201ae", "zone_name": "example.com", "name": "example.com", "display_name": "example.com", "type": "TXT", "prio": null, "content": "v=spf1 include:_spf.google.com ~all", "display_content": "v=spf1 include:_spf.google.com ~all", "ttl": "1", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 1, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } }] } }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/rec_edit.json0000664000175000017500000000210112701023453026407 0ustar kamikami00000000000000{ "request": { "act": "rec_edit", "a": "rec_edit", "name": "test6", "tkn": "maybeno", "id": "412564825", "content": "127.0.0.4", "ttl": "120", "z": "example.com", "type": "A", "email": "example@example.com" }, "response": { "rec": { "obj": { "rec_id": "412564825", "rec_hash": "0cb296652af55023e5bf5ee6c9ad9974", "zone_name": "example.com", "name": "test6.example.com", "display_name": "test6", "type": "A", "prio": null, "content": "127.0.0.4", "display_content": "127.0.0.4", "ttl": "120", "ttl_ceil": 86400, "ssl_id": "2245194", "ssl_status": "V", "ssl_expires_on": null, "auto_ttl": 0, "service_mode": "0", "props": { "proxiable": 0, "cloud_on": 0, "cf_open": 1, "vanity_lock": 0, "ssl": 1, "expired_ssl": 0, "expiring_ssl": 0, "pending_ssl": 0 } } } }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/ipv46.json0000664000175000017500000000005112701023453025603 0ustar kamikami00000000000000{ "result": "success", "msg": null } 
apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/stats.json0000664000175000017500000000213412701023453025775 0ustar kamikami00000000000000{ "response": { "result": { "timeZero": 1448773672000, "timeEnd": 1449378472000, "count": 1, "has_more": false, "objs": [{ "cachedServerTime": 1449378474000, "cachedExpryTime": 1449379074000, "trafficBreakdown": { "pageviews": { "regular": 1804, "threat": 80, "crawler": 438 }, "uniques": { "regular": 1914, "threat": 80, "crawler": 438 } }, "bandwidthServed": { "cloudflare": 422053.22949219, "user": 408767.30664062 }, "requestsServed": { "cloudflare": 29333, "user": 13367 }, "pro_zone": false, "pageLoadTime": null, "currentServerTime": 1449378474000, "interval": 30, "zoneCDate": 1373665252000, "userSecuritySetting": "Medium", "dev_mode": 0, "ipv46": 3, "ob": 1, "cache_lvl": "agg", "outboundLinks": "disabled" }] } }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/cache_lvl.json0000664000175000017500000000005112701023453026553 0ustar kamikami00000000000000{ "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/cloudflare/nul_.json0000664000175000017500000000020312701023453025567 0ustar kamikami00000000000000{ "response": { "result": { "ip": "127.0.0.1", "action": "NUL" } }, "result": "success", "msg": null } apache-libcloud-2.2.1/libcloud/test/dns/fixtures/godaddy/0000775000175000017500000000000013160535110023235 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/dns/fixtures/godaddy/v1_domains_agreements.json0000664000175000017500000000044612701023453030410 0ustar kamikami00000000000000[ { "agreementKey": "DNRA", "title": "Domain Name Registration Agreement", "url": "http://www.secureserver.net/agreements/ShowDoc.aspx?pageid=7959&pl_id=1", "content": "\n\nDomain Name Registration and Customer Service Agreement\n

HTTP Status 401 - Bad credentials


type Status report

message Bad credentials

description This request requires HTTP authentication (Bad credentials).


Apache Tomcat/6.0.35

apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/privilege_errors.html0000664000175000017500000000237712701023453030255 0ustar kamikami00000000000000 Apache Tomcat/6.0.35 - Error report

HTTP Status 403 - Access is denied


type Status report

message Access is denied

description Access to the specified resource (Access is denied) has been forbidden.


Apache Tomcat/6.0.35

apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/login.xml0000664000175000017500000000157512701023453025636 0ustar kamikami00000000000000 true ABIQUO Standard user 2 en_US Standard user c69a39bd64ffb77ea7ee3369dce742f3 User apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_allocated.xml0000664000175000017500000000754513153541406031744 0ustar kamikami00000000000000 10311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf32614590130080.12.23.43 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy.xml0000664000175000017500000000067712701023453031645 0ustar kamikami00000000000000 You can keep track of the progress in the link apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deployed.xml0000664000175000017500000000753613153541406031621 0ustar kamikami00000000000000 10311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128ON914f8125-33d3-4fe3-a162-5d6f5bf32614590130080.12.23.43 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy.xml0000664000175000017500000000066512701023453031277 0ustar kamikami00000000000000 You can keep track of the progress in the link apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6.xml0000664000175000017500000000326012701023453026771 0ustar kamikami00000000000000 0 0 6 libcloud <connections/> 0 DEPLOYED apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vms.xml0000664000175000017500000000744613153541406027674 0ustar kamikami00000000000000 10311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128NOT_ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf326140300 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task_failed.xml0000664000175000017500000000276612701023453033250 0ustar kamikami00000000000000 Undeploy task's power off on virtual machine with id 3 1da8c8b6-86f6-49ef-9d29-57dcc73b875a.103a1a6b-4de2-48d8-9a38-8698561020b8 
1da8c8b6-86f6-49ef-9d29-57dcc73b875a UNKNOWN DONE 1358012669 POWER_OFF Undeploy task's deconfigure on virtual machine with id 3 1da8c8b6-86f6-49ef-9d29-57dcc73b875a.e4250ca9-505d-4640-9ad2-fb101f9e9978 1da8c8b6-86f6-49ef-9d29-57dcc73b875a UNKNOWN DONE 1358012669 DECONFIGURE 3 FINISHED_UNSUCCESSFULLY 1da8c8b6-86f6-49ef-9d29-57dcc73b875a 1358012669 UNDEPLOY admin apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4.xml0000664000175000017500000000631312701023453025520 0ustar kamikami00000000000000 0 0 0 0 0 0 0 0 0 0 0 0 KVM 4 vdc_kvm
192.168.0.0
192.168.0.1 2 24 default_private_network INTERNAL
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task.xml0000664000175000017500000000274712701023453032324 0ustar kamikami00000000000000 Deploy task's configure on virtual machine with id 3 b44fe278-6b0f-4dfb-be81-7c03006a93cb.a04623bd-7b2c-4c23-9441-aeaa39dd4893 b44fe278-6b0f-4dfb-be81-7c03006a93cb UNKNOWN DONE 1357872859 CONFIGURE Deploy task's power on on virtual machine with id 3 b44fe278-6b0f-4dfb-be81-7c03006a93cb.2fdee19a-4fad-4040-bc94-7acfd6fedc48 b44fe278-6b0f-4dfb-be81-7c03006a93cb UNKNOWN DONE 1357872859 POWER_ON 3 FINISHED_SUCCESSFULLY b44fe278-6b0f-4dfb-be81-7c03006a93cb 1357872859 DEPLOY user apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/ent_1.xml0000664000175000017500000000474112701023453025532 0ustar kamikami00000000000000 0 0 0 0 0 0 0 0 0 0 0 0 1 false Abiquo 0 0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task.xml0000664000175000017500000000203512701023453032655 0ustar kamikami00000000000000 Reset task's reset on virtual machine with id 3 a8c9818e-f389-45b7-be2c-3db3a9689940.5f42a7fc-82f3-4121-be26-da62eb8b9b92 a8c9818e-f389-45b7-be2c-3db3a9689940 ROLLBACK_DONE FAILED 1357873142 RESET 3 FINISHED_SUCCESSFULLY a8c9818e-f389-45b7-be2c-3db3a9689940 1357873142 RESET user apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms_allocated.xml0000664000175000017500000000744213153541406031701 0ustar kamikami00000000000000 10311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf326140300 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapps.xml0000664000175000017500000000673712701023453026743 0ustar kamikami00000000000000 2 006libcloud0DEPLOYED 005libcloud_test_group0NOT_DEPLOYED apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_needs_sync.xml0000664000175000017500000000320112701023453031175 0ustar kamikami00000000000000 0 0 5 libcloud_test_group 0 NEEDS_SYNC 
././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task_failed.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task_failed.x0000664000175000017500000000203712701023453033632 0ustar kamikami00000000000000 Reset task's reset on virtual machine with id 3 a8c9818e-f389-45b7-be2c-3db3a9689940.5f42a7fc-82f3-4121-be26-da62eb8b9b92 a8c9818e-f389-45b7-be2c-3db3a9689940 ROLLBACK_DONE FAILED 1357873142 RESET 3 FINISHED_UNSUCCESSFULLY a8c9818e-f389-45b7-be2c-3db3a9689940 1357873142 RESET user apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vm_3_not_allocated.xml0000664000175000017500000000762113153541406032616 0ustar kamikami00000000000000 1 0 3 1 1 ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614 128 NOT_ALLOCATED 914f8125-33d3-4fe3-a162-5d6f5bf32614 0 3 0 0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcreps.xml0000664000175000017500000000227712701023453027074 0ustar kamikami00000000000000 2virtual image repo010.60.1.72:/opt/vm_repository0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_creation_ok.xml0000664000175000017500000000675513153541406032071 0ustar kamikami00000000000000 10311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128NOT_ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf326140 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_creation_ok.xml0000664000175000017500000000320312701023453031116 0ustar kamikami00000000000000 0 0 5 libcloud_test_group 0 NOT_DEPLOYED apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_templates.xml0000664000175000017500000001016512701023453031203 0ustar kamikami00000000000000 11m0n0wall-vhdm0n0wall image in VHD format ready for XenServer and 
HyperV1/rs/abiport9000/ovf/269/m0n0wall-1.3b18-i386-flat.vmdk-VHD_SPARSE.vhdVHD_SPARSE10490880112827262976false02013-01-10T20:25:12-05:00SYSTEMfalsehttp://icons.abiquo.com/monowall.jpg 19 RHEL6 Build Bot RHEL6 Build Bot 1/rs/abiport9000/ovf/73/build-bot-rhel6-disk1.vmdk VMDK_STREAM_OPTIMIZED 351064576 1 1024 4294967296 false 0 2013-01-10T20:25:12-05:00 SYSTEM false http://rs.bcn.abiquo.com:9000/public/icons/q.png apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task_failed.xml0000664000175000017500000000273112701023453033621 0ustar kamikami00000000000000 Deploy task's configure on virtual machine with id 3 b44fe278-6b0f-4dfb-be81-7c03006a93cb.a04623bd-7b2c-4c23-9441-aeaa39dd4893 b44fe278-6b0f-4dfb-be81-7c03006a93cb UNKNOWN DONE 1357872859 CONFIGURE Deploy task's power on on virtual machine with id 3 b44fe278-6b0f-4dfb-be81-7c03006a93cb.2fdee19a-4fad-4040-bc94-7acfd6fedc48 b44fe278-6b0f-4dfb-be81-7c03006a93cb UNKNOWN DONE 1357872859 POWER_ON 3 ABORTED b44fe278-6b0f-4dfb-be81-7c03006a93cb 1357872859 DEPLOY user apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms.xml0000664000175000017500000000744613153541406027675 0ustar kamikami00000000000000 10311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128NOT_ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf326140300 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdcs.xml0000664000175000017500000000665612701023453025472 0ustar kamikami00000000000000 0 0 0 0 0 0 0 0 0 0 0 0 KVM 4 vdc_kvm
192.168.0.0
192.168.0.1 2 24 default_private_network INTERNAL
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy.xml0000664000175000017500000000052612701023453030712 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset.xml0000664000175000017500000000067712701023453031130 0ustar kamikami00000000000000 You can keep track of the progress in the link apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/not_found_error.xml0000664000175000017500000000030112701023453027714 0ustar kamikami00000000000000 DC-0 The requested datacenter does not exist apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_template_11.xml0000664000175000017500000000405212701023453031317 0ustar kamikami00000000000000 11 m0n0wall-vhd m0n0wall image in VHD format ready for XenServer and HyperV 1/rs/abiport9000/ovf/269/m0n0wall-1.3b18-i386-flat.vmdk-VHD_SPARSE.vhd VHD_SPARSE 10490880 1 128 27262976 false 0 2013-01-10T20:25:12-05:00 SYSTEM false http://icons.abiquo.com/monowall.jpg apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3.xml0000664000175000017500000000755113153541406027731 0ustar kamikami00000000000000 10311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128NOT_ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf32614590130080.12.23.43 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2.xml0000664000175000017500000000216012701023453027121 0ustar kamikami00000000000000 2virtual image repo010.60.1.72:/opt/vm_repository0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_nics.xml0000664000175000017500000000165712701023453030741 0ustar kamikami00000000000000 64192.168.0.252:54:00:b7:f7:850 36434.34.34.552:54:00:b7:f7:880 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset_task.xml0000664000175000017500000000203512701023453032140 0ustar kamikami00000000000000 Reset task's reset on virtual machine with id 3 
a8c9818e-f389-45b7-be2c-3db3a9689940.5f42a7fc-82f3-4121-be26-da62eb8b9b92 a8c9818e-f389-45b7-be2c-3db3a9689940 ROLLBACK_DONE FAILED 1357873142 RESET 3 FINISHED_SUCCESSFULLY a8c9818e-f389-45b7-be2c-3db3a9689940 1357873142 RESET user apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/dcs.xml0000664000175000017500000001247112701023453025274 0ustar kamikami00000000000000 2 barcelona barcelona 3 1 VIRTUAL_FACTORY http://10.60.12.7:80/virtualfactory 4 1 VIRTUAL_SYSTEM_MONITOR http://10.60.12.7:80/vsm 5 1 APPLIANCE_MANAGER http://10.60.12.7:80/am 6 1 NODE_COLLECTOR http://10.60.12.7:80/nodecollector 7 1 STORAGE_SYSTEM_MONITOR http://10.60.12.7:80/ssm 8 1 DHCP_SERVICE omapi://10.60.12.7:7911 9 1 BPM_SERVICE http://10.60.12.7:80/bpm-async Abiquo apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task.xml0000664000175000017500000000276412701023453031742 0ustar kamikami00000000000000 Undeploy task's power off on virtual machine with id 3 1da8c8b6-86f6-49ef-9d29-57dcc73b875a.103a1a6b-4de2-48d8-9a38-8698561020b8 1da8c8b6-86f6-49ef-9d29-57dcc73b875a UNKNOWN DONE 1358012669 POWER_OFF Undeploy task's deconfigure on virtual machine with id 3 1da8c8b6-86f6-49ef-9d29-57dcc73b875a.e4250ca9-505d-4640-9ad2-fb101f9e9978 1da8c8b6-86f6-49ef-9d29-57dcc73b875a UNKNOWN DONE 1358012669 DECONFIGURE 3 FINISHED_SUCCESSFULLY 1da8c8b6-86f6-49ef-9d29-57dcc73b875a 1358012669 UNDEPLOY admin apache-libcloud-2.2.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5.xml0000664000175000017500000000320312701023453026765 0ustar kamikami00000000000000 0 0 5 libcloud_test_group 0 NOT_DEPLOYED apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/0000775000175000017500000000000013160535107024510 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/create_server_gb1_a.json0000664000175000017500000000470012701023453031262 0ustar kamikami00000000000000{ "account": { "id": "acc-tqs4c", "name": "bbctest", "resource_type": 
"account", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/account" }, "cloud_ips": [], "console_token": null, "console_token_expires": null, "console_url": null, "created_at": "2012-04-12T10:58:49Z", "deleted_at": null, "fqdn": "srv-p61uj.gb1.brightbox.com", "hostname": "srv-p61uj", "id": "srv-p61uj", "image": { "arch": "x86_64", "created_at": "2012-04-11T01:36:19Z", "description": "Standard server with cloud-init", "id": "img-n4yek", "name": "Ubuntu Precise 12.04 LTS server", "owner": "acc-tqs4c", "resource_type": "image", "source": "precise-amd64-17903.gz", "status": "deprecated", "url": "https://api.gb1.brightbox.com/1.0/images/img-n4yek", "username": "ubuntu" }, "interfaces": [ { "id": "int-ctud9", "ipv4_address": "10.240.156.30", "ipv6_address": "2a02:1348:14c:2707:24:19ff:fef0:9c1e", "mac_address": "02:24:19:f0:9c:1e", "resource_type": "interface", "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-ctud9" } ], "name": "Test Node", "resource_type": "server", "server_groups": [ { "created_at": "2011-08-24T08:41:56Z", "default": true, "description": "All new servers are added to this group unless specified otherwise.", "id": "grp-irgkb", "name": "default", "resource_type": "server_group", "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-irgkb" } ], "server_type": { "cores": 2, "disk_size": 20480, "handle": "nano", "id": "typ-4nssg", "name": "Brightbox Nano Instance", "ram": 512, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-4nssg" }, "snapshots": [], "started_at": null, "status": "creating", "url": "https://api.gb1.brightbox.com/1.0/servers/srv-p61uj", "user_data": null, "zone": { "handle": "gb1-a", "id": "zon-6mxqw", "resource_type": "zone", "url": "https://api.gb1.brightbox.com/1.0/zones/zon-6mxqw" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/list_server_types.json0000664000175000017500000000443612701023453031173 0ustar 
kamikami00000000000000[ { "cores": 2, "disk_size": 20480, "handle": "nano", "id": "typ-4nssg", "name": "Brightbox Nano Instance", "ram": 512, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-4nssg" }, { "cores": 8, "disk_size": 163840, "handle": "medium", "id": "typ-qdiwq", "name": "Brightbox Medium Instance", "ram": 4096, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-qdiwq" }, { "cores": 4, "disk_size": 81920, "handle": "small", "id": "typ-urtky", "name": "Brightbox Small Instance", "ram": 2048, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-urtky" }, { "cores": 8, "disk_size": 327680, "handle": "large", "id": "typ-mlbt7", "name": "Brightbox Large Instance", "ram": 8192, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-mlbt7" }, { "cores": 4, "disk_size": 40960, "handle": "mini", "id": "typ-iqisj", "name": "Brightbox Mini Instance", "ram": 1024, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-iqisj" }, { "cores": 8, "disk_size": 655360, "handle": "xl", "id": "typ-wdicw", "name": "Brightbox XL Instance", "ram": 16384, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-wdicw" }, { "cores": 8, "disk_size": 1310720, "handle": "xxl", "id": "typ-lr76m", "name": "Brightbox XXL Instance", "ram": 32768, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-lr76m" } ] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/create_server_gb1_b.json0000664000175000017500000000470112701023453031264 0ustar kamikami00000000000000{ "account": { "id": "acc-tqs4c", "name": "bbctest", "resource_type": "account", "status": 
"active", "url": "https://api.gb1.brightbox.com/1.0/account" }, "cloud_ips": [], "console_token": null, "console_token_expires": null, "console_url": null, "created_at": "2012-04-12T11:01:51Z", "deleted_at": null, "fqdn": "srv-nnumd.gb1.brightbox.com", "hostname": "srv-nnumd", "id": "srv-nnumd", "image": { "arch": "x86_64", "created_at": "2012-04-11T01:36:19Z", "description": "Standard server with cloud-init", "id": "img-n4yek", "name": "Ubuntu Precise 12.04 LTS server", "owner": "acc-tqs4c", "resource_type": "image", "source": "precise-amd64-17903.gz", "status": "deprecated", "url": "https://api.gb1.brightbox.com/1.0/images/img-n4yek", "username": "ubuntu" }, "interfaces": [ { "id": "int-2chhk", "ipv4_address": "10.232.142.194", "ipv6_address": "2a02:1348:14d:23b0:24:19ff:fee8:8ec2", "mac_address": "02:24:19:e8:8e:c2", "resource_type": "interface", "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-2chhk" } ], "name": "Test Node", "resource_type": "server", "server_groups": [ { "created_at": "2011-08-24T08:41:56Z", "default": true, "description": "All new servers are added to this group unless specified otherwise.", "id": "grp-irgkb", "name": "default", "resource_type": "server_group", "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-irgkb" } ], "server_type": { "cores": 2, "disk_size": 20480, "handle": "nano", "id": "typ-4nssg", "name": "Brightbox Nano Instance", "ram": 512, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-4nssg" }, "snapshots": [], "started_at": null, "status": "creating", "url": "https://api.gb1.brightbox.com/1.0/servers/srv-nnumd", "user_data": null, "zone": { "handle": "gb1-b", "id": "zon-remk1", "resource_type": "zone", "url": "https://api.gb1.brightbox.com/1.0/zones/zon-remk1" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/list_servers.json0000664000175000017500000001273112701023453030127 0ustar kamikami00000000000000 [ { "account": { "id": 
"acc-tqs4c", "name": "bbctest", "resource_type": "account", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/account" }, "cloud_ips": [ { "id": "cip-tlrp3", "public_ip": "109.107.35.16", "resource_type": "cloud_ip", "reverse_dns": "cip-109-107-35-16.gb1.brightbox.com", "status": "mapped", "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-tlrp3" } ], "created_at": "2010-10-14T10:02:38Z", "deleted_at": null, "fqdn": "srv-xvpn7.gb1.brightbox.com", "hostname": "srv-xvpn7", "id": "srv-xvpn7", "image": { "arch": "i686", "created_at": "2010-10-11T15:23:51Z", "description": "", "id": "img-arm8f", "name": "Snapshot of srv-vf2a4 11 Oct 15:23", "owner": "acc-tqs4c", "resource_type": "image", "source": "srv-vf2a4", "status": "deleted", "url": "https://api.gb1.brightbox.com/1.0/images/img-arm8f", "username": null }, "interfaces": [ { "id": "int-519up", "ipv4_address": "10.74.210.210", "mac_address": "02:24:19:4a:d2:d2", "resource_type": "interface", "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-519up" } ], "name": "Ubuntu Image Builder Box", "resource_type": "server", "server_groups": [ { "created_at": "2011-08-24T08:41:56Z", "default": true, "description": "All new servers are added to this group unless specified otherwise.", "id": "grp-irgkb", "name": "default", "resource_type": "server_group", "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-irgkb" } ], "server_type": { "cores": 4, "disk_size": 81920, "handle": "small", "id": "typ-urtky", "name": "Brightbox Small Instance", "ram": 2048, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-urtky" }, "snapshots": [], "started_at": "2012-03-28T15:25:56Z", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/servers/srv-xvpn7", "zone": { "handle": "gb1-a", "id": "zon-6mxqw", "resource_type": "zone", "url": "https://api.gb1.brightbox.com/1.0/zones/zon-6mxqw" } }, { "account": { "id": "acc-tqs4c", "name": "bbctest", 
"resource_type": "account", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/account" }, "cloud_ips": [], "created_at": "2012-01-30T14:42:28Z", "deleted_at": null, "fqdn": "srv-742vn.gb1.brightbox.com", "hostname": "srv-742vn", "id": "srv-742vn", "image": { "arch": "x86_64", "created_at": "2012-01-30T13:25:09Z", "description": "", "id": "img-j93gd", "name": "Snapshot of srv-k0pug 30 Jan 13:25", "owner": "acc-tqs4c", "resource_type": "image", "source": "srv-k0pug", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/images/img-j93gd", "username": null }, "interfaces": [ { "id": "int-ztqbx", "ipv4_address": "10.240.228.234", "ipv6_address": "2a02:1348:14c:393a:24:19ff:fef0:e4ea", "mac_address": "02:24:19:f0:e4:ea", "resource_type": "interface", "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-ztqbx" } ], "name": "Kernel builder", "resource_type": "server", "server_groups": [ { "created_at": "2011-08-24T08:41:56Z", "default": true, "description": "All new servers are added to this group unless specified otherwise.", "id": "grp-irgkb", "name": "default", "resource_type": "server_group", "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-irgkb" } ], "server_type": { "cores": 8, "disk_size": 163840, "handle": "medium", "id": "typ-qdiwq", "name": "Brightbox Medium Instance", "ram": 4096, "resource_type": "server_type", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-qdiwq" }, "snapshots": [], "started_at": "2012-03-28T15:26:43Z", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/servers/srv-742vn", "zone": { "handle": "gb1-a", "id": "zon-6mxqw", "resource_type": "zone", "url": "https://api.gb1.brightbox.com/1.0/zones/zon-6mxqw" } } ] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/list_images.json0000664000175000017500000000562012701023453027702 0ustar kamikami00000000000000[ { "ancestor": { "arch": "i686", "created_at": "2010-10-04T19:03:37Z", "description": "Creates a 
blank disk", "id": "img-6lybc", "name": "Blank Image", "owner": "acc-tqs4c", "resource_type": "image", "source": "blank_10G", "status": "deleted", "url": "https://api.gb1.brightbox.com/1.0/images/img-6lybc", "username": null }, "arch": "i686", "compatibility_mode": false, "created_at": "2010-10-02T19:03:37Z", "description": "login: root using stored ssh key", "disk_size": 1086, "id": "img-99q79", "min_ram": null, "name": "CentOS 5.5 server", "official": true, "owner": "acc-tqs4c", "public": true, "resource_type": "image", "source": "srv-s4mfq", "source_type": "upload", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/images/img-99q79", "username": null, "virtual_size": 10244 }, { "ancestor": { "arch": "i686", "created_at": "2010-10-04T19:03:37Z", "description": "Creates a blank disk", "id": "img-6lybc", "name": "Blank Image", "owner": "acc-tqs4c", "resource_type": "image", "source": "blank_10G", "status": "deleted", "url": "https://api.gb1.brightbox.com/1.0/images/img-6lybc", "username": null }, "arch": "x86_64", "compatibility_mode": false, "created_at": "2010-10-03T19:03:37Z", "description": "login: root using stored ssh key", "disk_size": 1133, "id": "img-pnqnc", "min_ram": null, "name": "CentOS 5.5 server", "official": true, "owner": "acc-tqs4c", "public": true, "resource_type": "image", "source": "srv-53fez", "source_type": "upload", "status": "available", "url": "https://api.gb1.brightbox.com/1.0/images/img-pnqnc", "username": null, "virtual_size": 10240 }, { "ancestor": null, "arch": "i686", "compatibility_mode": false, "created_at": "2012-01-22T05:36:24Z", "description": "Standard server with cloud-init", "disk_size": 671, "id": "img-joo06", "min_ram": null, "name": "Ubuntu Oneiric 11.10 server", "official": false, "owner": "acc-tqs4c", "public": true, "resource_type": "image", "source": "oneiric-i386-20178.gz", "source_type": "upload", "status": "deprecated", "url": "https://api.gb1.brightbox.com/1.0/images/img-joo06", "username": "ubuntu", 
"virtual_size": 1025 } ] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/create_server.json0000664000175000017500000000316612701023453030236 0ustar kamikami00000000000000{"id": "srv-3a97e", "url": "servers/(server_id)", "name": "My web server", "status": "active", "hostname": "srv-3a97e.gb1.brightbox.com", "created_at": "", "deleted_at": "", "started_at": "", "account": {"id": "acc-3jd8s", "url": "accounts/(account_id)", "name": "Brightbox Systems Ltd.", "status": "verified", "ram_limit": 20480, "ram_used": 2048, "limits_cloudips": 5}, "image": {"id": "img-9vxqi", "url": "images/(image_id)", "name": "Brightbox Lucid 32", "status": "available", "description": "Jeremy's debian ec2 image", "source": "jeremy_debian-32_ec2", "source_type": "upload", "arch": "32-bit", "created_at": "", "owner": "acc-bright"}, "server_type": {"id": "typ-a97e6", "url": "server_types/(server_type_id)", "handle": "nano", "name": "Brightbox Nano", "status": "", "cores": 2, "ram": 2048, "disk_size": ""}, "zone": {"id": "zon-8ja0a", "url": "zones/(zone_id)", "handle": "gb1-a"}, "snapshots": [{"id": "img-9vxqi", "url": "images/(image_id)", "name": "Brightbox Lucid 32", "status": "available", "description": "Jeremy's debian ec2 image", "source": "jeremy_debian-32_ec2", "source_type": "upload", "arch": "32-bit", "created_at": "", "owner": "acc-bright"}], "cloud_ips": [{"id": "cip-ja8ub", "url": "cloud_ips/(cloud_ip_id)", "public_ip": "109.107.42.129", "status": "mapped", "reverse_dns": "cip-109-107-42-129.gb1.brightbox.com"}], "interfaces": [{"id": "int-mc3a9", "url": "interfaces/(interface_id)", "mac_address": "02:24:19:6e:18:36", "ipv4_address": "10.110.24.54"}]}apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/token.json0000664000175000017500000000010712701023453026515 0ustar kamikami00000000000000{"access_token":"k1bjflpsaj8wnrbrwzad0eqo36nxiha", "expires_in": 3600} 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/list_cloud_ips.json0000664000175000017500000000636012701023453030420 0ustar kamikami00000000000000[ { "account": { "id": "acc-tqs4c", "name": "bbctest", "resource_type": "account", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/account" }, "id": "cip-tlrp3", "interface": null, "load_balancer": null, "public_ip": "109.107.35.16", "resource_type": "cloud_ip", "reverse_dns": "cip-109-107-35-16.gb1.brightbox.com", "server": null, "server_group": null, "status": "unmapped", "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-tlrp3" }, { "account": { "id": "acc-tqs4c", "name": "bbctest", "resource_type": "account", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/account" }, "id": "cip-w8vbt", "interface": null, "load_balancer": null, "public_ip": "109.107.35.76", "resource_type": "cloud_ip", "reverse_dns": "cip-109-107-35-76.gb1.brightbox.com", "server": null, "server_group": null, "status": "unmapped", "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-w8vbt" }, { "account": { "id": "acc-tqs4c", "name": "bbctest", "resource_type": "account", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/account" }, "id": "cip-uswab", "interface": { "id": "int-ztqbx", "ipv4_address": "10.240.228.234", "ipv6_address": "2a02:1348:14c:393a:24:19ff:fef0:e4ea", "mac_address": "02:24:19:f0:e4:ea", "resource_type": "interface", "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-ztqbx" }, "load_balancer": null, "public_ip": "109.107.35.105", "resource_type": "cloud_ip", "reverse_dns": "cip-109-107-35-105.gb1.brightbox.com", "server": { "created_at": "2012-01-30T14:42:28Z", "deleted_at": null, "fqdn": "srv-742vn.gb1.brightbox.com", "hostname": "srv-742vn", "id": "srv-742vn", "name": "Kernel builder", "resource_type": "server", "started_at": "2012-03-28T15:26:43Z", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/servers/srv-742vn" }, "server_group": null, 
"status": "mapped", "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-uswab" }, { "account": { "id": "acc-tqs4c", "name": "bbctest", "resource_type": "account", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/account" }, "id": "cip-ui4n1", "interface": null, "load_balancer": null, "public_ip": "109.107.37.135", "resource_type": "cloud_ip", "reverse_dns": "cip-109-107-37-135.gb1.brightbox.com", "server": null, "server_group": null, "status": "unmapped", "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-ui4n1" } ] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/list_zones.json0000664000175000017500000000053012701023453027566 0ustar kamikami00000000000000[ { "handle": "gb1-a", "id": "zon-6mxqw", "resource_type": "zone", "url": "https://api.gb1.brightbox.com/1.0/zones/zon-6mxqw" }, { "handle": "gb1-b", "id": "zon-remk1", "resource_type": "zone", "url": "https://api.gb1.brightbox.com/1.0/zones/zon-remk1" } ] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/brightbox/create_cloud_ip.json0000664000175000017500000000105612701023453030522 0ustar kamikami00000000000000{ "account": { "id": "acc-tqs4c", "name": "bbctest", "resource_type": "account", "status": "active", "url": "https://api.gb1.brightbox.com/1.0/account" }, "id": "cip-jsjc5", "interface": null, "load_balancer": null, "public_ip": "109.107.37.234", "resource_type": "cloud_ip", "reverse_dns": "cip-109-107-37-234.gb1.brightbox.com", "server": null, "server_group": null, "status": "unmapped", "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-jsjc5" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/0000775000175000017500000000000013160535110024522 5ustar kamikami00000000000000././@LongLink0000000000000000000000000000015100000000000011212 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests0000664000175000017500000011053312701023453034047 0ustar kamikami00000000000000 accountId 282402 createDate 2013-11-06T14:38:36+01:00 dedicatedAccountHostOnlyFlag 0 domain example.com fullyQualifiedDomainName libcloud-testing1.example.com hostname libcloud-testing1 id 2875152 lastPowerStateId lastVerifiedDate maxCpu 1 maxCpuUnits CORE maxMemory 2048 metricPollDate modifyDate 2013-11-06T14:41:25+01:00 startCpus 1 statusId 1001 uuid ab98fc82-9d74-bc34-6701-f88957ebbba8 billingItem allowCancellationFlag 1 cancellationDate categoryCode guest_core createDate 2013-11-06T14:38:38+01:00 currentHourlyCharge .112 cycleStartDate 2013-11-06T14:43:20+01:00 description 1 x 2.0 GHz Core domainName example.com hostName libcloud-testing1 hourlyRecurringFee .056 hoursUsed 2 id 16447700 laborFee 0 laborFeeTaxRate .21 lastBillDate 2013-11-06T14:43:20+01:00 modifyDate 2013-11-06T14:43:20+01:00 nextBillDate 2013-11-28T07:00:00+01:00 oneTimeFee 0 oneTimeFeeTaxRate .21 orderItemId 22662589 parentId recurringFee .112 recurringFeeTaxRate .21 recurringMonths 1 serviceProviderId 1 setupFee 0 setupFeeTaxRate .21 resourceTableId 2875152 datacenter id 168642 longName San Jose 1 name sjc01 globalIdentifier 9ee84f87-e558-4548-9b89-fe7d997706b8 operatingSystem hardwareId id 2200995 manufacturerLicenseInstance passwords createDate 2013-11-06T14:38:56+01:00 id 1856975 modifyDate 2013-11-06T14:38:56+01:00 password L3TJVubf port softwareId 2200995 username root softwareLicense id 1523 softwareDescriptionId 1163 softwareDescription controlPanel 0 id 1163 longDescription Debian / Debian / 7.0.0-64 Minimal for CCI manufacturer Debian name Debian operatingSystem 1 referenceCode DEBIAN_7_64 upgradeSoftwareDescriptionId upgradeSwDescId version 7.0.0-64 Minimal for CCI virtualLicense 0 
virtualizationPlatform 0 requiredUser root powerState keyName RUNNING name Running primaryBackendIpAddress 10.55.43.130 primaryIpAddress 50.23.95.202 accountId 282402 createDate 2013-11-06T15:14:52+01:00 dedicatedAccountHostOnlyFlag 0 domain example.com fullyQualifiedDomainName libcloud-testing2.example.com hostname libcloud-testing2 id 2875213 lastPowerStateId lastVerifiedDate maxCpu 1 maxCpuUnits CORE maxMemory 2048 metricPollDate modifyDate 2013-11-06T15:17:56+01:00 startCpus 1 statusId 1001 uuid 8f10bea1-3e26-70d7-d581-0d9e820eae0c billingItem allowCancellationFlag 1 cancellationDate categoryCode guest_core createDate 2013-11-06T15:14:55+01:00 currentHourlyCharge .056 cycleStartDate 2013-11-06T15:19:50+01:00 description 1 x 2.0 GHz Core domainName example.com hostName libcloud-testing2 hourlyRecurringFee .056 hoursUsed 1 id 16447908 laborFee 0 laborFeeTaxRate .21 lastBillDate 2013-11-06T15:19:50+01:00 modifyDate 2013-11-06T15:19:50+01:00 nextBillDate 2013-11-28T07:00:00+01:00 oneTimeFee 0 oneTimeFeeTaxRate .21 orderItemId 22663091 parentId recurringFee .056 recurringFeeTaxRate .21 recurringMonths 1 serviceProviderId 1 setupFee 0 setupFeeTaxRate .21 resourceTableId 2875213 datacenter id 168642 longName San Jose 1 name sjc01 globalIdentifier 5c704e34-6ee7-4efe-9722-af9d406fa930 operatingSystem hardwareId id 2201027 manufacturerLicenseInstance softwareLicense id 1523 softwareDescriptionId 1163 softwareDescription controlPanel 0 id 1163 longDescription Debian / Debian / 7.0.0-64 Minimal for CCI manufacturer Debian name Debian operatingSystem 1 referenceCode DEBIAN_7_64 upgradeSoftwareDescriptionId upgradeSwDescId version 7.0.0-64 Minimal for CCI virtualLicense 0 virtualizationPlatform 0 requiredUser root powerState keyName INITIATING name Running primaryBackendIpAddress 10.55.43.131 primaryIpAddress 50.23.95.203 accountId 282402 createDate 2013-11-06T15:36:53+01:00 dedicatedAccountHostOnlyFlag 0 domain example.com fullyQualifiedDomainName 
libcloud-testing.example.com hostname libcloud-testing id 2875273 lastPowerStateId lastVerifiedDate maxCpu 1 maxCpuUnits CORE maxMemory 2048 metricPollDate modifyDate 2013-11-06T15:39:35+01:00 startCpus 1 statusId 1001 uuid f86371c5-103b-34d3-ae27-e4dafa1c4718 billingItem allowCancellationFlag 1 cancellationDate categoryCode guest_core createDate 2013-11-06T15:36:55+01:00 currentHourlyCharge .056 cycleStartDate 2013-11-06T15:41:31+01:00 description 1 x 2.0 GHz Core domainName example.com hostName libcloud-testing hourlyRecurringFee .056 hoursUsed 1 id 16448162 laborFee 0 laborFeeTaxRate .21 lastBillDate 2013-11-06T15:41:31+01:00 modifyDate 2013-11-06T15:41:31+01:00 nextBillDate 2013-11-28T07:00:00+01:00 oneTimeFee 0 oneTimeFeeTaxRate .21 orderItemId 22663578 parentId recurringFee .056 recurringFeeTaxRate .21 recurringMonths 1 serviceProviderId 1 setupFee 0 setupFeeTaxRate .21 resourceTableId 2875273 datacenter id 168642 longName San Jose 1 name sjc01 globalIdentifier e8ab9d1c-edd8-4a1a-a13c-ff74838b5ab6 operatingSystem hardwareId id 2201049 manufacturerLicenseInstance passwords createDate 2013-11-06T15:37:10+01:00 id 1857066 modifyDate 2013-11-06T15:37:10+01:00 password HmyHw89J port softwareId 2201049 username root softwareLicense id 1523 softwareDescriptionId 1163 softwareDescription controlPanel 0 id 1163 longDescription Debian / Debian / 7.0.0-64 Minimal for CCI manufacturer Debian name Debian operatingSystem 1 referenceCode DEBIAN_7_64 upgradeSoftwareDescriptionId upgradeSwDescId version 7.0.0-64 Minimal for CCI virtualLicense 0 virtualizationPlatform 0 requiredUser root powerState keyName RUNNING name Running primaryBackendIpAddress 10.55.43.132 primaryIpAddress 50.23.95.204 ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_createObject.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_create0000664000175000017500000000224512701023453033770 0ustar kamikami00000000000000 id 1 key ssh-key label my-key-pair label my-key-pair fingerprint 1f:51:ae:28:bf:89:e9:d8:1f:25:5d:37:2d:7d:b8:ca:9f:f5:f1:6f apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Account_getSshKeys.xml0000664000175000017500000000356212701023453033560 0ustar kamikami00000000000000 id 1 key ssh-key label test1 fingerprint 00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00 id 2 key ssh-key label test2 fingerprint 00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00 ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject0000664000175000017500000002560112701023453033775 0ustar kamikami00000000000000 accountId 282402 createDate 2013-11-12T19:33:57+01:00 dedicatedAccountHostOnlyFlag 0 domain example.com fullyQualifiedDomainName libcloud-testing.example.com hostname libcloud-testing id 2905761 lastPowerStateId lastVerifiedDate maxCpu 1 maxCpuUnits CORE maxMemory 2048 metricPollDate modifyDate 2013-11-12T19:36:55+01:00 startCpus 1 statusId 1001 uuid cbc33604-afd0-4820-57c3-6c68ae7c5fe0 billingItem allowCancellationFlag 1 cancellationDate categoryCode guest_core createDate 2013-11-12T19:33:59+01:00 currentHourlyCharge .056 cycleStartDate 2013-11-12T19:39:03+01:00 description 1 x 2.0 GHz Core domainName example.com hostName libcloud-testing hourlyRecurringFee .056 hoursUsed 1 id 16538495 laborFee 0 laborFeeTaxRate .21 lastBillDate 2013-11-12T19:39:03+01:00 modifyDate 2013-11-12T19:39:03+01:00 nextBillDate 
2013-11-28T07:00:00+01:00 oneTimeFee 0 oneTimeFeeTaxRate .21 orderItemId 22774406 parentId recurringFee .056 recurringFeeTaxRate .21 recurringMonths 1 serviceProviderId 1 setupFee 0 setupFeeTaxRate .21 resourceTableId 2905761 globalIdentifier 633fd9e3-4cf7-4c78-b746-c2b76e2c8b88 managedResourceFlag 0 operatingSystem hardwareId id 2211183 manufacturerLicenseInstance passwords createDate 2013-11-12T19:34:16+01:00 id 1867597 modifyDate 2013-11-12T19:34:16+01:00 password LTSp4cpJ port softwareId 2211183 username root softwareLicense id 1523 softwareDescriptionId 1163 softwareDescription controlPanel 0 id 1163 longDescription Debian / Debian / 7.0.0-64 Minimal for CCI manufacturer Debian name Debian operatingSystem 1 referenceCode DEBIAN_7_64 upgradeSoftwareDescriptionId upgradeSwDescId version 7.0.0-64 Minimal for CCI virtualLicense 0 virtualizationPlatform 0 requiredUser root powerState keyName RUNNING name Running primaryBackendIpAddress 10.55.62.124 primaryIpAddress 50.97.215.202 provisionDate 2013-11-12T19:39:03+01:00 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml0000664000175000017500000000060212701023453030630 0ustar kamikami00000000000000 faultCode SoftLayer_Account faultString Failed Call apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/empty.xml0000664000175000017500000000006012701023453026400 0ustar kamikami00000000000000 ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreate0000664000175000017500000117065512701023453034005 0ustar kamikami00000000000000 blockDevices itemPrice hourlyRecurringFee 0 recurringFee 0 item description 25 GB (SAN) template blockDevices device 0 diskImage capacity 25 localDiskFlag 0 itemPrice hourlyRecurringFee .025 recurringFee 18 item 
description 100 GB (SAN) template blockDevices device 0 diskImage capacity 100 localDiskFlag 0 itemPrice hourlyRecurringFee .012 recurringFee 8 item description 10 GB (SAN) template blockDevices device 2 diskImage capacity 10 localDiskFlag 0 itemPrice hourlyRecurringFee .015 recurringFee 10 item description 20 GB (SAN) template blockDevices device 2 diskImage capacity 20 localDiskFlag 0 itemPrice hourlyRecurringFee .016 recurringFee 11 item description 25 GB (SAN) template blockDevices device 2 diskImage capacity 25 localDiskFlag 0 itemPrice hourlyRecurringFee .017 recurringFee 12 item description 30 GB (SAN) template blockDevices device 2 diskImage capacity 30 localDiskFlag 0 itemPrice hourlyRecurringFee .019 recurringFee 14 item description 40 GB (SAN) template blockDevices device 2 diskImage capacity 40 localDiskFlag 0 itemPrice hourlyRecurringFee .021 recurringFee 16 item description 50 GB (SAN) template blockDevices device 2 diskImage capacity 50 localDiskFlag 0 itemPrice hourlyRecurringFee .023 recurringFee 17 item description 75 GB (SAN) template blockDevices device 2 diskImage capacity 75 localDiskFlag 0 itemPrice hourlyRecurringFee .025 recurringFee 18 item description 100 GB (SAN) template blockDevices device 2 diskImage capacity 100 localDiskFlag 0 itemPrice hourlyRecurringFee .028 recurringFee 19 item description 125 GB (SAN) template blockDevices device 2 diskImage capacity 125 localDiskFlag 0 itemPrice hourlyRecurringFee .03 recurringFee 20 item description 150 GB (SAN) template blockDevices device 2 diskImage capacity 150 localDiskFlag 0 itemPrice hourlyRecurringFee .035 recurringFee 21 item description 175 GB (SAN) template blockDevices device 2 diskImage capacity 175 localDiskFlag 0 itemPrice hourlyRecurringFee .04 recurringFee 22 item description 200 GB (SAN) template blockDevices device 2 diskImage capacity 200 localDiskFlag 0 itemPrice hourlyRecurringFee .05 recurringFee 25 item description 250 GB (SAN) template blockDevices device 2 diskImage 
capacity 250 localDiskFlag 0 itemPrice hourlyRecurringFee .06 recurringFee 32.5 item description 300 GB (SAN) template blockDevices device 2 diskImage capacity 300 localDiskFlag 0 itemPrice hourlyRecurringFee .07 recurringFee 40 item description 350 GB (SAN) template blockDevices device 2 diskImage capacity 350 localDiskFlag 0 itemPrice hourlyRecurringFee .08 recurringFee 45 item description 400 GB (SAN) template blockDevices device 2 diskImage capacity 400 localDiskFlag 0 itemPrice hourlyRecurringFee .1 recurringFee 50 item description 500 GB (SAN) template blockDevices device 2 diskImage capacity 500 localDiskFlag 0 itemPrice hourlyRecurringFee .15 recurringFee 75 item description 750 GB (SAN) template blockDevices device 2 diskImage capacity 750 localDiskFlag 0 itemPrice hourlyRecurringFee .2 recurringFee 100 item description 1,000 GB (SAN) template blockDevices device 2 diskImage capacity 1000 localDiskFlag 0 itemPrice hourlyRecurringFee .25 recurringFee 150 item description 1,500 GB (SAN) template blockDevices device 2 diskImage capacity 1500 localDiskFlag 0 itemPrice hourlyRecurringFee .3 recurringFee 200 item description 2,000 GB (SAN) template blockDevices device 2 diskImage capacity 2000 localDiskFlag 0 itemPrice hourlyRecurringFee .012 recurringFee 8 item description 10 GB (SAN) template blockDevices device 3 diskImage capacity 10 localDiskFlag 0 itemPrice hourlyRecurringFee .015 recurringFee 10 item description 20 GB (SAN) template blockDevices device 3 diskImage capacity 20 localDiskFlag 0 itemPrice hourlyRecurringFee .016 recurringFee 11 item description 25 GB (SAN) template blockDevices device 3 diskImage capacity 25 localDiskFlag 0 itemPrice hourlyRecurringFee .017 recurringFee 12 item description 30 GB (SAN) template blockDevices device 3 diskImage capacity 30 localDiskFlag 0 itemPrice hourlyRecurringFee .019 recurringFee 14 item description 40 GB (SAN) template blockDevices device 3 diskImage capacity 40 localDiskFlag 0 itemPrice hourlyRecurringFee 
.021 recurringFee 16 item description 50 GB (SAN) template blockDevices device 3 diskImage capacity 50 localDiskFlag 0 itemPrice hourlyRecurringFee .023 recurringFee 17 item description 75 GB (SAN) template blockDevices device 3 diskImage capacity 75 localDiskFlag 0 itemPrice hourlyRecurringFee .025 recurringFee 18 item description 100 GB (SAN) template blockDevices device 3 diskImage capacity 100 localDiskFlag 0 itemPrice hourlyRecurringFee .028 recurringFee 19 item description 125 GB (SAN) template blockDevices device 3 diskImage capacity 125 localDiskFlag 0 itemPrice hourlyRecurringFee .03 recurringFee 20 item description 150 GB (SAN) template blockDevices device 3 diskImage capacity 150 localDiskFlag 0 itemPrice hourlyRecurringFee .035 recurringFee 21 item description 175 GB (SAN) template blockDevices device 3 diskImage capacity 175 localDiskFlag 0 itemPrice hourlyRecurringFee .04 recurringFee 22 item description 200 GB (SAN) template blockDevices device 3 diskImage capacity 200 localDiskFlag 0 itemPrice hourlyRecurringFee .05 recurringFee 25 item description 250 GB (SAN) template blockDevices device 3 diskImage capacity 250 localDiskFlag 0 itemPrice hourlyRecurringFee .06 recurringFee 32.5 item description 300 GB (SAN) template blockDevices device 3 diskImage capacity 300 localDiskFlag 0 itemPrice hourlyRecurringFee .07 recurringFee 40 item description 350 GB (SAN) template blockDevices device 3 diskImage capacity 350 localDiskFlag 0 itemPrice hourlyRecurringFee .08 recurringFee 45 item description 400 GB (SAN) template blockDevices device 3 diskImage capacity 400 localDiskFlag 0 itemPrice hourlyRecurringFee .1 recurringFee 50 item description 500 GB (SAN) template blockDevices device 3 diskImage capacity 500 localDiskFlag 0 itemPrice hourlyRecurringFee .15 recurringFee 75 item description 750 GB (SAN) template blockDevices device 3 diskImage capacity 750 localDiskFlag 0 itemPrice hourlyRecurringFee .2 recurringFee 100 item description 1,000 GB (SAN) template 
blockDevices device 3 diskImage capacity 1000 localDiskFlag 0 itemPrice hourlyRecurringFee .25 recurringFee 150 item description 1,500 GB (SAN) template blockDevices device 3 diskImage capacity 1500 localDiskFlag 0 itemPrice hourlyRecurringFee .3 recurringFee 200 item description 2,000 GB (SAN) template blockDevices device 3 diskImage capacity 2000 localDiskFlag 0 itemPrice hourlyRecurringFee .012 recurringFee 8 item description 10 GB (SAN) template blockDevices device 4 diskImage capacity 10 localDiskFlag 0 itemPrice hourlyRecurringFee .015 recurringFee 10 item description 20 GB (SAN) template blockDevices device 4 diskImage capacity 20 localDiskFlag 0 itemPrice hourlyRecurringFee .016 recurringFee 11 item description 25 GB (SAN) template blockDevices device 4 diskImage capacity 25 localDiskFlag 0 itemPrice hourlyRecurringFee .017 recurringFee 12 item description 30 GB (SAN) template blockDevices device 4 diskImage capacity 30 localDiskFlag 0 itemPrice hourlyRecurringFee .019 recurringFee 14 item description 40 GB (SAN) template blockDevices device 4 diskImage capacity 40 localDiskFlag 0 itemPrice hourlyRecurringFee .021 recurringFee 16 item description 50 GB (SAN) template blockDevices device 4 diskImage capacity 50 localDiskFlag 0 itemPrice hourlyRecurringFee .023 recurringFee 17 item description 75 GB (SAN) template blockDevices device 4 diskImage capacity 75 localDiskFlag 0 itemPrice hourlyRecurringFee .025 recurringFee 18 item description 100 GB (SAN) template blockDevices device 4 diskImage capacity 100 localDiskFlag 0 itemPrice hourlyRecurringFee .028 recurringFee 19 item description 125 GB (SAN) template blockDevices device 4 diskImage capacity 125 localDiskFlag 0 itemPrice hourlyRecurringFee .03 recurringFee 20 item description 150 GB (SAN) template blockDevices device 4 diskImage capacity 150 localDiskFlag 0 itemPrice hourlyRecurringFee .035 recurringFee 21 item description 175 GB (SAN) template blockDevices device 4 diskImage capacity 175 localDiskFlag 
0 itemPrice hourlyRecurringFee .04 recurringFee 22 item description 200 GB (SAN) template blockDevices device 4 diskImage capacity 200 localDiskFlag 0 itemPrice hourlyRecurringFee .05 recurringFee 25 item description 250 GB (SAN) template blockDevices device 4 diskImage capacity 250 localDiskFlag 0 itemPrice hourlyRecurringFee .06 recurringFee 32.5 item description 300 GB (SAN) template blockDevices device 4 diskImage capacity 300 localDiskFlag 0 itemPrice hourlyRecurringFee .07 recurringFee 40 item description 350 GB (SAN) template blockDevices device 4 diskImage capacity 350 localDiskFlag 0 itemPrice hourlyRecurringFee .08 recurringFee 45 item description 400 GB (SAN) template blockDevices device 4 diskImage capacity 400 localDiskFlag 0 itemPrice hourlyRecurringFee .1 recurringFee 50 item description 500 GB (SAN) template blockDevices device 4 diskImage capacity 500 localDiskFlag 0 itemPrice hourlyRecurringFee .15 recurringFee 75 item description 750 GB (SAN) template blockDevices device 4 diskImage capacity 750 localDiskFlag 0 itemPrice hourlyRecurringFee .2 recurringFee 100 item description 1,000 GB (SAN) template blockDevices device 4 diskImage capacity 1000 localDiskFlag 0 itemPrice hourlyRecurringFee .25 recurringFee 150 item description 1,500 GB (SAN) template blockDevices device 4 diskImage capacity 1500 localDiskFlag 0 itemPrice hourlyRecurringFee .3 recurringFee 200 item description 2,000 GB (SAN) template blockDevices device 4 diskImage capacity 2000 localDiskFlag 0 itemPrice hourlyRecurringFee .012 recurringFee 8 item description 10 GB (SAN) template blockDevices device 5 diskImage capacity 10 localDiskFlag 0 itemPrice hourlyRecurringFee .015 recurringFee 10 item description 20 GB (SAN) template blockDevices device 5 diskImage capacity 20 localDiskFlag 0 itemPrice hourlyRecurringFee .016 recurringFee 11 item description 25 GB (SAN) template blockDevices device 5 diskImage capacity 25 localDiskFlag 0 itemPrice hourlyRecurringFee .017 recurringFee 12 
item description 30 GB (SAN) template blockDevices device 5 diskImage capacity 30 localDiskFlag 0 itemPrice hourlyRecurringFee .019 recurringFee 14 item description 40 GB (SAN) template blockDevices device 5 diskImage capacity 40 localDiskFlag 0 itemPrice hourlyRecurringFee .021 recurringFee 16 item description 50 GB (SAN) template blockDevices device 5 diskImage capacity 50 localDiskFlag 0 itemPrice hourlyRecurringFee .023 recurringFee 17 item description 75 GB (SAN) template blockDevices device 5 diskImage capacity 75 localDiskFlag 0 itemPrice hourlyRecurringFee .025 recurringFee 18 item description 100 GB (SAN) template blockDevices device 5 diskImage capacity 100 localDiskFlag 0 itemPrice hourlyRecurringFee .028 recurringFee 19 item description 125 GB (SAN) template blockDevices device 5 diskImage capacity 125 localDiskFlag 0 itemPrice hourlyRecurringFee .03 recurringFee 20 item description 150 GB (SAN) template blockDevices device 5 diskImage capacity 150 localDiskFlag 0 itemPrice hourlyRecurringFee .035 recurringFee 21 item description 175 GB (SAN) template blockDevices device 5 diskImage capacity 175 localDiskFlag 0 itemPrice hourlyRecurringFee .04 recurringFee 22 item description 200 GB (SAN) template blockDevices device 5 diskImage capacity 200 localDiskFlag 0 itemPrice hourlyRecurringFee .05 recurringFee 25 item description 250 GB (SAN) template blockDevices device 5 diskImage capacity 250 localDiskFlag 0 itemPrice hourlyRecurringFee .06 recurringFee 32.5 item description 300 GB (SAN) template blockDevices device 5 diskImage capacity 300 localDiskFlag 0 itemPrice hourlyRecurringFee .07 recurringFee 40 item description 350 GB (SAN) template blockDevices device 5 diskImage capacity 350 localDiskFlag 0 itemPrice hourlyRecurringFee .08 recurringFee 45 item description 400 GB (SAN) template blockDevices device 5 diskImage capacity 400 localDiskFlag 0 itemPrice hourlyRecurringFee .1 recurringFee 50 item description 500 GB (SAN) template blockDevices device 5 
diskImage capacity 500 localDiskFlag 0 itemPrice hourlyRecurringFee .15 recurringFee 75 item description 750 GB (SAN) template blockDevices device 5 diskImage capacity 750 localDiskFlag 0 itemPrice hourlyRecurringFee .2 recurringFee 100 item description 1,000 GB (SAN) template blockDevices device 5 diskImage capacity 1000 localDiskFlag 0 itemPrice hourlyRecurringFee .25 recurringFee 150 item description 1,500 GB (SAN) template blockDevices device 5 diskImage capacity 1500 localDiskFlag 0 itemPrice hourlyRecurringFee .3 recurringFee 200 item description 2,000 GB (SAN) template blockDevices device 5 diskImage capacity 2000 localDiskFlag 0 itemPrice hourlyRecurringFee 0 recurringFee 0 item description 25 GB (LOCAL) template blockDevices device 0 diskImage capacity 25 localDiskFlag 1 itemPrice hourlyRecurringFee .01 recurringFee 7 item description 100 GB (LOCAL) template blockDevices device 0 diskImage capacity 100 localDiskFlag 1 itemPrice hourlyRecurringFee .005 recurringFee 3.25 item description 25 GB (LOCAL) template blockDevices device 2 diskImage capacity 25 localDiskFlag 1 itemPrice hourlyRecurringFee .01 recurringFee 7 item description 100 GB (LOCAL) template blockDevices device 2 diskImage capacity 100 localDiskFlag 1 itemPrice hourlyRecurringFee .015 recurringFee 9 item description 150 GB (LOCAL) template blockDevices device 2 diskImage capacity 150 localDiskFlag 1 itemPrice hourlyRecurringFee .02 recurringFee 14 item description 200 GB (LOCAL) template blockDevices device 2 diskImage capacity 200 localDiskFlag 1 itemPrice hourlyRecurringFee .03 recurringFee 21 item description 300 GB (LOCAL) template blockDevices device 2 diskImage capacity 300 localDiskFlag 1 datacenters template datacenter name ams01 template datacenter name dal05 template datacenter name dal06 template datacenter name sea01 template datacenter name sjc01 template datacenter name sng01 template datacenter name wdc01 memory itemPrice hourlyRecurringFee .03 recurringFee 21 item description 1 
GB template maxMemory 1024 itemPrice hourlyRecurringFee .06 recurringFee 42 item description 2 GB template maxMemory 2048 itemPrice hourlyRecurringFee .085 recurringFee 59.5 item description 3 GB template maxMemory 3072 itemPrice hourlyRecurringFee .11 recurringFee 77 item description 4 GB template maxMemory 4096 itemPrice hourlyRecurringFee .14 recurringFee 98 item description 5 GB template maxMemory 5120 itemPrice hourlyRecurringFee .165 recurringFee 115.5 item description 6 GB template maxMemory 6144 itemPrice hourlyRecurringFee .195 recurringFee 136.5 item description 7 GB template maxMemory 7168 itemPrice hourlyRecurringFee .215 recurringFee 150.5 item description 8 GB template maxMemory 8192 itemPrice hourlyRecurringFee .245 recurringFee 171.5 item description 9 GB template maxMemory 9216 itemPrice hourlyRecurringFee .265 recurringFee 185.5 item description 10 GB template maxMemory 10240 itemPrice hourlyRecurringFee .29 recurringFee 203 item description 11 GB template maxMemory 11264 itemPrice hourlyRecurringFee .31 recurringFee 217 item description 12 GB template maxMemory 12288 itemPrice hourlyRecurringFee .33 recurringFee 231 item description 13 GB template maxMemory 13312 itemPrice hourlyRecurringFee .345 recurringFee 241.5 item description 14 GB template maxMemory 14336 itemPrice hourlyRecurringFee .36 recurringFee 252 item description 15 GB template maxMemory 15360 itemPrice hourlyRecurringFee .375 recurringFee 262.5 item description 16 GB template maxMemory 16384 itemPrice hourlyRecurringFee .76 recurringFee 525 item description 32 GB template maxMemory 32768 itemPrice hourlyRecurringFee 3.5 recurringFee 2500 item description 48 GB template maxMemory 49152 networkComponents itemPrice hourlyRecurringFee 0 recurringFee 0 item description 10 Mbps Public & Private Networks template networkComponents maxSpeed 10 itemPrice hourlyRecurringFee 0 recurringFee 0 item description 100 Mbps Private Network template networkComponents maxSpeed 100 itemPrice 
hourlyRecurringFee .04 recurringFee 20 item description 1 Gbps Public & Private Networks template networkComponents maxSpeed 1000 operatingSystems itemPrice hourlyRecurringFee 0 recurringFee 0 item description CentOS 6.0 - Minimal Install (64 bit) template operatingSystemReferenceCode CENTOS_6_64 itemPrice hourlyRecurringFee 0 recurringFee 0 item description CentOS 6.0 - Minimal Install (32 bit) template operatingSystemReferenceCode CENTOS_6_32 itemPrice hourlyRecurringFee 0 recurringFee 0 item description CentOS 5 - Minimal Install (64 bit) template operatingSystemReferenceCode CENTOS_5_64 itemPrice hourlyRecurringFee 0 recurringFee 0 item description CentOS 5 - Minimal Install (32 bit) template operatingSystemReferenceCode CENTOS_5_32 itemPrice recurringFee 12 item description CloudLinux 6 (64 bit) template operatingSystemReferenceCode CLOUDLINUX_6_64 itemPrice recurringFee 12 item description CloudLinux 6 (32 bit) template operatingSystemReferenceCode CLOUDLINUX_6_32 itemPrice recurringFee 12 item description CloudLinux 5 (64 bit) template operatingSystemReferenceCode CLOUDLINUX_5_64 itemPrice recurringFee 12 item description CloudLinux 5 (32 bit) template operatingSystemReferenceCode CLOUDLINUX_5_32 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Debian GNU/Linux 7.0 Wheezy/Stable - Minimal Install (64 bit) template operatingSystemReferenceCode DEBIAN_7_64 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Debian GNU/Linux 7.0 Wheezy/Stable - Minimal Install (32 bit) template operatingSystemReferenceCode DEBIAN_7_32 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Debian GNU/Linux 6.0 Squeeze/Stable - Minimal Install (64 bit) template operatingSystemReferenceCode DEBIAN_6_64 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Debian GNU/Linux 6.0 Squeeze/Stable - Minimal Install (32 bit) template operatingSystemReferenceCode DEBIAN_6_32 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Debian 
GNU/Linux 5.0 Lenny/Stable - Minimal Install (64 bit) template operatingSystemReferenceCode DEBIAN_5_64 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (32 bit) template operatingSystemReferenceCode DEBIAN_5_32 itemPrice hourlyRecurringFee .08 recurringFee 45 item description Red Hat Enterprise Linux 6 - Minimal Install (64 bit) (1 - 4 Core) template operatingSystemReferenceCode REDHAT_6_64 itemPrice hourlyRecurringFee .12 recurringFee 100 item description Red Hat Enterprise Linux 6 - Minimal Install (64 bit) (5 - 100 Core) id -3839 softwareDescriptionId upgradeItemId softwareDescription controlPanel 0 id 795 longDescription Redhat / EL / 6.0-64 Minimal for CCI manufacturer Redhat name EL operatingSystem 1 referenceCode REDHAT_6_64 upgradeSoftwareDescriptionId upgradeSwDescId version 6.0-64 Minimal for CCI virtualLicense 0 virtualizationPlatform 0 requiredUser root template operatingSystemReferenceCode REDHAT_6_64 itemPrice hourlyRecurringFee .08 recurringFee 45 item description Red Hat Enterprise Linux 6 - Minimal Install (32 bit) (1 - 4 Core) template operatingSystemReferenceCode REDHAT_6_32 itemPrice hourlyRecurringFee .12 recurringFee 100 item description Red Hat Enterprise Linux 6 - Minimal Install (32 bit) (5 - 100 Core) id -3837 softwareDescriptionId upgradeItemId softwareDescription controlPanel 0 id 796 longDescription Redhat / EL / 6.0-32 Minimal for CCI manufacturer Redhat name EL operatingSystem 1 referenceCode REDHAT_6_32 upgradeSoftwareDescriptionId upgradeSwDescId version 6.0-32 Minimal for CCI virtualLicense 0 virtualizationPlatform 0 requiredUser root template operatingSystemReferenceCode REDHAT_6_32 itemPrice hourlyRecurringFee .08 recurringFee 45 item description Red Hat Enterprise Linux 5 - Minimal Install (64 bit) (1 - 4 Core) template operatingSystemReferenceCode REDHAT_5_64 itemPrice hourlyRecurringFee .12 recurringFee 100 item description Red Hat Enterprise Linux 5 - Minimal 
Install (64 bit) (5 - 100 Core) id -880 softwareDescriptionId upgradeItemId softwareDescription controlPanel 0 id 664 longDescription Redhat / EL / 5.5-64 Minimal for CCI manufacturer Redhat name EL operatingSystem 1 referenceCode REDHAT_5_64 upgradeSoftwareDescriptionId upgradeSwDescId version 5.5-64 Minimal for CCI virtualLicense 0 virtualizationPlatform 0 requiredUser root template operatingSystemReferenceCode REDHAT_5_64 itemPrice hourlyRecurringFee .08 recurringFee 45 item description Red Hat Enterprise Linux 5 - Minimal Install (32 bit) (1 - 4 Core) template operatingSystemReferenceCode REDHAT_5_32 itemPrice hourlyRecurringFee .12 recurringFee 100 item description Red Hat Enterprise Linux 5 - Minimal Install (32 bit) (5 - 100 Core) id -879 softwareDescriptionId upgradeItemId softwareDescription controlPanel 0 id 662 longDescription Redhat / EL / 5.5-32 Minimal for CCI manufacturer Redhat name EL operatingSystem 1 referenceCode REDHAT_5_32 upgradeSoftwareDescriptionId 927 upgradeSwDescId 927 version 5.5-32 Minimal for CCI virtualLicense 0 virtualizationPlatform 0 requiredUser root template operatingSystemReferenceCode REDHAT_5_32 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit) template operatingSystemReferenceCode UBUNTU_12_64 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (32 bit) template operatingSystemReferenceCode UBUNTU_12_32 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Ubuntu Linux 10.04 LTS Lucid Lynx - Minimal Install (64 bit) template operatingSystemReferenceCode UBUNTU_10_64 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Ubuntu Linux 10.04 LTS Lucid Lynx - Minimal Install (32 bit) template operatingSystemReferenceCode UBUNTU_10_32 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (64 bit) template 
operatingSystemReferenceCode UBUNTU_8_64 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (32 bit) template operatingSystemReferenceCode UBUNTU_8_32 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Vyatta 6.6 Community Edition (64 bit) template operatingSystemReferenceCode VYATTACE_6.6_64 itemPrice hourlyRecurringFee 0 recurringFee 0 item description Vyatta 6.5 Community Edition (64 bit) template operatingSystemReferenceCode VYATTACE_6.5_64 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2012 Standard Edition (64 bit) template operatingSystemReferenceCode WIN_2012-STD_64 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2012 Datacenter Edition (64bit) template operatingSystemReferenceCode WIN_2012-DC_64 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2008 Standard Edition SP2 (64bit) template operatingSystemReferenceCode WIN_2008-STD-SP2_64 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2008 Standard Edition SP2 (32bit) template operatingSystemReferenceCode WIN_2008-STD-SP2_32 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2008 Standard SP1 with R2 (64 bit) template operatingSystemReferenceCode WIN_2008-STD-R2-SP1_64 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2008 R2 Standard Edition (64bit) template operatingSystemReferenceCode WIN_2008-STD-R2_64 itemPrice hourlyRecurringFee .1 recurringFee 50 item description Windows Server 2008 Enterprise Edition SP2 (64bit) template operatingSystemReferenceCode WIN_2008-ENT-SP2_64 itemPrice hourlyRecurringFee .1 recurringFee 50 item description Windows Server 2008 Enterprise Edition SP2 (32bit) template operatingSystemReferenceCode WIN_2008-ENT-SP2_32 itemPrice hourlyRecurringFee .1 recurringFee 50 item description Windows Server 2008 R2 Enterprise 
Edition (64bit) template operatingSystemReferenceCode WIN_2008-ENT-R2_64 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2008 Datacenter Edition SP2 (64bit) template operatingSystemReferenceCode WIN_2008-DC-SP2_64 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2008 R2 Datacenter Edition (64bit) template operatingSystemReferenceCode WIN_2008-DC-R2_64 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2003 Standard SP2 with R2 (64 bit) template operatingSystemReferenceCode WIN_2003-STD-SP2-5_64 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2003 Standard SP2 with R2 (32 bit) template operatingSystemReferenceCode WIN_2003-STD-SP2-5_32 itemPrice hourlyRecurringFee .05 recurringFee 50 item description Windows Server 2003 Enterprise SP2 with R2 (64 bit) template operatingSystemReferenceCode WIN_2003-ENT-SP2-5_64 itemPrice hourlyRecurringFee .05 recurringFee 50 item description Windows Server 2003 Enterprise SP2 with R2 (32 bit) template operatingSystemReferenceCode WIN_2003-ENT-SP2-5_32 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2003 Datacenter SP2 with R2 (64 bit) template operatingSystemReferenceCode WIN_2003-DC-SP2-1_64 itemPrice hourlyRecurringFee .05 recurringFee 20 item description Windows Server 2003 Datacenter SP2 with R2 (32 bit) template operatingSystemReferenceCode WIN_2003-DC-SP2-1_32 processors itemPrice hourlyRecurringFee .07 recurringFee 29 item description 1 x 2.0 GHz Core template startCpus 1 itemPrice hourlyRecurringFee .14 recurringFee 78 item description 2 x 2.0 GHz Cores template startCpus 2 itemPrice hourlyRecurringFee .205 recurringFee 123.5 item description 3 x 2.0 GHz Cores template startCpus 3 itemPrice hourlyRecurringFee .265 recurringFee 165.5 item description 4 x 2.0 GHz Cores template startCpus 4 itemPrice hourlyRecurringFee .325 recurringFee 207.5 item description 5 x 2.0 
GHz Cores template startCpus 5 itemPrice hourlyRecurringFee .39 recurringFee 253 item description 6 x 2.0 GHz Cores template startCpus 6 itemPrice hourlyRecurringFee .445 recurringFee 291.5 item description 7 x 2.0 GHz Cores template startCpus 7 itemPrice hourlyRecurringFee .495 recurringFee 326.5 item description 8 x 2.0 GHz Cores template startCpus 8 itemPrice hourlyRecurringFee .54 recurringFee 358 item description 9 x 2.0 GHz Cores template startCpus 9 itemPrice hourlyRecurringFee .59 recurringFee 393 item description 10 x 2.0 GHz Cores template startCpus 10 itemPrice hourlyRecurringFee .635 recurringFee 424.5 item description 11 x 2.0 GHz Cores template startCpus 11 itemPrice hourlyRecurringFee .66 recurringFee 442 item description 12 x 2.0 GHz Cores template startCpus 12 itemPrice hourlyRecurringFee .695 recurringFee 466.5 item description 13 x 2.0 GHz Cores template startCpus 13 itemPrice hourlyRecurringFee .72 recurringFee 484 item description 14 x 2.0 GHz Cores template startCpus 14 itemPrice hourlyRecurringFee .74 recurringFee 498 item description 15 x 2.0 GHz Cores template startCpus 15 itemPrice hourlyRecurringFee .75 recurringFee 505 item description 16 x 2.0 GHz Cores template startCpus 16 itemPrice hourlyRecurringFee .22 recurringFee 159 item description Private 1 x 2.0 GHz Core template dedicatedAccountHostOnlyFlag 1 startCpus 1 itemPrice hourlyRecurringFee .3 recurringFee 199 item description Private 2 x 2.0 GHz Cores template dedicatedAccountHostOnlyFlag 1 startCpus 2 itemPrice hourlyRecurringFee .44 recurringFee 299 item description Private 4 x 2.0 GHz Cores template dedicatedAccountHostOnlyFlag 1 startCpus 4 itemPrice hourlyRecurringFee .58 recurringFee 399 item description Private 8 x 2.0 GHz Cores template dedicatedAccountHostOnlyFlag 1 startCpus 8 ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_deleteObject.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_delete0000664000175000017500000000054412701023453033767 0ustar kamikami00000000000000 status success ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObj0000664000175000017500000000470612701023453033770 0ustar kamikami00000000000000 accountId 12345 createDate 2013-01-01T19:31:22-06:00 dedicatedAccountHostOnlyFlag 0 domain domain.com fullyQualifiedDomainName hostname.domain.com hostname hostname id 123456 lastPowerStateId lastVerifiedDate maxCpu 2 maxCpuUnits CORE maxMemory 2048 metricPollDate modifyDate privateNetworkOnlyFlag 0 startCpus 2 statusId 1001 globalIdentifier f47ac10b-58cc-4372-a567-0e02b2c3d479 managedResourceFlag 0 powerState keyName HALTED name Halted apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/fail.xml0000664000175000017500000000056512701023453026167 0ustar kamikami00000000000000 faultCode fail faultString Failed Call ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getD0000664000175000017500000000417212701023453033720 0ustar kamikami00000000000000 id 2 name dal05 longName Dallas 5 id 3 name dal01 longName Dallas id 18171 name sea01 longName Seattle id 37473 name wdc01 longName Washington, DC id 12345 name newcity01 longName New City ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_getObject.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_getObj0000664000175000017500000000172612701023453033742 0ustar kamikami00000000000000 id 1 key ssh-key label test1 fingerprint 00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/0000775000175000017500000000000013160535107025307 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/unknown_error.json0000664000175000017500000000012312701023453031102 0ustar kamikami00000000000000[{"error_point": null, "error_type": "backend", "error_message": "unknown error"}] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/capabilities.json0000664000175000017500000000073212701023453030631 0ustar kamikami00000000000000{ "drives": { "dssd": { "max_size": 8858013190752, "min_size": 536870912 } }, "servers": { "cpu": { "max": 80000, "min": 250 }, "cpu_per_smp": { "max": 2200, "min": 1000 }, "mem": { "max": 137438953472, "min": 268435456 }, "smp": { "max": 40, "min": 1 } } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/currentusage.json0000664000175000017500000000356312701023453030714 0ustar kamikami00000000000000{ "balance": { "balance": "378.74599035374868510600", "currency": "USD" }, "usage": { "cpu": { "burst": 0, "subscribed": 0, "using": 0 }, "dssd": { "burst": 13958643712, "subscribed": 0, "using": 13958643712 }, "ip": { "burst": 0, "subscribed": 0, "using": 0 }, "mem": { "burst": 0, "subscribed": 0, "using": 0 }, "msft_7jq_00341": { "burst": 0, "subscribed": 0, "using": 0 }, "msft_7nq_00302": { "burst": 0, "subscribed": 0, "using": 0 }, "msft_lwa_00135": { "burst": 0, "subscribed": 0, "using": 0 }, "msft_p71_01031": { "burst": 0, "subscribed": 0, "using": 0 }, "msft_p73_04837": { "burst": 0, "subscribed": 0, 
"using": 0 }, "msft_p73_04837_core": { "burst": 0, "subscribed": 0, "using": 0 }, "msft_tfa_00009": { "burst": 0, "subscribed": 0, "using": 0 }, "msft_tfa_00523": { "burst": 0, "subscribed": 0, "using": 0 }, "sms": { "burst": 0, "subscribed": 0, "using": 0 }, "ssd": { "burst": 0, "subscribed": 0, "using": 0 }, "tx": { "burst": 0, "subscribed": 5368709120, "using": 0 }, "vlan": { "burst": 0, "subscribed": 0, "using": 0 } } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_detail.json0000664000175000017500000002615212701023453031022 0ustar kamikami00000000000000{ "meta": { "limit": 20, "offset": 0, "total_count": 4 }, "objects": [ { "affinities": [], "allow_multimount": false, "jobs": [], "licenses": [], "media": "disk", "meta": { "arch": "64", "category": "general", "description": "Debian 6.0.7 - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/28.", "favourite": "False", "image_type": "preinst", "install_notes": "Intall notes:\t1. Clone the Image. \\n The image needs to be cloned and then attached to the server. \\n \\n 2. Connecting to your server via VNC. \\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 3. Minimum Hardware Requirements. \\n The recommended minimum hardware requirements as publishes by Debian.org: 1GB RAM and 1GHrz CPU \\n \\n 4. Update your administrator password. \\n Please be aware that on startup you will be asked to enter the current password \"password123\" and set a secure password. \\n \\n 5. Setup your Debian. \\n By default the timezone and the language are set to Switzerland. \\n \\n 6. Configuring your Networking. \\n a) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. 
Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings. \\n b) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs. \\n", "os": "linux", "paid": "False", "url": "http://www.debian.org/" }, "mounted_on": [], "name": "test node 2-drive", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/drives/9d1d2cf3-08c1-462f-8485-f4b073560809/", "runtime": { "snapshots_allocated_size": 0, "storage_type": "dssd" }, "size": 13958643712, "snapshots": [], "status": "unmounted", "storage_type": "dssd", "tags": [], "uuid": "9d1d2cf3-08c1-462f-8485-f4b073560809" }, { "affinities": [], "allow_multimount": false, "jobs": [], "licenses": [], "media": "disk", "meta": { "arch": "64", "category": "general", "description": "Debian 6.0.7 - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/28.", "favourite": "False", "image_type": "preinst", "install_notes": "Intall notes:\t1. Clone the Image. \\n The image needs to be cloned and then attached to the server. \\n \\n 2. Connecting to your server via VNC. \\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 3. Minimum Hardware Requirements. \\n The recommended minimum hardware requirements as publishes by Debian.org: 1GB RAM and 1GHrz CPU \\n \\n 4. Update your administrator password. \\n Please be aware that on startup you will be asked to enter the current password \"password123\" and set a secure password. 
\\n \\n 5. Setup your Debian. \\n By default the timezone and the language are set to Switzerland. \\n \\n 6. Configuring your Networking. \\n a) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings. \\n b) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs. \\n", "os": "linux", "paid": "False", "url": "http://www.debian.org/" }, "mounted_on": [], "name": "test node 3-drive", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/drives/c9799969-0016-4298-a72c-93cabc067c6e/", "runtime": { "snapshots_allocated_size": 0, "storage_type": "dssd" }, "size": 13958643712, "snapshots": [], "status": "unmounted", "storage_type": "dssd", "tags": [], "uuid": "c9799969-0016-4298-a72c-93cabc067c6e" }, { "affinities": [], "allow_multimount": false, "jobs": [], "licenses": [], "media": "disk", "meta": { "arch": "64", "category": "general", "description": "Debian 6.0.7 - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/28.", "favourite": "False", "image_type": "preinst", "install_notes": "Intall notes:\t1. Clone the Image. \\n The image needs to be cloned and then attached to the server. \\n \\n 2. Connecting to your server via VNC. \\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 3. 
Minimum Hardware Requirements. \\n The recommended minimum hardware requirements as publishes by Debian.org: 1GB RAM and 1GHrz CPU \\n \\n 4. Update your administrator password. \\n Please be aware that on startup you will be asked to enter the current password \"password123\" and set a secure password. \\n \\n 5. Setup your Debian. \\n By default the timezone and the language are set to Switzerland. \\n \\n 6. Configuring your Networking. \\n a) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings. \\n b) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs. \\n", "os": "linux", "paid": "False", "url": "http://www.debian.org/" }, "mounted_on": [], "name": "test node 2-drive", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/drives/967c8bbd-ca32-42db-a9b8-95e270e0aae1/", "runtime": { "snapshots_allocated_size": 0, "storage_type": "dssd" }, "size": 13958643712, "snapshots": [], "status": "unmounted", "storage_type": "dssd", "tags": [], "uuid": "967c8bbd-ca32-42db-a9b8-95e270e0aae1" }, { "affinities": [], "allow_multimount": false, "jobs": [], "licenses": [], "media": "disk", "meta": { "arch": "64", "category": "general", "description": "Debian 6.0.7 - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/28.", "favourite": "False", "image_type": "preinst", "install_notes": "Intall notes:\t1. Clone the Image. 
\\n The image needs to be cloned and then attached to the server. \\n \\n 2. Connecting to your server via VNC. \\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 3. Minimum Hardware Requirements. \\n The recommended minimum hardware requirements as publishes by Debian.org: 1GB RAM and 1GHrz CPU \\n \\n 4. Update your administrator password. \\n Please be aware that on startup you will be asked to enter the current password \"password123\" and set a secure password. \\n \\n 5. Setup your Debian. \\n By default the timezone and the language are set to Switzerland. \\n \\n 6. Configuring your Networking. \\n a) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings. \\n b) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs. 
\\n", "os": "linux", "paid": "False", "url": "http://www.debian.org/" }, "mounted_on": [ { "resource_uri": "/api/2.0/servers/e06cf7b3-ea46-4d38-87e0-3f918c4648d3/", "uuid": "e06cf7b3-ea46-4d38-87e0-3f918c4648d3" } ], "name": "test node 2-drive", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/drives/3e166706-188c-4f38-b8d5-7fc10a5019a1/", "runtime": { "snapshots_allocated_size": 0, "storage_type": "dssd" }, "size": 13958643712, "snapshots": [], "status": "mounted", "storage_type": "dssd", "tags": [], "uuid": "3e166706-188c-4f38-b8d5-7fc10a5019a1" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_clone.json0000664000175000017500000000160012701023453030647 0ustar kamikami00000000000000{ "objects": [ { "affinities": [], "allow_multimount": false, "jobs": [], "licenses": [], "media": "disk", "meta": {}, "mounted_on": [], "name": "cloned drive", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/drives/b02311e2-a83c-4c12-af10-b30d51c86913/", "runtime": { "snapshots_allocated_size": 0, "storage_type": null }, "size": 2097152000, "snapshots": [], "status": "creating", "storage_type": null, "tags": [], "uuid": "b02311e2-a83c-4c12-af10-b30d51c86913" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/libdrives.json0000664000175000017500000010255612701023453030172 0ustar kamikami00000000000000{ "meta": { "limit": 20, "offset": 0, "total_count": 89 }, "objects": [ { "affinities": [], "allow_multimount": false, "arch": "32", "category": [ "security" ], "description": "IPCop 2.0.2 - i486 - CD.\r\nThe IPCop Firewall is a Linux firewall distribution.", "favourite": true, "image_type": "install", "install_notes": "1. Attach the CD.\\n Please be aware that the CD needs to be attached to the server to IDE. \\n \\n 2. 
Attach a Drive.\\n Please be aware that the minimum drive size where you are going to install the OS should be 5 GB. \\n \\n 3. Connecting to your server via VNC.\\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 4. Minimum Hardware Requirements.\\n The recommended minimum hardware requirements as publishes by ipcop.org are: 32MB RAM and 386MHz CPU \\n We recommend specifying a higher level of RAM for a better user experience.\\n", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "IPCop 2.0.2", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/6eca8d96-44bc-4637-af97-77ccd7ba4144/", "size": 1000000000, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://www.ipcop.org/", "uuid": "6eca8d96-44bc-4637-af97-77ccd7ba4144" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "FreeBSD-8.4-RELEASE-amd64-disc1", "favourite": false, "image_type": "install", "install_notes": "1. Attach the CD.\\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \\n\r\n\\n\r\n2. Attach a Drive.\\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \\n\r\n\\n\r\n3. Connecting to your server via VNC.\\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \\n\r\n\\n\r\n4. 
Minimum Hardware Requirements.\\n\r\nThe recommended minimum hardware requirements as publishes by freebsd.org are: 0.5 GB RAM and 0.5\\n GHz CPU We recommend specifying a higher level of RAM for a better user experience.\\n\r\n", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "FreeBSD 8.4", "os": "other", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/c143724d-4d40-4871-bc2c-5120b4263ab3/", "size": 536870912, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://www.freebsd.org/", "uuid": "c143724d-4d40-4871-bc2c-5120b4263ab3" }, { "affinities": [], "allow_multimount": false, "arch": "32", "category": [ "general" ], "description": "Ubuntu 12.04 Desktop - 32bit - Install CD", "favourite": true, "image_type": "install", "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Ubuntu.com are: 1GB RAM and 1GHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "Ubuntu 12.04 Desktop", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/703e693e-056d-4cd6-9531-36ec045fee7c/", "size": 1000000000, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://www.ubuntu.com/", "uuid": "703e693e-056d-4cd6-9531-36ec045fee7c" }, { "affinities": [], "allow_multimount": false, "arch": "32", "category": [ "general" ], "description": "Ubuntu 12.04 Server - 32bit - Install CD", "favourite": true, "image_type": "install", "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Ubuntu.com are: 0.5GB RAM and 0.5GHrz CPU We recommend specifying a higher level of RAM for a better user experience. \r\n", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "Ubuntu 12.04 Server", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/36b01118-55f4-454f-92ee-578eb6d99867/", "size": 1000000000, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://www.ubuntu.com/", "uuid": "36b01118-55f4-454f-92ee-578eb6d99867" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "Windows Server 2012 Standard - 64bit Install CD", "favourite": false, "image_type": "install", "install_notes": "1. 
Attach the CD \\n Please be aware that the CD needs to be attached to the server to IDE. \\n \\n 2. Attach a Drive \\n Please be aware that the minimum drive size where you are going to install the OS should be 32 GB. \\n \\n 3. Connecting to your server via VNC \\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 4. Minimum Hardware Requirements \\n The recommended minimum hardware requirements as publishes by Microsoft can be found through the following link: http://msdn.microsoft.com/library/dn303418.aspx We recommend specifying a higher level of RAM for a better user experience. \\n \\n 5. Enter your license key. \\n", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "Windows Server Standard 2012", "os": "windows", "owner": null, "paid": true, "resource_uri": "/api/2.0/libdrives/b4273b6d-b227-4966-9e6e-5d48cebfcaa5/", "size": 3694116864, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://www.microsoft.com/", "uuid": "b4273b6d-b227-4966-9e6e-5d48cebfcaa5" }, { "affinities": [], "allow_multimount": false, "arch": "32", "category": [ "general" ], "description": "Knoppix 6 - 32bit - CD", "favourite": false, "image_type": "live", "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. 
\\n\r\nThe recommended minimum hardware requirements as publishes by Knoppix.net are: 512MB RAM and 512MHrz CPU We recommend specifying a higher level of RAM for a better user experience. ", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "Knoppix 6.4.3", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/ed47dc6d-4efd-4c05-b2f8-ab32ccf6de3b/", "size": 3670016000, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://knoppix.net/", "uuid": "ed47dc6d-4efd-4c05-b2f8-ab32ccf6de3b" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "This image is produced by SixSq specifically to work with SlipStream.", "favourite": false, "image_type": "preinst", "install_notes": "", "jobs": [], "licenses": [], "media": "disk", "meta": {}, "mounted_on": [], "name": "ubuntu-10.04-toMP", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/5236b9ee-f735-42fd-a236-17558f9e12d3/", "size": 3221225472, "status": "unmounted", "storage_type": null, "tags": [], "url": "", "uuid": "5236b9ee-f735-42fd-a236-17558f9e12d3" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "CentOS 6.4 Server - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/30.", "favourite": false, "image_type": "preinst", "install_notes": "1. Clone the Image.\\n\r\nThe image needs to be cloned and then attached to the server.\\n\r\n\\n\r\n2. Connecting to your server via VNC.\\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI.\\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window.\\n\r\nc) Start to configure your server.\\n\r\n\\n\r\n3. Minimum Hardware Requirements.\\n\r\nThe recommended minimum hardware requirements as publishes by centos.org: 0.5GB RAM and 0.5GHz CPU\\n\r\n\\n\r\n4. 
Update your administrator password.\\n\r\nBy default and for security reasons \"root\" login is completely disabled (including for ssh)\\n\r\nUser \"cloudsigma\" with password \"cloudsigma\" is available for access.\\n\r\nPlease be aware that on the first login you will be asked to change the current password \"cloudsigma\" and set a secure password.\\n\r\nFor \"root\" please use the command \"sudo su\"\\n\r\n\\n\r\n5. Setup your CentOS.\\n\r\n\\n\r\n6. Configuring your Networking.\\n\r\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will assign the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\\n\r\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs.\\n", "jobs": [], "licenses": [], "media": "disk", "meta": {}, "mounted_on": [], "name": "CentOS 6.4 Server", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/cc08cd15-0c17-429f-bd56-15fefaca9d88/", "size": 10737418240, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://www.centos.org/", "uuid": "cc08cd15-0c17-429f-bd56-15fefaca9d88" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "Debian 6.0.7 - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/28.", "favourite": false, "image_type": "preinst", "install_notes": "Intall notes:\t1. Clone the Image. \\n The image needs to be cloned and then attached to the server. \\n \\n 2. Connecting to your server via VNC. 
\\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 3. Minimum Hardware Requirements. \\n The recommended minimum hardware requirements as publishes by Debian.org: 1GB RAM and 1GHrz CPU \\n \\n 4. Update your administrator password. \\n Please be aware that on startup you will be asked to enter the current password \"password123\" and set a secure password. \\n \\n 5. Setup your Debian. \\n By default the timezone and the language are set to Switzerland. \\n \\n 6. Configuring your Networking. \\n a) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings. \\n b) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs. \\n", "jobs": [], "licenses": [], "media": "disk", "meta": {}, "mounted_on": [], "name": "Debian 6.0.7 Desktop", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/e848e216-76bb-4c1d-a376-54e4bdf54fe4/", "size": 10737418240, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://www.debian.org/", "uuid": "e848e216-76bb-4c1d-a376-54e4bdf54fe4" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "CentOS 6.3 - 64bit - DVD\r\n", "favourite": false, "image_type": "install", "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. 
Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by centos.org are: 0.5 GB RAM and 0.5 GHz CPU We recommend specifying a higher level of RAM for a better user experience.", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "CentOS 6.3 DVD", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/65c99e46-296c-4d3f-ad1f-88dc06772624/", "size": 4289396736, "status": "unmounted", "storage_type": null, "tags": [], "url": "", "uuid": "65c99e46-296c-4d3f-ad1f-88dc06772624" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "CentOS 6.4 - 64bit - DVD\r\n", "favourite": false, "image_type": "install", "install_notes": "1. Attach the DVD. \\n\r\nPlease be aware that the DVD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. 
\\n\r\nThe recommended minimum hardware requirements as publishes by centos.org are: 0.5GB RAM and 0.5GHz CPU We recommend specifying a higher level of RAM for a better user experience.", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "CentOS 6.4 DVD", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/608e784a-5bff-4d25-afeb-bf7f998f56ef/", "size": 4353392640, "status": "unmounted", "storage_type": null, "tags": [], "url": "", "uuid": "608e784a-5bff-4d25-afeb-bf7f998f56ef" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "Ubuntu 10.04.3 LTS Server Edition 64bit - CD", "favourite": true, "image_type": "install", "install_notes": "\r\n1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Ubuntu,com are: 0.5GB RAM and 0.5GHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "Ubuntu 10.04.3 Server", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/b69cbf27-d2a8-44f7-bc5a-3facc70021a8/", "size": 1000000000, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://ubuntu.com/", "uuid": "b69cbf27-d2a8-44f7-bc5a-3facc70021a8" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "Slackware-13.37 - 64bit - Install DVD", "favourite": false, "image_type": "install", "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Slackware.com are: 128MB RAM and 512MHrz CPU We recommend specifying a higher level of RAM for a better user experience. ", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "Slackware 13.37", "os": "linux", "owner": null, "paid": true, "resource_uri": "/api/2.0/libdrives/e209e588-8c06-44ce-8d57-c10df32c5876/", "size": 4613734400, "status": "unmounted", "storage_type": null, "tags": [], "url": "", "uuid": "e209e588-8c06-44ce-8d57-c10df32c5876" }, { "affinities": [], "allow_multimount": false, "arch": "32", "category": [ "general" ], "description": "RedHat Enterprise 6.0 - 32bit - Install DVD", "favourite": false, "image_type": "install", "install_notes": "1. Attach the CD. 
\\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by RedHat.com are: 2GB RAM and 2GHrz CPU We recommend specifying a higher level of RAM for a better user experience. ", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "RedHat Enterprise 6.0", "os": "linux", "owner": null, "paid": true, "resource_uri": "/api/2.0/libdrives/a0638d80-bc5b-48a3-a7ba-dec2416239bf/", "size": 2936012800, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://www.redhat.com/", "uuid": "a0638d80-bc5b-48a3-a7ba-dec2416239bf" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "CentOS 6.3 SlipStream. This image is produced by SixSq specifically to work with SlipStream.", "favourite": false, "image_type": "preinst", "install_notes": "CentOS 6.3 SlipStream", "jobs": [], "licenses": [], "media": "disk", "meta": {}, "mounted_on": [], "name": "CentOS 6.3 for SlipStream", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/ac51c08f-d22b-4da8-9591-d343947f7455/", "size": 7516192768, "status": "unmounted", "storage_type": null, "tags": [], "url": "", "uuid": "ac51c08f-d22b-4da8-9591-d343947f7455" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "Debian 6 - 64bit - CD", "favourite": false, "image_type": "install", "install_notes": "1. 
Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Debian.org are: 1GB RAM and 1GHrz CPU We recommend specifying a higher level of RAM for a better user experience. ", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "Debian 6", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/98f810a3-b8f0-4441-89cd-02be4f2614d7/", "size": 676331520, "status": "unmounted", "storage_type": null, "tags": [], "url": "", "uuid": "98f810a3-b8f0-4441-89cd-02be4f2614d7" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "Ubuntu 12.10 Server - 64bit - Install CD", "favourite": true, "image_type": "install", "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. 
\\n\r\nThe recommended minimum hardware requirements as publishes by Ubuntu.com are: 0.5GB RAM and 0.5GHrz CPU We recommend specifying a higher level of RAM for a better user experience. \r\n", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "Ubuntu 12.10 Server", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/6afbda4b-1027-4405-9ae9-c7d32f097d31/", "size": 1000000000, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://www.ubuntu.com/", "uuid": "6afbda4b-1027-4405-9ae9-c7d32f097d31" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "Debian 7.1 - 64bit - CD", "favourite": false, "image_type": "install", "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. 
\\n\r\nThe recommended minimum hardware requirements as publishes by Debian.org are: 1GB RAM and 1GHrz CPU We recommend specifying a higher level of RAM for a better user experience.", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "Debian 7.1", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/958bf26c-f25b-457d-aedb-a5cfb36bdeef/", "size": 536870912, "status": "unmounted", "storage_type": null, "tags": [], "url": "", "uuid": "958bf26c-f25b-457d-aedb-a5cfb36bdeef" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "CentOS 6.2 - 64bit - DVD\r\n", "favourite": false, "image_type": "install", "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. 
\\n\r\nThe recommended minimum hardware requirements as publishes by centos.org are: 0.5 GB RAM and 0.5 GHz CPU We recommend specifying a higher level of RAM for a better user experience.\r\n", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "CentOS 6.2 DVD", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/d7cdd30f-2197-47ac-a878-c285c1e67426/", "size": 4423139328, "status": "unmounted", "storage_type": null, "tags": [], "url": "", "uuid": "d7cdd30f-2197-47ac-a878-c285c1e67426" }, { "affinities": [], "allow_multimount": false, "arch": "64", "category": [ "general" ], "description": "Debian 5.0 - 64bit - CD", "favourite": true, "image_type": "install", "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Debian.org are: 1GB RAM and 1GHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
", "jobs": [], "licenses": [], "media": "cdrom", "meta": {}, "mounted_on": [], "name": "Debian 5.0", "os": "linux", "owner": null, "paid": false, "resource_uri": "/api/2.0/libdrives/794a068d-228c-4758-81f0-e1bc955a6cce/", "size": 4697620480, "status": "unmounted", "storage_type": null, "tags": [], "url": "http://www.debian.org/", "uuid": "794a068d-228c-4758-81f0-e1bc955a6cce" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_clone.json0000664000175000017500000000332412701023453031051 0ustar kamikami00000000000000{ "context": true, "cpu": 1000, "cpu_model": null, "cpus_instead_of_cores": false, "drives": [ { "boot_order": 1, "dev_channel": "0:0", "device": "ide", "drive": { "resource_uri": "/api/2.0/drives/f1e42abe-f7db-4dcc-b37e-e53aca7a3ba9/", "uuid": "f1e42abe-f7db-4dcc-b37e-e53aca7a3ba9" } } ], "enable_numa": false, "hv_relaxed": false, "hv_tsc": false, "mem": 2147483648, "meta": { "description": "ddd", "ssh_public_key": "" }, "name": "test cloned node", "nics": [ { "boot_order": null, "firewall_policy": { "resource_uri": "/api/2.0/fwpolicies/461dfb8c-e641-43d7-a20e-32e2aa399086/", "uuid": "461dfb8c-e641-43d7-a20e-32e2aa399086" }, "ip_v4_conf": { "conf": "dhcp", "ip": null }, "ip_v6_conf": null, "mac": "22:76:4c:96:e1:98", "model": "virtio", "runtime": null, "vlan": null } ], "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "requirements": [], "resource_uri": "/api/2.0/servers/470ea5b9-3beb-4506-9cac-e3c63002480b/", "runtime": null, "smp": 1, "status": "cloning", "tags": [ { "resource_uri": "/api/2.0/tags/e60bb2d2-08d4-4255-adac-5faf87efcdd2/", "uuid": "e60bb2d2-08d4-4255-adac-5faf87efcdd2" } ], "uuid": "470ea5b9-3beb-4506-9cac-e3c63002480b", "vnc_password": "xxxx" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_open_vnc.json0000664000175000017500000000024212701023453031554 0ustar kamikami00000000000000{ "action": 
"open_vnc", "result": "success", "uuid": "2e64e5e4-f31d-471a-ac1b-1ae079652e40", "vnc_url": "vnc://direct.lvs.cloudsigma.com:41111" } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_with_rules.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_with_rules.jso0000664000175000017500000000154412701023453033760 0ustar kamikami00000000000000{ "objects": [ { "meta": {}, "name": "test policy 2", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/fwpolicies/324819a5-7a5b-4231-957d-662a7429fb8c/", "rules": [ { "action": "accept", "comment": null, "direction": "out", "dst_ip": "127.0.0.1/32", "dst_port": null, "ip_proto": "tcp", "src_ip": "127.0.0.1/32", "src_port": null } ], "servers": [], "uuid": "324819a5-7a5b-4231-957d-662a7429fb8c" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_detail.json0000664000175000017500000000576012701023453031674 0ustar kamikami00000000000000{ "meta": { "limit": 0, "offset": 0, "total_count": 2 }, "objects": [ { "meta": {}, "name": "test policy", "owner": { "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" }, "resource_uri": "/api/2.0/fwpolicies/0e339282-0cb5-41ac-a9db-727fb62ff2dc/", "rules": [], "servers": [], "uuid": "0e339282-0cb5-41ac-a9db-727fb62ff2dc" }, { "meta": {}, "name": "My awesome policy", "owner": { "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" }, "resource_uri": "/api/2.0/fwpolicies/0c754845-f2ed-4347-8758-4315f7fa9c22/", "rules": [ { "action": "drop", "comment": "Drop traffic from the VM to IP address 23.0.0.0/32", "direction": "out", "dst_ip": "23.0.0.0/32", "dst_port": null, "ip_proto": 
"tcp", "src_ip": null, "src_port": null }, { "action": "accept", "comment": "Allow SSH traffic to the VM from our office in Dubai", "direction": "in", "dst_ip": null, "dst_port": "22", "ip_proto": "tcp", "src_ip": "172.66.32.0/24", "src_port": null }, { "action": "drop", "comment": "Drop all other SSH traffic to the VM", "direction": "in", "dst_ip": null, "dst_port": "22", "ip_proto": "tcp", "src_ip": null, "src_port": null }, { "action": "drop", "comment": "Drop all UDP traffic to the VM, not originating from 172.66.32.55", "direction": "in", "dst_ip": null, "dst_port": null, "ip_proto": "udp", "src_ip": "!172.66.32.55/32", "src_port": null }, { "action": "drop", "comment": "Drop any traffic, to the VM with destination port not between 1-1024", "direction": "in", "dst_ip": null, "dst_port": "!1:1024", "ip_proto": "tcp", "src_ip": null, "src_port": null } ], "servers": [], "uuid": "0c754845-f2ed-4347-8758-4315f7fa9c22" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/subscriptions.json0000664000175000017500000000720012701023453031104 0ustar kamikami00000000000000{ "meta": { "limit": 20, "offset": 0, "total_count": 5 }, "objects": [ { "amount": "1", "auto_renew": true, "descendants": [], "discount_amount": null, "discount_percent": null, "end_time": "2014-02-20T11:12:34.930946+00:00", "id": "7272", "last_notification": null, "period": "345 days, 0:00:00", "price": "0E-20", "remaining": "1", "resource": "vlan", "resource_uri": "/api/2.0/subscriptions/7272/", "start_time": "2013-03-12T11:12:34.930946+00:00", "status": "active", "subscribed_object": "96537817-f4b6-496b-a861-e74192d3ccb0", "uuid": "509f8e27-1e64-49bb-aa5a-baec074b0210" }, { "amount": "1", "auto_renew": true, "descendants": [], "discount_amount": null, "discount_percent": null, "end_time": "2014-02-20T11:12:41.837474+00:00", "id": "7273", "last_notification": null, "period": "345 days, 0:00:00", "price": "0E-20", "remaining": "1", "resource": "ip", "resource_uri": 
"/api/2.0/subscriptions/7273/", "start_time": "2013-03-12T11:12:41.837474+00:00", "status": "active", "subscribed_object": "185.12.6.183", "uuid": "c2423c1a-8768-462c-bdc3-4ca09c1e650b" }, { "amount": "17179869184", "auto_renew": true, "descendants": [], "discount_amount": null, "discount_percent": null, "end_time": "2014-02-20T14:04:14.142181+00:00", "id": "3985", "last_notification": null, "period": "365 days, 0:00:00", "price": "0E-20", "remaining": "17179869184", "resource": "mem", "resource_uri": "/api/2.0/subscriptions/3985/", "start_time": "2013-02-20T14:04:14.142181+00:00", "status": "active", "subscribed_object": null, "uuid": "9bb117d3-4bc5-4e2d-a907-b20abd48eaf9" }, { "amount": "8000", "auto_renew": true, "descendants": [], "discount_amount": null, "discount_percent": null, "end_time": "2014-02-20T14:04:29.040258+00:00", "id": "3986", "last_notification": null, "period": "365 days, 0:00:00", "price": "0E-20", "remaining": "8000", "resource": "cpu", "resource_uri": "/api/2.0/subscriptions/3986/", "start_time": "2013-02-20T14:04:29.040258+00:00", "status": "active", "subscribed_object": null, "uuid": "a265c47f-1a00-4095-acfc-2193622bfbd8" }, { "amount": "32212254720", "auto_renew": true, "descendants": [], "discount_amount": null, "discount_percent": null, "end_time": "2014-02-20T14:04:44.088984+00:00", "id": "3987", "last_notification": null, "period": "365 days, 0:00:00", "price": "0E-20", "remaining": "32212254720", "resource": "dssd", "resource_uri": "/api/2.0/subscriptions/3987/", "start_time": "2013-02-20T14:04:44.088984+00:00", "status": "active", "subscribed_object": null, "uuid": "8965ff95-4924-40a9-b923-a58615149732" } ], "price": "0E-20" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/stop_already_stopped.json0000664000175000017500000000025312701023453032422 0ustar kamikami00000000000000[{"error_point": null, "error_type": "permission", "error_message": "Cannot stop guest in state \"stopped\". 
Guest should be in state \"['started', 'running_legacy']\""}] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_get.json0000664000175000017500000000053512701023453027776 0ustar kamikami00000000000000{ "meta": {}, "name": "test tag 2", "owner": { "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" }, "resource_uri": "/api/2.0/tags/a010ec41-2ead-4630-a1d0-237fa77e4d4d/", "resources": [], "uuid": "a010ec41-2ead-4630-a1d0-237fa77e4d4d" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_mixed_state.json0000664000175000017500000001305112701023453033577 0ustar kamikami00000000000000{ "meta": { "limit": 20, "offset": 0, "total_count": 2 }, "objects": [ { "context": true, "cpu": 1000, "cpu_model": null, "cpus_instead_of_cores": false, "drives": [], "enable_numa": false, "hv_relaxed": false, "hv_tsc": false, "mem": 1073741824, "meta": { "description": "test description 2", "ssh_public_key": "" }, "name": "test no drives", "nics": [ { "boot_order": null, "firewall_policy": null, "ip_v4_conf": { "conf": "dhcp", "ip": null }, "ip_v6_conf": null, "mac": "22:98:ce:04:50:df", "model": "virtio", "runtime": { "interface_type": "public", "io": { "bytes_recv": "1323", "bytes_sent": "21535", "packets_recv": "3", "packets_sent": "278" }, "ip_v4": { "resource_uri": "/api/2.0/ips/185.12.5.181/", "uuid": "185.12.5.181" }, "ip_v6": null }, "vlan": null }, { "boot_order": null, "firewall_policy": null, "ip_v4_conf": { "conf": "dhcp", "ip": null }, "ip_v6_conf": null, "mac": "22:2c:03:99:32:be", "model": "virtio", "runtime": { "interface_type": "public", "io": { "bytes_recv": "0", "bytes_sent": "0", "packets_recv": "0", "packets_sent": "0" }, "ip_v4": { "resource_uri": "/api/2.0/ips/178.22.68.55/", "uuid": "178.22.68.55" }, "ip_v6": null }, "vlan": null } ], "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": 
"69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "requirements": [], "resource_uri": "/api/2.0/servers/9de75ed6-fd33-45e2-963f-d405f31fd911/", "runtime": { "active_since": "2013-11-05T10:15:42+00:00", "nics": [ { "interface_type": "public", "io": { "bytes_recv": "1323", "bytes_sent": "21535", "packets_recv": "3", "packets_sent": "278" }, "ip_v4": { "resource_uri": "/api/2.0/ips/185.12.5.181/", "uuid": "185.12.5.181" }, "ip_v6": null, "mac": "22:98:ce:04:50:df" } ] }, "smp": 1, "status": "running", "tags": [], "uuid": "9de75ed6-fd33-45e2-963f-d405f31fd911", "vnc_password": "foo" }, { "context": true, "cpu": 2000, "cpu_model": null, "cpus_instead_of_cores": false, "drives": [ { "boot_order": 1, "dev_channel": "0:0", "device": "ide", "drive": { "resource_uri": "/api/2.0/drives/3f74acec-ba3c-4efd-ab9e-5d95a4c5fca9/", "uuid": "3f74acec-ba3c-4efd-ab9e-5d95a4c5fca9" } } ], "enable_numa": false, "hv_relaxed": false, "hv_tsc": false, "mem": 2147483648, "meta": { "description": "test1" }, "name": "test-1", "nics": [ { "boot_order": null, "firewall_policy": null, "ip_v4_conf": { "conf": "dhcp", "ip": null }, "ip_v6_conf": null, "mac": "22:22:4e:1e:e0:7e", "model": "virtio", "runtime": null, "vlan": null } ], "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "requirements": [], "resource_uri": "/api/2.0/servers/9414bbeb-e908-4e55-ae3f-2eb61adc50d8/", "runtime": null, "smp": 1, "status": "stopped", "tags": [], "uuid": "9414bbeb-e908-4e55-ae3f-2eb61adc50d8", "vnc_password": "bar" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_avail_groups.json0000664000175000017500000000103712701023453032246 0ustar kamikami00000000000000[ [ "ad2b0b9c-8b66-45bc-a0f8-3a8514b78406", "e464a01b-ad2a-4bed-a4dd-30d1687560fd", "2246e488-a1b9-4da2-af30-0b6c73a1529c", "51a6b22f-2884-48d9-87f8-c85cb6f43c99", "a67c932d-6766-470b-b1c5-17856e4a5b4e", "3af58efd-8442-466f-80bf-48c5a2ee84b6", 
"ab35089c-0a89-435f-aedd-eaa05fae0ef1", "9972280b-3d74-4b0d-85de-caa0ef0117a6", "c47bca85-0199-438c-9ae4-d308357cf22d", "3e166706-188c-4f38-b8d5-7fc10a5019a1", "ba47e1e9-1848-48bd-8786-9cc45744214c" ] ] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_avail_groups.json0000664000175000017500000000042712701023453032445 0ustar kamikami00000000000000[ [ "63456dc4-36f9-4a3f-b478-4d376b8ff5a9", "ede05e68-c997-4aad-816a-39469fd1a562" ], [ "ad8caf99-45c4-45fc-8ba7-acb8a68be66f", "4b9e1487-0b80-4f65-9c3e-e840dde27ccd" ], [ "658bafdf-8fbf-4fc6-be4f-74ecc7f0e8a5" ] ] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_get.json0000664000175000017500000000123612701023453030333 0ustar kamikami00000000000000{ "affinities": [], "allow_multimount": false, "jobs": [], "licenses": [], "media": "disk", "meta": {}, "mounted_on": [], "name": "test drive 5", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/drives/b02311e2-a83c-4c12-af10-b30d51c86913/", "runtime": { "snapshots_allocated_size": 0, "storage_type": null }, "size": 2097152000, "snapshots": [], "status": "unmounted", "storage_type": null, "tags": [], "uuid": "b02311e2-a83c-4c12-af10-b30d51c86913" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create.json0000664000175000017500000000255512701023453031221 0ustar kamikami00000000000000{ "objects": [ { "context": true, "cpu": 1000, "cpu_model": null, "cpus_instead_of_cores": false, "drives": [], "enable_numa": false, "hv_relaxed": false, "hv_tsc": false, "mem": 536870912, "meta": {}, "name": "test node", "nics": [ { "boot_order": null, "firewall_policy": null, "ip_v4_conf": { "conf": "dhcp", "ip": null }, "ip_v6_conf": null, "mac": "22:08:76:b1:ce:41", "model": "virtio", "runtime": null, "vlan": null } ], "owner": { "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", "uuid": 
"5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" }, "requirements": [], "resource_uri": "/api/2.0/servers/9de75ed6_fd33_45e2_963f_d405f31fd911/", "runtime": null, "smp": 1, "status": "started", "tags": [], "uuid": "9de75ed6_fd33_45e2_963f_d405f31fd911", "vnc_password": "testserver" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/start_success.json0000664000175000017500000000013112701023453031056 0ustar kamikami00000000000000{"action": "start", "result": "success", "uuid": "9414bbeb-e908-4e55-ae3f-2eb61adc50d8"} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_detail.json0000664000175000017500000000173612701023453030465 0ustar kamikami00000000000000{ "meta": { "limit": 20, "offset": 0, "total_count": 2 }, "objects": [ { "meta": {}, "name": "test tag 2", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/tags/a010ec41-2ead-4630-a1d0-237fa77e4d4d/", "resources": [], "uuid": "a010ec41-2ead-4630-a1d0-237fa77e4d4d" }, { "meta": {}, "name": "test tag 1", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/tags/e60bb2d2-08d4-4255-adac-5faf87efcdd2/", "resources": [], "uuid": "e60bb2d2-08d4-4255-adac-5faf87efcdd2" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/create_subscription.json0000664000175000017500000000140712701023453032247 0ustar kamikami00000000000000{ "objects": [ { "amount": "1", "auto_renew": false, "descendants": [], "discount_amount": "0", "discount_percent": "0", "end_time": "2014-03-01T12:00:00+00:00", "id": "228816", "period": "1 month", "price": "10.26666666666666666666666667", "remaining": "1", "resource": "vlan", "resource_uri": "/api/2.0/subscriptions/228816/", "start_time": "2014-01-31T17:06:19.388295+00:00", "status": "active", "subscribed_object": 
"2494079f-8376-40bf-9b37-34d633b8a7b7", "uuid": "0dd25c5c-6c01-498f-b009-e07d76552a1a" } ], "price": "10.26666666666666666666666667" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_update.json0000664000175000017500000000215112701023453030475 0ustar kamikami00000000000000{ "meta": {}, "name": "test tag 3", "owner": { "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" }, "resource_uri": "/api/2.0/tags/900ac9c6-2f98-48a4-b406-5494b4ea4663/", "resources": [ { "owner": { "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" }, "res_type": "servers", "resource_uri": "/api/2.0/servers/79f7853b-04bd-44f5-a2c2-fa56f6861994/", "uuid": "79f7853b-04bd-44f5-a2c2-fa56f6861994" }, { "owner": { "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" }, "res_type": "drives", "resource_uri": "/api/2.0/drives/8c48e0bd-e17b-49ca-8926-654107d2b7e7/", "uuid": "8c48e0bd-e17b-49ca-8926-654107d2b7e7" } ], "uuid": "900ac9c6-2f98-48a4-b406-5494b4ea4663" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_close_vnc.json0000664000175000017500000000015312701023453031721 0ustar kamikami00000000000000{ "action": "close_vnc", "result": "success", "uuid": "2e64e5e4-f31d-471a-ac1b-1ae079652e40" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_attach_policy.json0000664000175000017500000000231112701023453032567 0ustar kamikami00000000000000{ "context": true, "cpu": 2000, "cpu_model": null, "cpus_instead_of_cores": false, "drives": [], "enable_numa": false, "hv_relaxed": false, "hv_tsc": false, "mem": 536870912, "meta": {}, "name": "test_server_updated", "nics": [ { "boot_order": null, "firewall_policy": { "resource_uri": "/api/2.0/fwpolicies/461dfb8c-e641-43d7-a20e-32e2aa399086/", "uuid": "461dfb8c-e641-43d7-a20e-32e2aa399086" }, 
"ip_v4_conf": { "conf": "dhcp", "ip": null }, "ip_v6_conf": null, "mac": "22:58:83:c4:07:fc", "model": "virtio", "runtime": null, "vlan": null } ], "owner": { "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" }, "requirements": [], "resource_uri": "/api/2.0/servers/d6bde7f2-69ca-4825-909b-fcc08ea928ef/", "runtime": null, "smp": 1, "status": "stopped", "tags": [], "uuid": "d6bde7f2-69ca-4825-909b-fcc08ea928ef", "vnc_password": "updated_password" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create_with_vlan.json0000664000175000017500000000447212701023453033274 0ustar kamikami00000000000000{ "objects": [ { "context": true, "cpu": 1100, "cpu_model": null, "cpus_instead_of_cores": false, "drives": [ { "boot_order": 1, "dev_channel": "0:0", "device": "ide", "drive": { "resource_uri": "/api/2.0/drives/7c0efbb2-b1e8-4e77-9d72-9f9f9d75ae7b/", "uuid": "7c0efbb2-b1e8-4e77-9d72-9f9f9d75ae7b" }, "runtime": null } ], "enable_numa": false, "hv_relaxed": false, "hv_tsc": false, "mem": 671088640, "meta": {}, "name": "test node vlan", "nics": [ { "boot_order": null, "firewall_policy": null, "ip_v4_conf": { "conf": "dhcp", "ip": null }, "ip_v6_conf": null, "mac": "22:94:75:3c:16:34", "model": "virtio", "runtime": null, "vlan": null }, { "boot_order": null, "firewall_policy": null, "ip_v4_conf": null, "ip_v6_conf": null, "mac": "22:84:c4:af:f3:fc", "model": "virtio", "runtime": null, "vlan": { "resource_uri": "/api/2.0/vlans/39ae851d-433f-4ac2-a803-ffa24cb1fa3e/", "uuid": "39ae851d-433f-4ac2-a803-ffa24cb1fa3e" } } ], "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "requirements": [], "resource_uri": "/api/2.0/servers/c8b034fb-9e66-4892-be12-a36121d4b704/", "runtime": null, "smp": 1, "status": "stopped", "tags": [], "uuid": "9de75ed6_fd33_45e2_963f_d405f31fd911", "vnc_password": "testserver" } ] } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_no_rules.json0000664000175000017500000000076012701023453033576 0ustar kamikami00000000000000{ "objects": [ { "meta": {}, "name": "test policy 1", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/fwpolicies/ae9e5982-33fd-4e89-a467-4480256ccdb6/", "rules": [], "servers": [], "uuid": "ae9e5982-33fd-4e89-a467-4480256ccdb6" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/start_already_started.json0000664000175000017500000000022612701023453032562 0ustar kamikami00000000000000[{"error_point": null, "error_type": "permission", "error_message": "Cannot start guest in state \"started\". Guest should be in state \"stopped\""}] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create.json0000664000175000017500000000072012701023453030456 0ustar kamikami00000000000000{ "objects": [ { "meta": {}, "name": "test tag 3", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/tags/c0008127-6dbf-4cf3-85f5-203f4c3967fa/", "resources": [], "uuid": "c0008127-6dbf-4cf3-85f5-203f4c3967fa" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/balance.json0000664000175000017500000000005012701023453027556 0ustar kamikami00000000000000{"balance": "10.00", "currency": "USD"} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_all_stopped.json0000664000175000017500000000645312701023453033607 0ustar kamikami00000000000000{ "meta": { "limit": 20, "offset": 0, "total_count": 2 }, "objects": [ { "context": true, "cpu": 1000, "cpu_model": null, "cpus_instead_of_cores": false, "drives": [], "enable_numa": false, "hv_relaxed": false, "hv_tsc": false, "mem": 1073741824, "meta": { "description": "test description 
2", "ssh_public_key": "" }, "name": "test no drives", "nics": [ { "boot_order": null, "firewall_policy": null, "ip_v4_conf": { "conf": "dhcp", "ip": null }, "ip_v6_conf": null, "mac": "22:98:ce:04:50:df", "model": "virtio", "runtime": null, "vlan": null } ], "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "requirements": [], "resource_uri": "/api/2.0/servers/9de75ed6-fd33-45e2-963f-d405f31fd911/", "runtime": null, "smp": 1, "status": "stopped", "tags": [], "uuid": "9de75ed6-fd33-45e2-963f-d405f31fd911", "vnc_password": "bar" }, { "context": true, "cpu": 2000, "cpu_model": null, "cpus_instead_of_cores": false, "drives": [ { "boot_order": 1, "dev_channel": "0:0", "device": "ide", "drive": { "resource_uri": "/api/2.0/drives/3f74acec-ba3c-4efd-ab9e-5d95a4c5fca9/", "uuid": "3f74acec-ba3c-4efd-ab9e-5d95a4c5fca9" } } ], "enable_numa": false, "hv_relaxed": false, "hv_tsc": false, "mem": 2147483648, "meta": { "description": "test1" }, "name": "test-1", "nics": [ { "boot_order": null, "firewall_policy": null, "ip_v4_conf": { "conf": "dhcp", "ip": null }, "ip_v6_conf": null, "mac": "22:22:4e:1e:e0:7e", "model": "virtio", "runtime": null, "vlan": null } ], "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "requirements": [], "resource_uri": "/api/2.0/servers/9414bbeb-e908-4e55-ae3f-2eb61adc50d8/", "runtime": null, "smp": 1, "status": "stopped", "tags": [], "uuid": "9414bbeb-e908-4e55-ae3f-2eb61adc50d8", "vnc_password": "foo" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_create.json0000664000175000017500000000160012701023453031012 0ustar kamikami00000000000000{ "objects": [ { "affinities": [], "allow_multimount": false, "jobs": [], "licenses": [], "media": "disk", "meta": {}, "mounted_on": [], "name": "test drive 5", "owner": { "resource_uri": 
"/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/drives/b02311e2-a83c-4c12-af10-b30d51c86913/", "runtime": { "snapshots_allocated_size": 0, "storage_type": null }, "size": 2097152000, "snapshots": [], "status": "creating", "storage_type": null, "tags": [], "uuid": "b02311e2-a83c-4c12-af10-b30d51c86913" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_resize.json0000664000175000017500000000160012701023453031050 0ustar kamikami00000000000000{ "objects": [ { "affinities": [], "allow_multimount": false, "jobs": [], "licenses": [], "media": "disk", "meta": {}, "mounted_on": [], "name": "test drive 5", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/drives/b02311e2-a83c-4c12-af10-b30d51c86913/", "runtime": { "snapshots_allocated_size": 0, "storage_type": null }, "size": 1164967936, "snapshots": [], "status": "creating", "storage_type": null, "tags": [], "uuid": "b02311e2-a83c-4c12-af10-b30d51c86913" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create_with_resources.json0000664000175000017500000000157612701023453033615 0ustar kamikami00000000000000{ "objects": [ { "meta": {}, "name": "test tag 3", "owner": { "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" }, "resource_uri": "/api/2.0/tags/c0008127-6dbf-4cf3-85f5-203f4c3967fa/", "resources": [ { "owner": { "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" }, "res_type": "vlans", "resource_uri": "/api/2.0/vlans/96537817-f4b6-496b-a861-e74192d3ccb0/", "uuid": "1" } ], "uuid": "c0008127-6dbf-4cf3-85f5-203f4c3967fa" } ] } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/stop_success.json0000664000175000017500000000013012701023453030705 0ustar kamikami00000000000000{"action": "stop", "result": "success", "uuid": "9414bbeb-e908-4e55-ae3f-2eb61adc50d8"} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/cloudsigma_2_0/pricing.json0000664000175000017500000023171612701023453027643 0ustar kamikami00000000000000{ "current": { "cpu": 5, "dssd": 1, "ip": 1, "mem": 7, "msft_7jq_00341": 1, "msft_7nq_00302": 1, "msft_lwa_00135": 1, "msft_p71_01031": 1, "msft_p73_04837": 1, "msft_p73_04837_core": 1, "msft_tfa_00009": 1, "msft_tfa_00523": 1, "sms": 1, "ssd": 1, "tx": 8, "vlan": 1 }, "meta": { "limit": 0, "offset": 0, "total_count": 316 }, "next": { "cpu": 5, "dssd": 1, "ip": 1, "mem": 7, "msft_7jq_00341": 1, "msft_7nq_00302": 1, "msft_lwa_00135": 1, "msft_p71_01031": 1, "msft_p73_04837": 1, "msft_p73_04837_core": 1, "msft_tfa_00009": 1, "msft_tfa_00523": 1, "sms": 1, "ssd": 1, "tx": 10, "vlan": 1 }, "objects": [ { "currency": "CHF", "id": "18", "level": 1, "multiplier": 2783138807808000, "price": "0.26600000000000000000", "resource": "dssd", "unit": "GB/month" }, { "currency": "EUR", "id": "20", "level": 1, "multiplier": 2783138807808000, "price": "0.21000000000000000000", "resource": "dssd", "unit": "GB/month" }, { "currency": "GBP", "id": "22", "level": 1, "multiplier": 2783138807808000, "price": "0.18200000000000000000", "resource": "dssd", "unit": "GB/month" }, { "currency": "USD", "id": "24", "level": 1, "multiplier": 2783138807808000, "price": "0.28000000000000000000", "resource": "dssd", "unit": "GB/month" }, { "currency": "CHF", "id": "26", "level": 1, "multiplier": 2783138807808000, "price": "0.76000000000000000000", "resource": "ssd", "unit": "GB/month" }, { "currency": "EUR", "id": "28", "level": 1, "multiplier": 2783138807808000, "price": "0.60000000000000000000", "resource": "ssd", "unit": "GB/month" }, { "currency": "GBP", "id": "30", "level": 1, "multiplier": 
2783138807808000, "price": "0.52000000000000000000", "resource": "ssd", "unit": "GB/month" }, { "currency": "USD", "id": "32", "level": 1, "multiplier": 2783138807808000, "price": "0.80000000000000000000", "resource": "ssd", "unit": "GB/month" }, { "currency": "CHF", "id": "34", "level": 1, "multiplier": 2592000, "price": "4.75000000000000000000", "resource": "ip", "unit": "IP" }, { "currency": "EUR", "id": "36", "level": 1, "multiplier": 2592000, "price": "3.75000000000000000000", "resource": "ip", "unit": "IP" }, { "currency": "GBP", "id": "38", "level": 1, "multiplier": 2592000, "price": "3.25000000000000000000", "resource": "ip", "unit": "IP" }, { "currency": "USD", "id": "40", "level": 1, "multiplier": 2592000, "price": "5.00000000000000000000", "resource": "ip", "unit": "IP" }, { "currency": "USD", "id": "8", "level": 1, "multiplier": 3600000, "price": "0.00790500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "42", "level": 1, "multiplier": 2592000, "price": "19.00000000000000000000", "resource": "vlan", "unit": "VLAN" }, { "currency": "USD", "id": "104", "level": 2, "multiplier": 3600000, "price": "0.00816000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "44", "level": 1, "multiplier": 2592000, "price": "15.00000000000000000000", "resource": "vlan", "unit": "VLAN" }, { "currency": "USD", "id": "108", "level": 3, "multiplier": 3600000, "price": "0.00841500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "46", "level": 1, "multiplier": 2592000, "price": "13.00000000000000000000", "resource": "vlan", "unit": "VLAN" }, { "currency": "USD", "id": "112", "level": 4, "multiplier": 3600000, "price": "0.00867000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "48", "level": 1, "multiplier": 2592000, "price": "20.00000000000000000000", "resource": "vlan", "unit": "VLAN" }, { "currency": "CHF", "id": "2", "level": 1, "multiplier": 
3600000, "price": "0.00875500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "54", "level": 1, "multiplier": 1073741824, "price": "0.01950000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "101", "level": 2, "multiplier": 3600000, "price": "0.00901000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "105", "level": 3, "multiplier": 3600000, "price": "0.00935000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "109", "level": 4, "multiplier": 3600000, "price": "0.00960500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "113", "level": 5, "multiplier": 3600000, "price": "0.01028500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "117", "level": 6, "multiplier": 3600000, "price": "0.01088000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "121", "level": 7, "multiplier": 3600000, "price": "0.01207000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "125", "level": 8, "multiplier": 3600000, "price": "0.01385500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "10", "level": 1, "multiplier": 3865470566400, "price": "0.01256000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "133", "level": 10, "multiplier": 3865470566400, "price": "0.02080000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "137", "level": 11, "multiplier": 3865470566400, "price": "0.02192000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "141", "level": 12, "multiplier": 3865470566400, "price": "0.02288000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "116", "level": 5, "multiplier": 3600000, "price": "0.00926500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { 
"currency": "USD", "id": "120", "level": 6, "multiplier": 3600000, "price": "0.00977500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "124", "level": 7, "multiplier": 3600000, "price": "0.01088000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "16", "level": 1, "multiplier": 3865470566400, "price": "0.01128000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "136", "level": 10, "multiplier": 3865470566400, "price": "0.01872000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "140", "level": 11, "multiplier": 3865470566400, "price": "0.01968000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "144", "level": 12, "multiplier": 3865470566400, "price": "0.02064000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "148", "level": 13, "multiplier": 3865470566400, "price": "0.02136000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "152", "level": 14, "multiplier": 3865470566400, "price": "0.02184000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "156", "level": 15, "multiplier": 3865470566400, "price": "0.02232000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "160", "level": 16, "multiplier": 3865470566400, "price": "0.02280000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "4", "level": 1, "multiplier": 3600000, "price": "0.00612000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "102", "level": 2, "multiplier": 3600000, "price": "0.00637500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "106", "level": 3, "multiplier": 3600000, "price": "0.00654500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "110", "level": 4, "multiplier": 3600000, 
"price": "0.00671500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "114", "level": 5, "multiplier": 3600000, "price": "0.00714000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "128", "level": 8, "multiplier": 3600000, "price": "0.01249500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "132", "level": 9, "multiplier": 3600000, "price": "0.01411000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "60", "level": 10, "multiplier": 3600000, "price": "0.01623500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "64", "level": 11, "multiplier": 3600000, "price": "0.01734000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "68", "level": 12, "multiplier": 3600000, "price": "0.01844500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "72", "level": 13, "multiplier": 3600000, "price": "0.01921000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "76", "level": 14, "multiplier": 3600000, "price": "0.01980500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "80", "level": 15, "multiplier": 3600000, "price": "0.02031500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "118", "level": 6, "multiplier": 3600000, "price": "0.00756500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "122", "level": 7, "multiplier": 3600000, "price": "0.00841500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "126", "level": 8, "multiplier": 3600000, "price": "0.00969000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "130", "level": 9, "multiplier": 3600000, "price": "0.01096500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "58", 
"level": 10, "multiplier": 3600000, "price": "0.01266500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "62", "level": 11, "multiplier": 3600000, "price": "0.01351500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "66", "level": 12, "multiplier": 3600000, "price": "0.01428000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "145", "level": 13, "multiplier": 3865470566400, "price": "0.02376000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "149", "level": 14, "multiplier": 3865470566400, "price": "0.02424000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "153", "level": 15, "multiplier": 3865470566400, "price": "0.02480000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "157", "level": 16, "multiplier": 3865470566400, "price": "0.02536000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "50", "level": 1, "multiplier": 1073741824, "price": "0.03250000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "253", "level": 2, "multiplier": 1073741824, "price": "0.03420000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "257", "level": 3, "multiplier": 1073741824, "price": "0.03750000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "261", "level": 4, "multiplier": 1073741824, "price": "0.04250000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "265", "level": 5, "multiplier": 1073741824, "price": "0.04750000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "269", "level": 6, "multiplier": 1073741824, "price": "0.05170000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "273", "level": 7, "multiplier": 1073741824, "price": "0.05580000000000000000", "resource": "tx", "unit": "GB" }, { 
"currency": "CHF", "id": "277", "level": 8, "multiplier": 1073741824, "price": "0.05920000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "281", "level": 9, "multiplier": 1073741824, "price": "0.06250000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "209", "level": 10, "multiplier": 1073741824, "price": "0.06500000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "213", "level": 11, "multiplier": 1073741824, "price": "0.06750000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "217", "level": 12, "multiplier": 1073741824, "price": "0.07000000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "221", "level": 13, "multiplier": 1073741824, "price": "0.07330000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "225", "level": 14, "multiplier": 1073741824, "price": "0.07830000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "229", "level": 15, "multiplier": 1073741824, "price": "0.08330000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "233", "level": 16, "multiplier": 1073741824, "price": "0.08830000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "84", "level": 16, "multiplier": 3600000, "price": "0.02091000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "5", "level": 0, "multiplier": 3600000, "price": "0.01105000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "13", "level": 0, "multiplier": 3865470566400, "price": "0.01352000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "53", "level": 0, "multiplier": 1073741824, "price": "0.04225000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "255", "level": 2, "multiplier": 1073741824, "price": "0.02050000000000000000", "resource": "tx", "unit": "GB" }, { 
"currency": "CHF", "id": "129", "level": 9, "multiplier": 3600000, "price": "0.01564000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "57", "level": 10, "multiplier": 3600000, "price": "0.01810500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "61", "level": 11, "multiplier": 3600000, "price": "0.01929500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "65", "level": 12, "multiplier": 3600000, "price": "0.02048500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "69", "level": 13, "multiplier": 3600000, "price": "0.02142000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "73", "level": 14, "multiplier": 3600000, "price": "0.02193000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "77", "level": 15, "multiplier": 3600000, "price": "0.02261000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "81", "level": 16, "multiplier": 3600000, "price": "0.02320500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "259", "level": 3, "multiplier": 1073741824, "price": "0.02250000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "85", "level": 17, "multiplier": 3600000, "price": "0.02558500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "263", "level": 4, "multiplier": 1073741824, "price": "0.02550000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "89", "level": 18, "multiplier": 3600000, "price": "0.02805000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "267", "level": 5, "multiplier": 1073741824, "price": "0.02850000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "271", "level": 6, "multiplier": 1073741824, "price": "0.03100000000000000000", "resource": "tx", 
"unit": "GB" }, { "currency": "EUR", "id": "210", "level": 10, "multiplier": 1073741824, "price": "0.04550000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "212", "level": 10, "multiplier": 1073741824, "price": "0.05850000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "275", "level": 7, "multiplier": 1073741824, "price": "0.03350000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "214", "level": 11, "multiplier": 1073741824, "price": "0.04730000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "216", "level": 11, "multiplier": 1073741824, "price": "0.06080000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "279", "level": 8, "multiplier": 1073741824, "price": "0.03550000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "218", "level": 12, "multiplier": 1073741824, "price": "0.04900000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "220", "level": 12, "multiplier": 1073741824, "price": "0.06300000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "283", "level": 9, "multiplier": 1073741824, "price": "0.03750000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "222", "level": 13, "multiplier": 1073741824, "price": "0.05130000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "224", "level": 13, "multiplier": 1073741824, "price": "0.06600000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "211", "level": 10, "multiplier": 1073741824, "price": "0.03900000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "226", "level": 14, "multiplier": 1073741824, "price": "0.05480000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "228", "level": 14, "multiplier": 1073741824, "price": "0.07050000000000000000", "resource": "tx", "unit": "GB" 
}, { "currency": "GBP", "id": "215", "level": 11, "multiplier": 1073741824, "price": "0.04050000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "230", "level": 15, "multiplier": 1073741824, "price": "0.05830000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "232", "level": 15, "multiplier": 1073741824, "price": "0.07500000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "219", "level": 12, "multiplier": 1073741824, "price": "0.04200000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "234", "level": 16, "multiplier": 1073741824, "price": "0.06180000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "236", "level": 16, "multiplier": 1073741824, "price": "0.07950000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "223", "level": 13, "multiplier": 1073741824, "price": "0.04400000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "238", "level": 17, "multiplier": 1073741824, "price": "0.06530000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "240", "level": 17, "multiplier": 1073741824, "price": "0.08400000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "227", "level": 14, "multiplier": 1073741824, "price": "0.04700000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "242", "level": 18, "multiplier": 1073741824, "price": "0.07000000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "244", "level": 18, "multiplier": 1073741824, "price": "0.09000000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "231", "level": 15, "multiplier": 1073741824, "price": "0.05000000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "246", "level": 19, "multiplier": 1073741824, "price": "0.07580000000000000000", "resource": "tx", "unit": "GB" }, { 
"currency": "USD", "id": "248", "level": 19, "multiplier": 1073741824, "price": "0.09750000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "52", "level": 1, "multiplier": 1073741824, "price": "0.02280000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "56", "level": 1, "multiplier": 1073741824, "price": "0.02930000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "235", "level": 16, "multiplier": 1073741824, "price": "0.05300000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "250", "level": 20, "multiplier": 1073741824, "price": "0.08280000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "252", "level": 20, "multiplier": 1073741824, "price": "0.10650000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "239", "level": 17, "multiplier": 1073741824, "price": "0.05600000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "254", "level": 2, "multiplier": 1073741824, "price": "0.02390000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "256", "level": 2, "multiplier": 1073741824, "price": "0.03080000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "243", "level": 18, "multiplier": 1073741824, "price": "0.06000000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "258", "level": 3, "multiplier": 1073741824, "price": "0.02630000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "260", "level": 3, "multiplier": 1073741824, "price": "0.03380000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "247", "level": 19, "multiplier": 1073741824, "price": "0.06500000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "262", "level": 4, "multiplier": 1073741824, "price": "0.02980000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", 
"id": "264", "level": 4, "multiplier": 1073741824, "price": "0.03830000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "GBP", "id": "251", "level": 20, "multiplier": 1073741824, "price": "0.07100000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "266", "level": 5, "multiplier": 1073741824, "price": "0.03330000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "268", "level": 5, "multiplier": 1073741824, "price": "0.04280000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "270", "level": 6, "multiplier": 1073741824, "price": "0.03620000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "272", "level": 6, "multiplier": 1073741824, "price": "0.04650000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "274", "level": 7, "multiplier": 1073741824, "price": "0.03910000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "276", "level": 7, "multiplier": 1073741824, "price": "0.05030000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "278", "level": 8, "multiplier": 1073741824, "price": "0.04140000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "280", "level": 8, "multiplier": 1073741824, "price": "0.05330000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "282", "level": 9, "multiplier": 1073741824, "price": "0.04380000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "284", "level": 9, "multiplier": 1073741824, "price": "0.05630000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "180", "level": 2, "multiplier": 3865470566400, "price": "0.01152000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "93", "level": 19, "multiplier": 3600000, "price": "0.03162000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "CHF", "id": "97", 
"level": 20, "multiplier": 3600000, "price": "0.03638000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "184", "level": 3, "multiplier": 3865470566400, "price": "0.01176000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "306", "level": 0, "multiplier": 2592000, "price": "15.69000000000000000000", "resource": "msft_7nq_00302", "unit": "" }, { "currency": "CHF", "id": "297", "level": 0, "multiplier": 2592000, "price": "8.50000000000000000000", "resource": "msft_p71_01031", "unit": "" }, { "currency": "CHF", "id": "301", "level": 0, "multiplier": 2592000, "price": "1.60000000000000000000", "resource": "msft_p73_04837_core", "unit": "" }, { "currency": "CHF", "id": "307", "level": 0, "multiplier": 2592000, "price": "0.98000000000000000000", "resource": "msft_tfa_00523", "unit": "" }, { "currency": "EUR", "id": "295", "level": 0, "multiplier": 2592000, "price": "350.00000000000000000000", "resource": "msft_tfa_00009", "unit": "count/month" }, { "currency": "EUR", "id": "313", "level": 0, "multiplier": 2592000, "price": "0.73010000000000000000", "resource": "msft_tfa_00523", "unit": "" }, { "currency": "GBP", "id": "314", "level": 0, "multiplier": 2592000, "price": "38.51520000000000000000", "resource": "msft_7jq_00341", "unit": "" }, { "currency": "GBP", "id": "315", "level": 0, "multiplier": 2592000, "price": "10.04160000000000000000", "resource": "msft_7nq_00302", "unit": "" }, { "currency": "GBP", "id": "288", "level": 0, "multiplier": 2592000, "price": "15.00000000000000000000", "resource": "msft_lwa_00135", "unit": "count/month" }, { "currency": "GBP", "id": "300", "level": 0, "multiplier": 2592000, "price": "5.44000000000000000000", "resource": "msft_p71_01031", "unit": "" }, { "currency": "GBP", "id": "292", "level": 0, "multiplier": 2592000, "price": "27.00000000000000000000", "resource": "msft_p73_04837", "unit": "count/month" }, { "currency": "GBP", "id": "304", "level": 0, "multiplier": 
2592000, "price": "1.02400000000000000000", "resource": "msft_p73_04837_core", "unit": "" }, { "currency": "GBP", "id": "296", "level": 0, "multiplier": 2592000, "price": "300.00000000000000000000", "resource": "msft_tfa_00009", "unit": "count/month" }, { "currency": "GBP", "id": "316", "level": 0, "multiplier": 2592000, "price": "0.62720000000000000000", "resource": "msft_tfa_00523", "unit": "" }, { "currency": "USD", "id": "188", "level": 4, "multiplier": 3865470566400, "price": "0.01200000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "192", "level": 5, "multiplier": 3865470566400, "price": "0.01248000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "196", "level": 6, "multiplier": 3865470566400, "price": "0.01296000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "200", "level": 7, "multiplier": 3865470566400, "price": "0.01392000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "204", "level": 8, "multiplier": 3865470566400, "price": "0.01536000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "208", "level": 9, "multiplier": 3865470566400, "price": "0.01680000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "164", "level": 17, "multiplier": 3865470566400, "price": "0.02472000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "168", "level": 18, "multiplier": 3865470566400, "price": "0.02664000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "172", "level": 19, "multiplier": 3865470566400, "price": "0.02952000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "176", "level": 20, "multiplier": 3865470566400, "price": "0.03336000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "178", "level": 2, "multiplier": 3865470566400, "price": 
"0.00896000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "182", "level": 3, "multiplier": 3865470566400, "price": "0.00912000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "186", "level": 4, "multiplier": 3865470566400, "price": "0.00936000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "190", "level": 5, "multiplier": 3865470566400, "price": "0.00976000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "194", "level": 6, "multiplier": 3865470566400, "price": "0.01008000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "198", "level": 7, "multiplier": 3865470566400, "price": "0.01080000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "202", "level": 8, "multiplier": 3865470566400, "price": "0.01200000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "206", "level": 9, "multiplier": 3865470566400, "price": "0.01304000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "162", "level": 17, "multiplier": 3865470566400, "price": "0.01920000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "166", "level": 18, "multiplier": 3865470566400, "price": "0.02072000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "170", "level": 19, "multiplier": 3865470566400, "price": "0.02296000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "174", "level": 20, "multiplier": 3865470566400, "price": "0.02592000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "179", "level": 2, "multiplier": 3865470566400, "price": "0.00768000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "183", "level": 3, "multiplier": 3865470566400, "price": "0.00784000000000000000", "resource": "mem", 
"unit": "GB/hour" }, { "currency": "GBP", "id": "187", "level": 4, "multiplier": 3865470566400, "price": "0.00800000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "191", "level": 5, "multiplier": 3865470566400, "price": "0.00832000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "195", "level": 6, "multiplier": 3865470566400, "price": "0.00864000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "1", "level": 0, "multiplier": 3600000, "price": "0.01700000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "3", "level": 0, "multiplier": 3600000, "price": "0.01360000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "11", "level": 0, "multiplier": 3865470566400, "price": "0.01664000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "51", "level": 0, "multiplier": 1073741824, "price": "0.05200000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "EUR", "id": "19", "level": 0, "multiplier": 2783138807808000, "price": "0.16000000000000000000", "resource": "dssd", "unit": "GB/month" }, { "currency": "EUR", "id": "27", "level": 0, "multiplier": 2783138807808000, "price": "0.16000000000000000000", "resource": "ssd", "unit": "GB/month" }, { "currency": "EUR", "id": "35", "level": 0, "multiplier": 2592000, "price": "4.00000000000000000000", "resource": "ip", "unit": "IP" }, { "currency": "EUR", "id": "43", "level": 0, "multiplier": 2592000, "price": "8.00000000000000000000", "resource": "vlan", "unit": "VLAN" }, { "currency": "GBP", "id": "21", "level": 0, "multiplier": 2783138807808000, "price": "0.01352000000000000000", "resource": "dssd", "unit": "GB/month" }, { "currency": "GBP", "id": "29", "level": 0, "multiplier": 2783138807808000, "price": "0.01352000000000000000", "resource": "ssd", "unit": "GB/month" }, { "currency": "GBP", "id": "37", "level": 0, "multiplier": 
2592000, "price": "3.25000000000000000000", "resource": "ip", "unit": "IP" }, { "currency": "GBP", "id": "45", "level": 0, "multiplier": 2592000, "price": "6.50000000000000000000", "resource": "vlan", "unit": "VLAN" }, { "currency": "CHF", "id": "9", "level": 0, "multiplier": 3865470566400, "price": "0.02080000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "49", "level": 0, "multiplier": 1073741824, "price": "0.06500000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "17", "level": 0, "multiplier": 2783138807808000, "price": "0.20000000000000000000", "resource": "dssd", "unit": "GB/month" }, { "currency": "CHF", "id": "25", "level": 0, "multiplier": 2783138807808000, "price": "0.45000000000000000000", "resource": "ssd", "unit": "GB/month" }, { "currency": "CHF", "id": "33", "level": 0, "multiplier": 2592000, "price": "5.00000000000000000000", "resource": "ip", "unit": "IP" }, { "currency": "CHF", "id": "41", "level": 0, "multiplier": 2592000, "price": "9.50000000000000000000", "resource": "vlan", "unit": "VLAN" }, { "currency": "USD", "id": "7", "level": 0, "multiplier": 3600000, "price": "0.01640000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "15", "level": 0, "multiplier": 3865470566400, "price": "0.01840000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "USD", "id": "55", "level": 0, "multiplier": 1073741824, "price": "0.05850000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "23", "level": 0, "multiplier": 2783138807808000, "price": "0.18000000000000000000", "resource": "dssd", "unit": "GB/month" }, { "currency": "USD", "id": "31", "level": 0, "multiplier": 2783138807808000, "price": "0.47400000000000000000", "resource": "ssd", "unit": "GB/month" }, { "currency": "USD", "id": "39", "level": 0, "multiplier": 2592000, "price": "4.50000000000000000000", "resource": "ip", "unit": "IP" }, { "currency": "USD", "id": 
"47", "level": 0, "multiplier": 2592000, "price": "10.00000000000000000000", "resource": "vlan", "unit": "VLAN" }, { "currency": "CHF", "id": "177", "level": 2, "multiplier": 3865470566400, "price": "0.01280000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "181", "level": 3, "multiplier": 3865470566400, "price": "0.01304000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "185", "level": 4, "multiplier": 3865470566400, "price": "0.01328000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "189", "level": 5, "multiplier": 3865470566400, "price": "0.01392000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "193", "level": 6, "multiplier": 3865470566400, "price": "0.01440000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "197", "level": 7, "multiplier": 3865470566400, "price": "0.01552000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "201", "level": 8, "multiplier": 3865470566400, "price": "0.01712000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "205", "level": 9, "multiplier": 3865470566400, "price": "0.01872000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "161", "level": 17, "multiplier": 3865470566400, "price": "0.02744000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "165", "level": 18, "multiplier": 3865470566400, "price": "0.02960000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "169", "level": 19, "multiplier": 3865470566400, "price": "0.03280000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "173", "level": 20, "multiplier": 3865470566400, "price": "0.03704000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "237", "level": 17, "multiplier": 1073741824, 
"price": "0.09330000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "241", "level": 18, "multiplier": 1073741824, "price": "0.10000000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "245", "level": 19, "multiplier": 1073741824, "price": "0.10830000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "CHF", "id": "249", "level": 20, "multiplier": 1073741824, "price": "0.11830000000000000000", "resource": "tx", "unit": "GB" }, { "currency": "USD", "id": "88", "level": 17, "multiplier": 3600000, "price": "0.02303500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "92", "level": 18, "multiplier": 3600000, "price": "0.02524500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "96", "level": 19, "multiplier": 3600000, "price": "0.02847500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "USD", "id": "100", "level": 20, "multiplier": 3600000, "price": "0.03281000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "70", "level": 13, "multiplier": 3600000, "price": "0.01496000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "74", "level": 14, "multiplier": 3600000, "price": "0.01538500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "78", "level": 15, "multiplier": 3600000, "price": "0.01581000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "82", "level": 16, "multiplier": 3600000, "price": "0.01623500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "86", "level": 17, "multiplier": 3600000, "price": "0.01793500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "90", "level": 18, "multiplier": 3600000, "price": "0.01955000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "94", "level": 19, 
"multiplier": 3600000, "price": "0.02210000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "98", "level": 20, "multiplier": 3600000, "price": "0.02550000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "EUR", "id": "12", "level": 1, "multiplier": 3865470566400, "price": "0.00880000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "134", "level": 10, "multiplier": 3865470566400, "price": "0.01456000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "138", "level": 11, "multiplier": 3865470566400, "price": "0.01528000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "142", "level": 12, "multiplier": 3865470566400, "price": "0.01608000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "146", "level": 13, "multiplier": 3865470566400, "price": "0.01664000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "150", "level": 14, "multiplier": 3865470566400, "price": "0.01696000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "154", "level": 15, "multiplier": 3865470566400, "price": "0.01736000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "EUR", "id": "158", "level": 16, "multiplier": 3865470566400, "price": "0.01776000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "6", "level": 1, "multiplier": 3600000, "price": "0.00527000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "103", "level": 2, "multiplier": 3600000, "price": "0.00544000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "107", "level": 3, "multiplier": 3600000, "price": "0.00561000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "111", "level": 4, "multiplier": 3600000, "price": "0.00578000000000000000", "resource": 
"cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "115", "level": 5, "multiplier": 3600000, "price": "0.00612000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "119", "level": 6, "multiplier": 3600000, "price": "0.00654500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "123", "level": 7, "multiplier": 3600000, "price": "0.00722500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "127", "level": 8, "multiplier": 3600000, "price": "0.00833000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "131", "level": 9, "multiplier": 3600000, "price": "0.00943500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "59", "level": 10, "multiplier": 3600000, "price": "0.01088000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "63", "level": 11, "multiplier": 3600000, "price": "0.01156000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "67", "level": 12, "multiplier": 3600000, "price": "0.01232500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "71", "level": 13, "multiplier": 3600000, "price": "0.01283500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "75", "level": 14, "multiplier": 3600000, "price": "0.01326000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "79", "level": 15, "multiplier": 3600000, "price": "0.01360000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "83", "level": 16, "multiplier": 3600000, "price": "0.01394000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "87", "level": 17, "multiplier": 3600000, "price": "0.01538500000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "91", "level": 18, "multiplier": 3600000, "price": 
"0.01683000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "95", "level": 19, "multiplier": 3600000, "price": "0.01904000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "99", "level": 20, "multiplier": 3600000, "price": "0.02193000000000000000", "resource": "cpu", "unit": "GHz/hour" }, { "currency": "GBP", "id": "14", "level": 1, "multiplier": 3865470566400, "price": "0.00752000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "199", "level": 7, "multiplier": 3865470566400, "price": "0.00928000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "203", "level": 8, "multiplier": 3865470566400, "price": "0.01024000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "207", "level": 9, "multiplier": 3865470566400, "price": "0.01120000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "135", "level": 10, "multiplier": 3865470566400, "price": "0.01248000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "139", "level": 11, "multiplier": 3865470566400, "price": "0.01312000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "143", "level": 12, "multiplier": 3865470566400, "price": "0.01376000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "147", "level": 13, "multiplier": 3865470566400, "price": "0.01424000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "151", "level": 14, "multiplier": 3865470566400, "price": "0.01456000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "155", "level": 15, "multiplier": 3865470566400, "price": "0.01488000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "159", "level": 16, "multiplier": 3865470566400, "price": "0.01520000000000000000", "resource": "mem", "unit": 
"GB/hour" }, { "currency": "GBP", "id": "163", "level": 17, "multiplier": 3865470566400, "price": "0.01648000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "167", "level": 18, "multiplier": 3865470566400, "price": "0.01776000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "171", "level": 19, "multiplier": 3865470566400, "price": "0.01968000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "GBP", "id": "175", "level": 20, "multiplier": 3865470566400, "price": "0.02224000000000000000", "resource": "mem", "unit": "GB/hour" }, { "currency": "CHF", "id": "305", "level": 0, "multiplier": 2592000, "price": "60.18000000000000000000", "resource": "msft_7jq_00341", "unit": "" }, { "currency": "CHF", "id": "285", "level": 0, "multiplier": 2592000, "price": "25.00000000000000000000", "resource": "msft_lwa_00135", "unit": "count/month" }, { "currency": "CHF", "id": "289", "level": 0, "multiplier": 2592000, "price": "45.00000000000000000000", "resource": "msft_p73_04837", "unit": "count/month" }, { "currency": "CHF", "id": "293", "level": 0, "multiplier": 2592000, "price": "500.00000000000000000000", "resource": "msft_tfa_00009", "unit": "count/month" }, { "currency": "USD", "id": "308", "level": 0, "multiplier": 2592000, "price": "58.07370000000000000000", "resource": "msft_7jq_00341", "unit": "" }, { "currency": "USD", "id": "309", "level": 0, "multiplier": 2592000, "price": "15.14085000000000000000", "resource": "msft_7nq_00302", "unit": "" }, { "currency": "USD", "id": "286", "level": 0, "multiplier": 2592000, "price": "22.50000000000000000000", "resource": "msft_lwa_00135", "unit": "count/month" }, { "currency": "USD", "id": "298", "level": 0, "multiplier": 2592000, "price": "8.20250000000000000000", "resource": "msft_p71_01031", "unit": "" }, { "currency": "USD", "id": "290", "level": 0, "multiplier": 2592000, "price": "40.50000000000000000000", "resource": "msft_p73_04837", "unit": 
"count/month" }, { "currency": "USD", "id": "302", "level": 0, "multiplier": 2592000, "price": "1.54400000000000000000", "resource": "msft_p73_04837_core", "unit": "" }, { "currency": "USD", "id": "294", "level": 0, "multiplier": 2592000, "price": "450.00000000000000000000", "resource": "msft_tfa_00009", "unit": "count/month" }, { "currency": "USD", "id": "310", "level": 0, "multiplier": 2592000, "price": "0.94570000000000000000", "resource": "msft_tfa_00523", "unit": "" }, { "currency": "EUR", "id": "311", "level": 0, "multiplier": 2592000, "price": "44.83410000000000000000", "resource": "msft_7jq_00341", "unit": "" }, { "currency": "EUR", "id": "312", "level": 0, "multiplier": 2592000, "price": "11.68905000000000000000", "resource": "msft_7nq_00302", "unit": "" }, { "currency": "EUR", "id": "287", "level": 0, "multiplier": 2592000, "price": "17.50000000000000000000", "resource": "msft_lwa_00135", "unit": "count/month" }, { "currency": "EUR", "id": "299", "level": 0, "multiplier": 2592000, "price": "6.33250000000000000000", "resource": "msft_p71_01031", "unit": "" }, { "currency": "EUR", "id": "291", "level": 0, "multiplier": 2592000, "price": "31.50000000000000000000", "resource": "msft_p73_04837", "unit": "count/month" }, { "currency": "EUR", "id": "303", "level": 0, "multiplier": 2592000, "price": "1.19200000000000000000", "resource": "msft_p73_04837_core", "unit": "" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/0000775000175000017500000000000013160535110025213 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/list_locations.json0000664000175000017500000000206613153541406031147 0ustar kamikami00000000000000{ "id": "locations", "type": "collection", "href": "/cloudapi/v3/locations", "items": [ { "id": "de/fkb", "type": "location", "href": "/cloudapi/v3/locations/de/fkb", "properties": { "name": "karlsruhe", "features": [ "SSD", "MULTIPLE_CPU" ] } }, { "id": "de/fra", "type": "location", 
"href": "/cloudapi/v3/locations/de/fra", "properties": { "name": "frankfurt", "features": [ "SSD", "MULTIPLE_CPU" ] } }, { "id": "us/las", "type": "location", "href": "/cloudapi/v3/locations/us/las", "properties": { "name": "lasvegas", "features": [ "SSD", "MULTIPLE_CPU" ] } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_node.json0000664000175000017500000000774713153541406031415 0ustar kamikami00000000000000{ "id": "srv-1", "type": "server", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1", "metadata": { "createdDate": "2016-10-18T07:28:05Z", "createdBy": "test@test.test", "etag": "e7cf186125f51f3d9511754a40dcd12c", "lastModifiedDate": "2016-10-18T07:28:05Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "A test node", "cores": 2, "ram": 4096, "availabilityZone": "AUTO", "vmState": "RUNNING", "bootCdrom": null, "bootVolume": { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1", "metadata": { "createdDate": "2016-10-18T07:20:41Z", "createdBy": "test@test.test", "etag": "33f6b8d506e7ad756e8554b915f29c61", "lastModifiedDate": "2016-10-18T07:20:41Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "A test node boot volume", "type": "HDD", "size": 10, "image": "bvol-img", "imagePassword": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } }, "cpuFamily": "AMD_OPTERON" }, "entities": { "cdroms": { "id": "srv-1/cdroms", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/cdroms", "items": [] }, "volumes": { "id": "srv-1/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/volumes", "items": [ { "id": "bvol-1", "type": 
"volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1", "metadata": { "createdDate": "2016-10-18T07:20:41Z", "createdBy": "test@test.test", "etag": "33f6b8d506e7ad756e8554b915f29c61", "lastModifiedDate": "2016-10-18T07:20:41Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "A test node volume", "type": "HDD", "size": 10, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } } ] }, "nics": { "id": "srv-1/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/nics", "items": [] } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_update_image.json0000664000175000017500000000162513153541406031241 0ustar kamikami00000000000000{ "id": "img-2", "type": "image", "href": "/cloudapi/v3/images/img-2", "metadata": { "createdDate": "2015-10-09T12:06:34Z", "createdBy": "test@test.te", "etag": "bbf76112358af2fc5dd1bf21de8988db", "lastModifiedDate": "2016-11-11T15:23:20Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties": { "name": "my-updated-image.img", "description": null, "location": "de/fkb", "size": 2, "cpuHotPlug": false, "cpuHotUnplug": false, "ramHotPlug": false, "ramHotUnplug": false, "nicHotPlug": false, "nicHotUnplug": false, "discVirtioHotPlug": false, "discVirtioHotUnplug": false, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "UNKNOWN", "imageType": "HDD", "public": false } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_create_network_interface.json0000664000175000017500000000113213153541406033642 0ustar kamikami00000000000000{ "id": "nic-2", "type": "nic", "href": 
"/cloudapi/v3/datacenters/dc-1/servers/srv-1/nics/nic-2", "metadata": { "createdDate": "2016-10-19T08:18:50Z", "createdBy": "test@test.te", "etag": "8679142b0b1b70c8b8c09a8b31e6ded9", "lastModifiedDate": "2016-10-19T08:18:50Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties": { "name": "Creating a test network interface.", "mac": null, "ips": [], "dhcp": true, "lan": 1, "firewallActive": null, "nat": null } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_list_lans.json0000664000175000017500000001774613153541406030620 0ustar kamikami00000000000000{ "id": "dc-1/lans", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/lans", "items": [ { "id": "1", "type": "lan", "href": "/cloudapi/v3/datacenters/dc-1/lans/1", "metadata": { "createdDate": "2016-10-24T08:03:22Z", "createdBy": "test@test.te", "etag": "1c30221454228d239f0610b6eb3f41d7", "lastModifiedDate": "2016-10-24T08:03:22Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Switch for LAN 1", "public": false }, "entities": { "nics": { "id": "1/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/lans/1/nics", "items": [ { "id": "dc5e1c66-7280-4a5c-8e13-51ce030e0980", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/d37abf6b-0980-4bf8-813b-b69863cf7b9e/nics/dc5e1c66-7280-4a5c-8e13-51ce030e0980", "metadata": { "createdDate": "2016-10-21T13:00:02Z", "createdBy": "test@test.te", "etag": "d8177b9c38b0366d26c28df0066ade9e", "lastModifiedDate": "2016-10-24T08:03:22Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Three", "mac": "02:01:49:87:3e:5f", "ips": [ "10.15.104.14" ], "dhcp": true, "lan": 1, "firewallActive": true, "nat": false }, "entities": { "firewallrules": { "id": "dc5e1c66-7280-4a5c-8e13-51ce030e0980/firewallrules", "type": "collection", "href": 
"/cloudapi/v3/datacenters/dc-1/servers/d37abf6b-0980-4bf8-813b-b69863cf7b9e/nics/dc5e1c66-7280-4a5c-8e13-51ce030e0980/firewallrules" } } }, { "id": "912324a6-c057-445d-84dd-8fea82dfa719", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/811ab72b-f7f3-427b-8220-f031eae34f63/nics/912324a6-c057-445d-84dd-8fea82dfa719", "metadata": { "createdDate": "2016-10-21T13:00:02Z", "createdBy": "test@test.te", "etag": "d8177b9c38b0366d26c28df0066ade9e", "lastModifiedDate": "2016-10-24T08:03:22Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Two", "mac": "02:01:af:48:34:06", "ips": [ "10.15.104.13" ], "dhcp": true, "lan": 1, "firewallActive": false, "nat": false }, "entities": { "firewallrules": { "id": "912324a6-c057-445d-84dd-8fea82dfa719/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/811ab72b-f7f3-427b-8220-f031eae34f63/nics/912324a6-c057-445d-84dd-8fea82dfa719/firewallrules" } } }, { "id": "8d9fb87c-79f7-4c3c-8f7b-e2050a881b16", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/b1a64861-5819-498a-899a-a57126050952/nics/8d9fb87c-79f7-4c3c-8f7b-e2050a881b16", "metadata": { "createdDate": "2016-10-21T13:00:02Z", "createdBy": "test@test.te", "etag": "d8177b9c38b0366d26c28df0066ade9e", "lastModifiedDate": "2016-10-24T08:03:22Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Four", "mac": "02:01:7c:7b:0b:5a", "ips": [ "10.15.104.12" ], "dhcp": true, "lan": 1, "firewallActive": false, "nat": false }, "entities": { "firewallrules": { "id": "8d9fb87c-79f7-4c3c-8f7b-e2050a881b16/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/b1a64861-5819-498a-899a-a57126050952/nics/8d9fb87c-79f7-4c3c-8f7b-e2050a881b16/firewallrules" } } }, { "id": "76d7965d-19e5-491f-8b2f-1c4a3ef127b5", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/77ef2301-b4df-46aa-8544-2dae76c9d7b5/nics/76d7965d-19e5-491f-8b2f-1c4a3ef127b5", 
"metadata": { "createdDate": "2016-10-21T13:00:02Z", "createdBy": "test@test.te", "etag": "eb2031a5e259fcb4f714646aca0282c6", "lastModifiedDate": "2016-10-24T08:03:22Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Five", "mac": "02:01:bb:ad:7d:58", "ips": [ "10.15.104.11" ], "dhcp": true, "lan": 1, "firewallActive": false, "nat": false }, "entities": { "firewallrules": { "id": "76d7965d-19e5-491f-8b2f-1c4a3ef127b5/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/77ef2301-b4df-46aa-8544-2dae76c9d7b5/nics/76d7965d-19e5-491f-8b2f-1c4a3ef127b5/firewallrules" } } } ] } } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_list_attached_volumes.json0000664000175000017500000000777013153541406033206 0ustar kamikami00000000000000{ "id": "srv-1/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/volumes", "items": [ { "id": "d6688ced-700d-4c9c-a3a7-9f7170c5edc3", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/d6688ced-700d-4c9c-a3a7-9f7170c5edc3", "metadata": { "createdDate": "2016-10-17T13:13:36Z", "createdBy": "test@test.test", "etag": "c1800ce349033f9cd2c095ea1ea4976a", "lastModifiedDate": "2016-10-17T13:47:52Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Volume one", "type": "HDD", "size": 40, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 3 } }, { "id": "vol-2", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/vol-2", "metadata": { "createdDate": "2016-10-18T15:13:44Z", "createdBy": "test@test.test", "etag": 
"a96f7781920c2890c25f967e66e6ee91", "lastModifiedDate": "2016-10-18T15:13:44Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Volume two", "type": "HDD", "size": 10, "availabilityZone": "AUTO", "image": null, "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": null, "cpuHotPlug": false, "cpuHotUnplug": false, "ramHotPlug": false, "ramHotUnplug": false, "nicHotPlug": false, "nicHotUnplug": false, "discVirtioHotPlug": false, "discVirtioHotUnplug": false, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 2 } }, { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1", "metadata": { "createdDate": "2016-10-18T07:20:41Z", "createdBy": "test@test.test", "etag": "33f6b8d506e7ad756e8554b915f29c61", "lastModifiedDate": "2016-10-18T07:20:41Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Volume three", "type": "HDD", "size": 10, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_ip_block.json0000664000175000017500000000103413153541406032231 0ustar kamikami00000000000000{ "id": "ipb-2", "type": "ipblock", "href": "/cloudapi/v3/ipblocks/ipb-2", "metadata": { "createdDate": "2016-10-26T15:05:12Z", "createdBy": "test@test.te", "etag": "43e05b766899950bc8a5aeee0fd89b05", "lastModifiedDate": "2016-10-26T15:05:12Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "ips": [ "78.137.101.250" ], "location": "de/fkb", "size": 1, "name": "Test IP Block One" } 
}apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/list_volumes.json0000664000175000017500000001011513153541406030640 0ustar kamikami00000000000000{ "id": "dc-1/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/volumes", "items": [ { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1", "metadata": { "createdDate": "2016-10-18T07:20:41Z", "createdBy": "test@test.test", "etag": "33f6b8d506e7ad756e8554b915f29c61", "lastModifiedDate": "2016-10-18T07:20:41Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Test Volume", "type": "HDD", "size": 10, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } }, { "id": "d6688ced-700d-4c9c-a3a7-9f7170c5edc3", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/d6688ced-700d-4c9c-a3a7-9f7170c5edc3", "metadata": { "createdDate": "2016-10-17T13:13:36Z", "createdBy": "test@test.test", "etag": "c1800ce349033f9cd2c095ea1ea4976a", "lastModifiedDate": "2016-10-17T13:47:52Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Updated storage name", "type": "HDD", "size": 40, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": null, "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": null } }, { "id": "21d7e7de-5054-4041-b691-717aa8b3c799", "type": "volume", "href": 
"/cloudapi/v3/datacenters/dc-1/volumes/21d7e7de-5054-4041-b691-717aa8b3c799", "metadata": { "createdDate": "2016-10-17T11:33:20Z", "createdBy": "test@test.test", "etag": "a9d0b923527efae5e7071e9118e9eece", "lastModifiedDate": "2016-10-17T11:33:20Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Image and location and size. - volume", "type": "HDD", "size": 50, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_update_firewall_rule.json0000664000175000017500000000126213153541406033010 0ustar kamikami00000000000000{ "id": "fw2", "type": "firewall-rule", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2/firewallrules/fw2", "metadata": { "createdDate": "2016-10-19T09:55:10Z", "createdBy": "test@test.te", "etag": "00bb5b86562db1ed19ca38697e485160", "lastModifiedDate": "2016-10-19T09:55:10Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties": { "name": "HTTPs (SSL)", "protocol": "TCP", "sourceMac": null, "sourceIp": null, "targetIp": null, "icmpCode": null, "icmpType": null, "portRangeStart": 443, "portRangeEnd": 443 } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_update_load_balancer.json0000664000175000017500000000100413153541406032714 0ustar kamikami00000000000000{ "id": "bal-1", "type": "loadbalancer", "href": "/cloudapi/v3/datacenters/dc-1/loadbalancers/bal-1", "metadata": { "createdDate": "2019-10-26T13:02:33Z", "createdBy": "test@test.te", "etag": "71e8df57a58615b9e15400ede4138b41", "lastModifiedDate": "2019-10-26T13:02:33Z", "lastModifiedBy": "test@test.te", "state": 
"BUSY" }, "properties": { "name": "Updated Load Balancer", "ip": "123.124.125.126", "dhcp": false } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_create_ip_block.json0000664000175000017500000000106313153541406031716 0ustar kamikami00000000000000{ "id": "ipb-1", "type": "ipblock", "href": "/cloudapi/v3/ipblocks/ipb-1", "metadata": { "createdDate": "2016-10-26T15:05:36Z", "createdBy": "test@test.te", "etag": "acbf00bacf7ee48d4b8bc4e7413e1f30", "lastModifiedDate": "2016-10-26T15:05:36Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties": { "ips": [ "11.12.13.14", "15.16.17.18" ], "location": "de/fkb", "size": 2, "name": "Test Created IP Block" } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_create_firewall_rule.json0000664000175000017500000000130113153541406032763 0ustar kamikami00000000000000{ "id": "fwr-1", "type": "firewall-rule", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2/firewallrules/fwr-1", "metadata": { "createdDate": "2016-10-19T11:08:04Z", "createdBy": "test@test.te", "etag": "2a21551ba4adf85d9fb04b05a6938bcc", "lastModifiedDate": "2016-10-19T11:08:04Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties": { "name": "Test created firewall rule", "protocol": "TCP", "sourceMac": null, "sourceIp": null, "targetIp": null, "icmpCode": null, "icmpType": null, "portRangeStart": 80, "portRangeEnd": 80 } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_list_load_balanced_nics.json0000664000175000017500000000465613153541406033423 0ustar kamikami00000000000000{ "id": "s-3/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics", "items": [ { "id": "nic-1", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-1", "metadata": { "createdDate": "2016-10-17T15:46:38Z", "createdBy": "test@test.te", "etag": "dbd8216137cf0ec9951170f93fa8fa53", "lastModifiedDate": "2016-10-17T18:19:43Z", "lastModifiedBy": 
"test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Test network interface", "mac": "02:01:0b:9d:4d:ce", "ips": [ "10.15.124.11" ], "dhcp": false, "lan": 2, "firewallActive": true, "nat": false }, "entities": { "firewallrules": { "id": "nic-1/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-1/firewallrules" } } }, { "id": "8f7ea845-cf40-49c8-9e93-68366f31d605", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/8f7ea845-cf40-49c8-9e93-68366f31d605", "metadata": { "createdDate": "2016-10-17T16:46:38Z", "createdBy": "test@test.te", "etag": "dbd8216137cf0ec9951170f93fa8fa54", "lastModifiedDate": "2016-10-17T19:19:43Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Another test network interface", "mac": "02:01:0b:9d:4d:dd", "ips": [ "10.15.124.12" ], "dhcp": false, "lan": 2, "firewallActive": true, "nat": false }, "entities": { "firewallrules": { "id": "8f7ea845-cf40-49c8-9e93-68366f31d605/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-1/firewallrules" } } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_datacenter.json0000664000175000017500000004352613153541406032575 0ustar kamikami00000000000000{ "id": "dc-1", "type": "datacenter", "href": "/cloudapi/v3/datacenters/dc-1", "metadata": { "createdDate": "2016-10-17T11:33:11Z", "createdBy": "test@test.test", "etag": "53b215b8ec0356a649955dab019845a4", "lastModifiedDate": "2016-10-18T15:13:44Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Test Data Center", "description": "This is a test data center.", "location": "de/fkb", "version": 35, "features": [ "SSD", "MULTIPLE_CPU" ] }, "entities": { "servers": { "id": "dc-1/servers", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers", "items": [ { "id": "srv-1", "type": "server", "href": 
"/cloudapi/v3/datacenters/dc-1/servers/srv-1", "metadata": { "createdDate": "2016-10-18T07:28:05Z", "createdBy": "test@test.test", "etag": "53b215b8ec0356a649955dab019845a4", "lastModifiedDate": "2016-10-18T15:13:44Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Made with a stray volume and included size.", "cores": 2, "ram": 4096, "availabilityZone": "AUTO", "vmState": "RUNNING", "bootCdrom": null, "bootVolume": { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1", "metadata": { "createdDate": "2016-10-18T07:20:41Z", "createdBy": "test@test.test", "etag": "33f6b8d506e7ad756e8554b915f29c61", "lastModifiedDate": "2016-10-18T07:20:41Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Bootable and linkable", "type": "HDD", "size": 10, "image": "bvol-img", "imagePassword": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } }, "cpuFamily": "AMD_OPTERON" }, "entities": { "cdroms": { "id": "srv-1/cdroms", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/cdroms", "items": [] }, "volumes": { "id": "srv-1/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/volumes", "items": [ { "id": "d6688ced-700d-4c9c-a3a7-9f7170c5edc3", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/d6688ced-700d-4c9c-a3a7-9f7170c5edc3" }, { "id": "vol-2", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/vol-2" }, { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1" } ] }, "nics": { "id": "srv-1/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/nics", "items": [] } } }, { "id": 
"s-3", "type": "server", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3", "metadata": { "createdDate": "2016-10-17T11:33:20Z", "createdBy": "test@test.test", "etag": "f6c94daafaf8ef852e2dc4b82a903c7b", "lastModifiedDate": "2016-10-17T16:13:43Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Updated this node", "cores": 3, "ram": 3072, "availabilityZone": "AUTO", "vmState": "RUNNING", "bootCdrom": null, "bootVolume": { "id": "21d7e7de-5054-4041-b691-717aa8b3c799", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/21d7e7de-5054-4041-b691-717aa8b3c799", "metadata": { "createdDate": "2016-10-17T11:33:20Z", "createdBy": "test@test.test", "etag": "a9d0b923527efae5e7071e9118e9eece", "lastModifiedDate": "2016-10-17T11:33:20Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Image and location and size. - volume", "type": "HDD", "size": 50, "image": "bvol-img", "imagePassword": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } }, "cpuFamily": "INTEL_XEON" }, "entities": { "cdroms": { "id": "s-3/cdroms", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/cdroms", "items": [] }, "volumes": { "id": "s-3/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/volumes", "items": [ { "id": "21d7e7de-5054-4041-b691-717aa8b3c799", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/21d7e7de-5054-4041-b691-717aa8b3c799" } ] }, "nics": { "id": "s-3/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics", "items": [ { "id": "nic-1", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-1" } ] } } } ] }, "volumes": { "id": 
"dc-1/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/volumes", "items": [ { "id": "vol-2", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/vol-2", "metadata": { "createdDate": "2016-10-18T15:13:44Z", "createdBy": "test@test.test", "etag": "a96f7781920c2890c25f967e66e6ee91", "lastModifiedDate": "2016-10-18T15:13:44Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Unnamed HDD Storage", "type": "HDD", "size": 10, "availabilityZone": "AUTO", "image": null, "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": null, "cpuHotPlug": false, "cpuHotUnplug": false, "ramHotPlug": false, "ramHotUnplug": false, "nicHotPlug": false, "nicHotUnplug": false, "discVirtioHotPlug": false, "discVirtioHotUnplug": false, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 2 } }, { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1", "metadata": { "createdDate": "2016-10-18T07:20:41Z", "createdBy": "test@test.test", "etag": "33f6b8d506e7ad756e8554b915f29c61", "lastModifiedDate": "2016-10-18T07:20:41Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Bootable and linkable", "type": "HDD", "size": 10, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } }, { "id": "d6688ced-700d-4c9c-a3a7-9f7170c5edc3", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/d6688ced-700d-4c9c-a3a7-9f7170c5edc3", "metadata": { "createdDate": "2016-10-17T13:13:36Z", "createdBy": "test@test.test", "etag": "c1800ce349033f9cd2c095ea1ea4976a", "lastModifiedDate": "2016-10-17T13:47:52Z", 
"lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Updated storage name", "type": "HDD", "size": 40, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 3 } }, { "id": "21d7e7de-5054-4041-b691-717aa8b3c799", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/21d7e7de-5054-4041-b691-717aa8b3c799", "metadata": { "createdDate": "2016-10-17T11:33:20Z", "createdBy": "test@test.test", "etag": "a9d0b923527efae5e7071e9118e9eece", "lastModifiedDate": "2016-10-17T11:33:20Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Image and location and size. - volume", "type": "HDD", "size": 50, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } } ] }, "loadbalancers": { "id": "dc-1/loadbalancers", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/loadbalancers", "items": [] }, "lans": { "id": "dc-1/lans", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/lans", "items": [ { "id": "2", "type": "lan", "href": "/cloudapi/v3/datacenters/dc-1/lans/2", "metadata": { "createdDate": "2016-10-17T18:19:43Z", "createdBy": "test@test.test", "etag": "33f6b8d506e7ad756e8554b915f29c61", "lastModifiedDate": "2016-10-18T07:20:41Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Switch for LAN 2", 
"public": false }, "entities": { "nics": { "id": "2/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/lans/2/nics", "items": [ { "id": "nic-1", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-1" } ] } } } ] } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/create_node.json0000664000175000017500000000222213153541406030363 0ustar kamikami00000000000000{ "id": "srv-2", "type": "server", "href": "cloudapi/v3/datacenters/f3f7ca3c-6f3d-4a4d-b3d6-15853ae5ba78/servers/srv-2", "metadata": { "createdDate": "2016-10-19T13:25:19Z", "createdBy": "test@test.te", "etag": "9bea2412ac556b402a07260fc0d1603f", "lastModifiedDate": "2016-10-19T13:25:19Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties": { "name": "Test", "cores": 1, "ram": 512, "availabilityZone": "ZONE_1", "vmState": null, "bootCdrom": null, "bootVolume": null, "cpuFamily": "INTEL_XEON" }, "entities": { "volumes": { "id": "srv-2/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/f3f7ca3c-6f3d-4a4d-b3d6-15853ae5ba78/servers/srv-2/volumes", "items": [ { "id": "53abb4de-b37b-4025-b139-3e09141e20bb", "type": "volume", "href": "/cloudapi/v3/datacenters/srv-2/volumes/53abb4de-b37b-4025-b139-3e09141e20bb" } ] } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/list_images.json0000664000175000017500000001624613153541406030426 0ustar kamikami00000000000000{ "id": "images", "type": "collection", "href": "/cloudapi/v3/images", "items": [ { "id": "img-2", "type": "image", "href": "/cloudapi/v3/images/img-2", "metadata": { "createdDate": "2016-06-02T11:33:49Z", "createdBy": "System", "etag": "9909709d99655c6f31aca789998d7d89", "lastModifiedDate": "2016-06-02T11:33:49Z", "lastModifiedBy": "System", "state": "AVAILABLE" }, "properties": { "name": "Test-Image-One-CDROM", "description": "", "location": "us/las", "size": 0.23, "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, 
"nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "LINUX", "imageType": "CDROM", "public": false } }, { "id": "img-1", "type": "image", "href": "/cloudapi/v3/images/img-1", "metadata": { "createdDate": "2014-11-14T15:22:19Z", "createdBy": "System", "etag": "957e0eac7456fa7554e73bf0d18860eb", "lastModifiedDate": "2014-11-14T15:22:19Z", "lastModifiedBy": "System", "state": "AVAILABLE" }, "properties": { "name": "Test-Image-Two-CDROM", "description": "", "location": "us/las", "size": 4, "cpuHotPlug": false, "cpuHotUnplug": false, "ramHotPlug": false, "ramHotUnplug": false, "nicHotPlug": false, "nicHotUnplug": false, "discVirtioHotPlug": false, "discVirtioHotUnplug": false, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "OTHER", "imageType": "CDROM", "public": true } }, { "id": "62c525d9-b2cf-11e5-afa0-52540066fee9", "type": "image", "href": "/cloudapi/v3/images/62c525d9-b2cf-11e5-afa0-52540066fee9", "metadata": { "createdDate": "2016-01-04T10:39:08Z", "createdBy": "System", "etag": "28cb3e31d2ef98d37b367fe15d40373e", "lastModifiedDate": "2016-01-04T10:39:08Z", "lastModifiedBy": "System", "state": "AVAILABLE" }, "properties": { "name": "Test-Image-Three-CDROM", "description": "", "location": "de/fkb", "size": 0.37, "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "LINUX", "imageType": "CDROM", "public": true } }, { "id": "e2c323ba-3c6d-11e6-9336-52540005ab80", "type": "image", "href": "/cloudapi/v3/images/e2c323ba-3c6d-11e6-9336-52540005ab80", "metadata": { "createdDate": "2016-06-27T13:48:53Z", "createdBy": "System", "etag": "216e2beb5e9bb5f7c3233ca3d0c4e9b2", "lastModifiedDate": "2016-06-27T13:48:53Z", "lastModifiedBy": "System", 
"state": "AVAILABLE" }, "properties": { "name": "Test-Image-Four-HDD", "description": "", "location": "de/fkb", "size": 0.28, "cpuHotPlug": false, "cpuHotUnplug": false, "ramHotPlug": false, "ramHotUnplug": false, "nicHotPlug": false, "nicHotUnplug": false, "discVirtioHotPlug": false, "discVirtioHotUnplug": false, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "OTHER", "imageType": "HDD", "public": false } }, { "id": "f1d2be4a-7937-11e4-8053-52540066fee9", "type": "image", "href": "/cloudapi/v3/images/f1d2be4a-7937-11e4-8053-52540066fee9", "metadata": { "createdDate": "2014-12-01T08:56:31Z", "createdBy": "System", "etag": "fd23753bfae52a11bbdfdd478296cb2a", "lastModifiedDate": "2014-12-01T08:56:31Z", "lastModifiedBy": "System", "state": "AVAILABLE" }, "properties": { "name": "Test-Image-Two-HDD", "description": "", "location": "de/fkb", "size": 0.45, "cpuHotPlug": false, "cpuHotUnplug": false, "ramHotPlug": false, "ramHotUnplug": false, "nicHotPlug": false, "nicHotUnplug": false, "discVirtioHotPlug": false, "discVirtioHotUnplug": false, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "LINUX", "imageType": "HDD", "public": true } }, { "id": "dfa08a48-7937-11e4-8053-52540066fee9", "type": "image", "href": "/cloudapi/v3/images/dfa08a48-7937-11e4-8053-52540066fee9", "metadata": { "createdDate": "2014-12-01T08:56:00Z", "createdBy": "System", "etag": "66da22f2893f1de8945c27e11bbc7ebf", "lastModifiedDate": "2014-12-01T08:56:00Z", "lastModifiedBy": "System", "state": "AVAILABLE" }, "properties": { "name": "Test-Image-Three-HDD", "description": "", "location": "de/fra", "size": 0.08, "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "LINUX", "imageType": "HDD", "public": true } } ] 
}apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_create_lan.json0000664000175000017500000000071613153541406030712 0ustar kamikami00000000000000{ "id" : "10", "type" : "lan", "href" : "/cloudapi/v3/datacenters/dc-1/lans/10", "metadata" : { "createdDate": "2016-10-17T11:33:11Z", "createdBy": "test@test.te", "etag": "53b215b8ec0356a649955dab019845a4", "lastModifiedDate": "2016-10-18T15:13:44Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties" : { "name" : "Test Created Lan", "public" : true } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_rename_datacenter.json0000664000175000017500000000246013153541406032254 0ustar kamikami00000000000000{ "id": "dc-1", "type": "datacenter", "href": "/cloudapi/v3/datacenters/dc-1", "metadata" : { "createdDate": "2016-10-17T11:33:11Z", "createdBy": "test@test.test", "etag": "53b215b8ec0356a649955dab019845a4", "lastModifiedDate": "2016-10-18T15:13:44Z", "lastModifiedBy": "test@test.test", "state" : "BUSY" }, "properties" : { "name": "Test Data Center", "description": "This is a test data center.", "location": "de/fkb", "version": 35, "features": [ "SSD", "MULTIPLE_CPU" ] }, "entities" : { "servers" : { "id": "dc-1/servers", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers" }, "volumes" : { "id": "dc-1/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/volumes" }, "loadbalancers" : { "id": "dc-1/loadbalancers", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/loadbalancers", "items": [] }, "lans" : { "id": "dc-1/lans", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/lans" } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_update_network_interface.json0000664000175000017500000000154113153541406033665 0ustar kamikami00000000000000{ "id": "nic-1", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-1", "metadata": { "createdDate": "2016-10-17T15:46:38Z", "createdBy": 
"test@test.te", "etag": "23dd052b608b59be38cef62765af7039", "lastModifiedDate": "2016-10-19T09:29:00Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties": { "name": "Updated from LibCloud", "mac": "02:01:0b:9d:4d:ce", "ips": [ "10.15.124.11" ], "dhcp": true, "lan": 1, "firewallActive": true, "nat": false }, "entities": { "firewallrules": { "id": "nic-1/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-1/firewallrules" } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/list_snapshots.json0000664000175000017500000000244313153541406031175 0ustar kamikami00000000000000{ "id": "snapshots", "type": "collection", "href": "/cloudapi/v3/snapshots", "items": [ { "id": "sshot", "type": "snapshot", "href": "/cloudapi/v3/snapshots/sshot", "metadata": { "createdDate": "2016-10-26T11:38:45Z", "createdBy": "test@test.te", "etag": "01873262ac042b5f44ed33b4241225b4", "lastModifiedDate": "2016-10-26T11:38:45Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Balancer Testing 1 Storage-Snapshot-10/26/2016", "description": "Created from \"Balancer Testing 1 Storage\" in Data Center \"Snapshot\"", "location": "us/las", "size": 10, "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "LINUX" } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_set_inet_access.json0000664000175000017500000000154113153541406031745 0ustar kamikami00000000000000{ "id": "nic-2", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2", "metadata": { "createdDate": "2016-10-19T08:18:55Z", "createdBy": "test@test.te", "etag": "56f8d8bbdc84faad4188f647a49a565b", "lastModifiedDate": "2016-10-19T09:44:59Z", "lastModifiedBy": "test@test.te", "state": "BUSY" 
}, "properties": { "name": "Updated from LibCloud", "mac": "02:01:68:c1:e8:88", "ips": [ "11.12.13.14" ], "dhcp": true, "lan": 1, "firewallActive": false, "nat": false }, "entities": { "firewallrules": { "id": "nic-2/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2/firewallrules" } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_network_interface.json0000664000175000017500000000154713153541406034171 0ustar kamikami00000000000000{ "id": "nic-2", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2", "metadata": { "createdDate": "2016-10-17T15:46:38Z", "createdBy": "test@test.te", "etag": "dbd8216137cf0ec9951170f93fa8fa53", "lastModifiedDate": "2016-10-17T18:19:43Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Updated from LibCloud", "mac": "02:01:0b:9d:4d:ce", "ips": [ "10.15.124.11" ], "dhcp": false, "lan": 2, "firewallActive": true, "nat": false }, "entities": { "firewallrules": { "id": "nic-1/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2/firewallrules" } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_create_load_balancer.json0000664000175000017500000000076313153541406032710 0ustar kamikami00000000000000{ "id": "bal-1", "type": "loadbalancer", "href": "/cloudapi/v3/datacenters/dc-1/loadbalancers/bal-1", "metadata": { "createdDate": "2016-10-26T13:02:33Z", "createdBy": "test@test.te", "etag": "71e8df57a58615b9e15400ede4138b41", "lastModifiedDate": "2016-10-26T13:02:33Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties": { "name": "Test load balancer", "ip": null, "dhcp": true } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_list_load_balancers.json0000664000175000017500000002564713153541406032613 0ustar kamikami00000000000000{ "id": "dc-2/loadbalancers", "type": "collection", "href": 
"/cloudapi/v3/datacenters/dc-2/loadbalancers", "items": [ { "id": "bal-1", "type": "loadbalancer", "href": "/cloudapi/v3/datacenters/dc-2/loadbalancers/bal-1", "metadata": { "createdDate": "2016-10-26T13:02:33Z", "createdBy": "test@test.te", "etag": "71e8df57a58615b9e15400ede4138b41", "lastModifiedDate": "2016-10-26T13:02:33Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Test One", "ip": "111.112.113.114", "dhcp": true }, "entities": { "balancednics": { "id": "bal-1/balancednics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-2/loadbalancers/bal-1/balancednics", "items": [ { "id": "68e3bd55-55a1-4fa0-8903-8c3cf23ee30a", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-2/servers/92445f57-5378-4a5b-8b53-f0f36fb8e6ad/nics/68e3bd55-55a1-4fa0-8903-8c3cf23ee30a", "metadata": { "createdDate": "2016-10-26T13:02:33Z", "createdBy": "test@test.te", "etag": "20e915cf08106b7576f6b25b5c0a2bd6", "lastModifiedDate": "2016-10-26T13:02:33Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": null, "mac": "02:01:33:58:8d:e1", "ips": [ "162.254.25.6" ], "dhcp": true, "lan": 3, "firewallActive": false, "nat": false }, "entities": { "firewallrules": { "id": "68e3bd55-55a1-4fa0-8903-8c3cf23ee30a/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-2/servers/92445f57-5378-4a5b-8b53-f0f36fb8e6ad/nics/68e3bd55-55a1-4fa0-8903-8c3cf23ee30a/firewallrules" } } } ] } } }, { "id": "24d24a20-1161-4870-8b49-434622a18875", "type": "loadbalancer", "href": "/cloudapi/v3/datacenters/dc-2/loadbalancers/24d24a20-1161-4870-8b49-434622a18875", "metadata": { "createdDate": "2016-10-26T13:02:33Z", "createdBy": "test@test.te", "etag": "71e8df57a58615b9e15400ede4138b41", "lastModifiedDate": "2016-10-26T13:02:33Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Test Two", "ip": "162.254.25.225", "dhcp": true }, "entities": { "balancednics": { "id": 
"24d24a20-1161-4870-8b49-434622a18875/balancednics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-2/loadbalancers/24d24a20-1161-4870-8b49-434622a18875/balancednics", "items": [ { "id": "0485e083-3461-447a-8c5a-392be3c76473", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-2/servers/7571b2d9-312a-45be-8d68-58c269c3911c/nics/0485e083-3461-447a-8c5a-392be3c76473", "metadata": { "createdDate": "2016-10-25T08:49:48Z", "createdBy": "test@test.te", "etag": "20e915cf08106b7576f6b25b5c0a2bd6", "lastModifiedDate": "2016-10-26T13:02:33Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": null, "mac": "02:01:6f:92:d0:63", "ips": [ "162.254.25.225" ], "dhcp": true, "lan": 5, "firewallActive": false, "nat": false }, "entities": { "firewallrules": { "id": "0485e083-3461-447a-8c5a-392be3c76473/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-2/servers/7571b2d9-312a-45be-8d68-58c269c3911c/nics/0485e083-3461-447a-8c5a-392be3c76473/firewallrules" } } }, { "id": "2e9c54f4-85c0-4d10-8369-cf5e3a6e7b68", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-2/servers/ff3b523c-21bd-453d-80a3-c2bcdae1f509/nics/2e9c54f4-85c0-4d10-8369-cf5e3a6e7b68", "metadata": { "createdDate": "2016-10-25T14:01:03Z", "createdBy": "test@test.te", "etag": "20e915cf08106b7576f6b25b5c0a2bd6", "lastModifiedDate": "2016-10-26T13:02:33Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": null, "mac": "02:01:ce:53:f1:1a", "ips": [ "162.254.25.225" ], "dhcp": true, "lan": 5, "firewallActive": false, "nat": false }, "entities": { "firewallrules": { "id": "2e9c54f4-85c0-4d10-8369-cf5e3a6e7b68/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-2/servers/ff3b523c-21bd-453d-80a3-c2bcdae1f509/nics/2e9c54f4-85c0-4d10-8369-cf5e3a6e7b68/firewallrules" } } }, { "id": "707d7aa4-8311-4a61-82a4-a80333e9ecc2", "type": "nic", "href": 
"/cloudapi/v3/datacenters/dc-2/servers/2e77a9cc-2338-43e6-84bf-e4c2ff9be265/nics/707d7aa4-8311-4a61-82a4-a80333e9ecc2", "metadata": { "createdDate": "2016-10-25T15:36:06Z", "createdBy": "test@test.te", "etag": "20e915cf08106b7576f6b25b5c0a2bd6", "lastModifiedDate": "2016-10-26T13:02:33Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": null, "mac": "02:01:8b:77:cd:b6", "ips": [ "162.254.25.225" ], "dhcp": true, "lan": 5, "firewallActive": false, "nat": false }, "entities": { "firewallrules": { "id": "707d7aa4-8311-4a61-82a4-a80333e9ecc2/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-2/servers/2e77a9cc-2338-43e6-84bf-e4c2ff9be265/nics/707d7aa4-8311-4a61-82a4-a80333e9ecc2/firewallrules" } } }, { "id": "710f6842-440d-4fdc-878e-0c329037d4a9", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-2/servers/fc0f7e86-a7de-4cee-84b5-409cc8b89ad2/nics/710f6842-440d-4fdc-878e-0c329037d4a9", "metadata": { "createdDate": "2016-10-25T14:01:03Z", "createdBy": "test@test.te", "etag": "20e915cf08106b7576f6b25b5c0a2bd6", "lastModifiedDate": "2016-10-26T13:02:33Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": null, "mac": "02:01:00:65:da:a9", "ips": [ "162.254.25.225" ], "dhcp": true, "lan": 5, "firewallActive": false, "nat": false }, "entities": { "firewallrules": { "id": "710f6842-440d-4fdc-878e-0c329037d4a9/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-2/servers/fc0f7e86-a7de-4cee-84b5-409cc8b89ad2/nics/710f6842-440d-4fdc-878e-0c329037d4a9/firewallrules" } } } ] } } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_update_volume.json0000664000175000017500000000177213153541406031471 0ustar kamikami00000000000000{ "id": "vol-2", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/vol-2", "metadata": { "createdDate": "2016-10-17T13:13:36Z", "createdBy": "test@test.te", "etag": "c1800ce349033f9cd2c095ea1ea4976a", 
"lastModifiedDate": "2016-10-17T13:47:52Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Updated storage name", "type": "HDD", "size": 40, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 3 } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_list_firewall_rules.json0000664000175000017500000000553513153541406032673 0ustar kamikami00000000000000{ "id": "nic-2/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2/firewallrules", "items": [ { "id": "fwr-1", "type": "firewall-rule", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2/firewallrules/fwr-1", "metadata": { "createdDate": "2016-10-19T11:08:10Z", "createdBy": "test@test.te", "etag": "b91a2e082a7422dafb79d84a07fb2a28", "lastModifiedDate": "2016-10-19T11:19:04Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Test updated firewall rule", "protocol": "TCP", "sourceMac": null, "sourceIp": null, "targetIp": null, "icmpCode": null, "icmpType": null, "portRangeStart": 80, "portRangeEnd": 80 } }, { "id": "fw2", "type": "firewall-rule", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2/firewallrules/fw2", "metadata": { "createdDate": "2016-10-19T09:55:10Z", "createdBy": "test@test.te", "etag": "00bb5b86562db1ed19ca38697e485160", "lastModifiedDate": "2016-10-19T09:55:10Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "HTTPs (SSL)", "protocol": "TCP", "sourceMac": null, "sourceIp": null, "targetIp": null, "icmpCode": null, "icmpType": null, "portRangeStart": 443, "portRangeEnd": 443 } }, { "id": 
"6238b1e2-c706-4dc9-80a9-307fa0bd4287", "type": "firewall-rule", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2/firewallrules/6238b1e2-c706-4dc9-80a9-307fa0bd4287", "metadata": { "createdDate": "2016-10-19T09:55:10Z", "createdBy": "test@test.te", "etag": "00bb5b86562db1ed19ca38697e485160", "lastModifiedDate": "2016-10-19T09:55:10Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "HTTP Webserver", "protocol": "TCP", "sourceMac": null, "sourceIp": null, "targetIp": null, "icmpCode": null, "icmpType": null, "portRangeStart": 80, "portRangeEnd": 80 } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_image.json0000664000175000017500000000165313153541406031540 0ustar kamikami00000000000000{ "id": "img-2", "type": "image", "href": "/cloudapi/v3/images/img-2", "metadata": { "createdDate": "2015-10-09T12:06:34Z", "createdBy": "test@test.te", "etag": "bbf76112358af2fc5dd1bf21de8988db", "lastModifiedDate": "2015-11-11T15:23:20Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "vivid-server-cloudimg-amd64-disk1.img", "description": null, "location": "de/fkb", "size": 2, "cpuHotPlug": false, "cpuHotUnplug": false, "ramHotPlug": false, "ramHotUnplug": false, "nicHotPlug": false, "nicHotUnplug": false, "discVirtioHotPlug": false, "discVirtioHotUnplug": false, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "UNKNOWN", "imageType": "HDD", "public": false } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/create_volume_snapshot.json0000664000175000017500000000156313153541406032673 0ustar kamikami00000000000000{ "id": "sshot", "type": "snapshot", "href": "/cloudapi/v3/snapshots/sshot", "metadata": { "createdDate": "2016-10-26T11:38:45Z", "createdBy": "test@test.te", "etag": "01873262ac042b5f44ed33b4241225b4", "lastModifiedDate": "2016-10-26T11:38:45Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties": { "name": 
"Test Created Snapshot", "description": "Test Created Snapshot", "location": "us/las", "size": 10, "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "LINUX" } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_volume.json0000664000175000017500000000177213153541406031767 0ustar kamikami00000000000000{ "id": "vol-2", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/vol-2", "metadata": { "createdDate": "2016-10-17T13:13:36Z", "createdBy": "test@test.te", "etag": "c1800ce349033f9cd2c095ea1ea4976a", "lastModifiedDate": "2016-10-17T13:47:52Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Updated storage name", "type": "HDD", "size": 40, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 3 } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/create_volume.json0000664000175000017500000000200013153541406030737 0ustar kamikami00000000000000{ "id" : "vol-2", "type" : "volume", "href" : "/cloudapi/v3/datacenters/dc-1/volumes/vol-2", "metadata" : { "createdDate" : "2016-10-17T13:13:36Z", "createdBy" : "test@test.te", "etag" : "c1800ce349033f9cd2c095ea1ea4976a", "lastModifiedDate" : "2016-10-17T13:47:52Z", "lastModifiedBy" : "test@test.te", "state" : "BUSY" }, "properties" : { "name": "Updated storage name", "type": "HDD", "size": 40, "availabilityZone": "AUTO", "image": "bvol-img", "imagePassword": null, "sshKeys": null, "bus": "VIRTIO", "licenceType": 
"LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 3 } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_list_network_interfaces.json0000664000175000017500000000465613153541406033553 0ustar kamikami00000000000000{ "id": "s-3/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics", "items": [ { "id": "nic-1", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-1", "metadata": { "createdDate": "2016-10-17T15:46:38Z", "createdBy": "test@test.te", "etag": "dbd8216137cf0ec9951170f93fa8fa53", "lastModifiedDate": "2016-10-17T18:19:43Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Test network interface", "mac": "02:01:0b:9d:4d:ce", "ips": [ "10.15.124.11" ], "dhcp": false, "lan": 2, "firewallActive": true, "nat": false }, "entities": { "firewallrules": { "id": "nic-1/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-1/firewallrules" } } }, { "id": "8f7ea845-cf40-49c8-9e93-68366f31d605", "type": "nic", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/8f7ea845-cf40-49c8-9e93-68366f31d605", "metadata": { "createdDate": "2016-10-17T16:46:38Z", "createdBy": "test@test.te", "etag": "dbd8216137cf0ec9951170f93fa8fa54", "lastModifiedDate": "2016-10-17T19:19:43Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Another test network interface", "mac": "02:01:0b:9d:4d:dd", "ips": [ "10.15.124.12" ], "dhcp": false, "lan": 2, "firewallActive": true, "nat": false }, "entities": { "firewallrules": { "id": "8f7ea845-cf40-49c8-9e93-68366f31d605/firewallrules", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-1/firewallrules" } } } ] 
}apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/attach_volume.json0000664000175000017500000000175613153541406030761 0ustar kamikami00000000000000{ "id" : "vol-2", "type" : "volume", "href" : "/cloudapi/v3/datacenters/dc-1/volumes/vol-2", "metadata" : { "createdDate" : "2016-10-17T13:13:36Z", "createdBy" : "test@test.te", "etag" : "c1800ce349033f9cd2c095ea1ea4976a", "lastModifiedDate" : "2016-10-17T13:47:52Z", "lastModifiedBy" : "test@test.te", "state" : "BUSY" }, "properties" : { "name" : "Updated storage name", "type" : "HDD", "size" : 40, "image" : "bvol-img", "imagePassword" : null, "sshKeys": null, "bus" : "VIRTIO", "licenceType" : "LINUX", "cpuHotPlug" : true, "cpuHotUnplug" : false, "ramHotPlug" : true, "ramHotUnplug" : false, "nicHotPlug" : true, "nicHotUnplug" : true, "discVirtioHotPlug" : true, "discVirtioHotUnplug" : true, "discScsiHotPlug" : false, "discScsiHotUnplug" : false, "deviceNumber" : 3 } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_list_datacenters.json0000664000175000017500000000354013153541406032143 0ustar kamikami00000000000000{ "id": "datacenters", "type": "collection", "href": "/cloudapi/v3/datacenters/", "items": [ { "id": "dc-1", "type": "datacenter", "href": "/cloudapi/v3/datacenters/dc-1", "metadata": { "createdDate": "2016-10-14T07:24:59Z", "createdBy": "test@test.test", "etag": "bdddec2287cb7723e86ac088bf644606", "lastModifiedDate": "2016-10-17T15:27:25Z", "lastModifiedBy": "test@test.test", "state": "AVAILABLE" }, "properties": { "name": "Test One.", "description": "A test data center", "location": "de/fra", "version": 3, "features": [ "SSD", "MULTIPLE_CPU" ] }, "entities": { "servers": { "id": "983a45be-a9aa-427a-8117-271ce9f392bb/servers", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers" }, "volumes": { "id": "983a45be-a9aa-427a-8117-271ce9f392bb/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/volumes" }, "loadbalancers": { "id": 
"983a45be-a9aa-427a-8117-271ce9f392bb/loadbalancers", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/loadbalancers" }, "lans": { "id": "983a45be-a9aa-427a-8117-271ce9f392bb/lans", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/lans" } } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_list_ip_blocks.json0000664000175000017500000000303613153541406031613 0ustar kamikami00000000000000{ "id": "ipblocks", "type": "collection", "href": "/cloudapi/v3/ipblocks", "items": [ { "id": "ipb-1", "type": "ipblock", "href": "/cloudapi/v3/ipblocks/ipb-1", "metadata": { "createdDate": "2016-10-26T15:05:36Z", "createdBy": "test@test.te", "etag": "acbf00bacf7ee48d4b8bc4e7413e1f30", "lastModifiedDate": "2016-10-26T15:05:36Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "ips": [ "78.137.101.252", "78.137.101.251" ], "location": "de/fkb", "size": 2, "name": "Test IP Block One" } }, { "id": "ipb-2", "type": "ipblock", "href": "/cloudapi/v3/ipblocks/ipb-2", "metadata": { "createdDate": "2016-10-26T15:05:12Z", "createdBy": "test@test.te", "etag": "43e05b766899950bc8a5aeee0fd89b05", "lastModifiedDate": "2016-10-26T15:05:12Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "ips": [ "78.137.101.250" ], "location": "de/fkb", "size": 1, "name": "Test IP Block One" } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_load_balancer.json0000664000175000017500000000132713153541406033222 0ustar kamikami00000000000000{ "id": "bal-1", "type": "loadbalancer", "href": "/cloudapi/v3/datacenters/dc-2/loadbalancers/bal-1", "metadata": { "createdDate": "2016-10-26T13:02:33Z", "createdBy": "test@test.te", "etag": "71e8df57a58615b9e15400ede4138b41", "lastModifiedDate": "2016-10-26T13:02:33Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Test One", "ip": "111.112.113.114", "dhcp": true }, "entities": { "balancednics": { "id": 
"bal-1/balancednics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-2/loadbalancers/bal-1/balancednics" } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_snapshot.json0000664000175000017500000000155013153541406032311 0ustar kamikami00000000000000{ "id": "sshot", "type": "snapshot", "href": "/cloudapi/v3/snapshots/sshot", "metadata": { "createdDate": "2016-10-26T11:38:45Z", "createdBy": "test@test.te", "etag": "01873262ac042b5f44ed33b4241225b4", "lastModifiedDate": "2016-10-26T11:38:45Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Test Snapshot", "description": "Test Snapshot", "location": "us/las", "size": 10, "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "LINUX" } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_update_snapshot.json0000664000175000017500000000155513153541406032020 0ustar kamikami00000000000000{ "id": "sshot", "type": "snapshot", "href": "/cloudapi/v3/snapshots/sshot", "metadata": { "createdDate": "2016-10-26T11:38:45Z", "createdBy": "test@test.te", "etag": "01873262ac042b5f44ed33b4241225b4", "lastModifiedDate": "2016-10-26T11:38:45Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Updated snapshot", "description": "Updated snapshot", "location": "us/las", "size": 10, "cpuHotPlug": true, "cpuHotUnplug": true, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "licenceType": "LINUX" } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_lan.json0000664000175000017500000000120313153541406031217 0ustar kamikami00000000000000{ "id" : "10", 
"type" : "lan", "href" : "/cloudapi/v3/datacenters/dc-1/lans/10", "metadata" : { "createdDate": "2016-10-17T11:33:11Z", "createdBy": "test@test.te", "etag": "53b215b8ec0356a649955dab019845a4", "lastModifiedDate": "2016-10-18T15:13:44Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties" : { "name" : "Test Created Lan", "public" : true }, "entities": { "nics": { "id": "10/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/lans/10/nics" } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_update_lan.json0000664000175000017500000000120313153541406030721 0ustar kamikami00000000000000{ "id" : "10", "type" : "lan", "href" : "/cloudapi/v3/datacenters/dc-1/lans/10", "metadata" : { "createdDate": "2016-10-17T11:33:11Z", "createdBy": "test@test.te", "etag": "53b215b8ec0356a649955dab019845a4", "lastModifiedDate": "2016-10-18T15:13:44Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties" : { "name" : "Test Updated Lan", "public" : true }, "entities": { "nics": { "id": "10/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/lans/10/nics" } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/list_nodes.json0000664000175000017500000001524013153541406030262 0ustar kamikami00000000000000{ "id": "dc-1/servers", "type": "collection", "href": "/cloudapi/v3/datacenters/9e9cb896-e8e7-4f52-a808-ac808baac814/servers", "items": [ { "id": "srv-1", "type": "server", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1", "metadata": { "createdDate": "2016-10-18T07:28:05Z", "createdBy": "test@test.te", "etag": "e7cf186125f51f3d9511754a40dcd12c", "lastModifiedDate": "2016-10-18T07:28:05Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Test Node.", "cores": 2, "ram": 4096, "availabilityZone": "AUTO", "vmState": "RUNNING", "bootCdrom": null, "bootVolume": { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1", "metadata": { 
"createdDate": "2016-10-18T07:20:41Z", "createdBy": "matfin@stackpointcloud.com", "etag": "33f6b8d506e7ad756e8554b915f29c61", "lastModifiedDate": "2016-10-18T07:20:41Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "Test Node Volume", "type": "HDD", "size": 10, "image": "bvol-img", "imagePassword": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } }, "cpuFamily": "AMD_OPTERON" }, "entities": { "cdroms": { "id": "srv-1/cdroms", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/cdroms", "items": [] }, "volumes": { "id": "srv-1/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/volumes", "items": [ { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1" } ] }, "nics": { "id": "srv-1/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/nics", "items": [] } } }, { "id": "srv-1", "type": "server", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1", "metadata": { "createdDate": "2016-10-18T07:28:05Z", "createdBy": "test@test.te", "etag": "e7cf186125f51f3d9511754a40dcd12c", "lastModifiedDate": "2016-10-18T07:28:05Z", "lastModifiedBy": "matfin@stackpointcloud.com", "state": "AVAILABLE" }, "properties": { "name": "Test Node.", "cores": 2, "ram": 4096, "availabilityZone": "AUTO", "vmState": "RUNNING", "bootCdrom": null, "bootVolume": { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1", "metadata": { "createdDate": "2016-10-18T07:20:41Z", "createdBy": "matfin@stackpointcloud.com", "etag": "33f6b8d506e7ad756e8554b915f29c61", "lastModifiedDate": "2016-10-18T07:20:41Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, 
"properties": { "name": "Test Node Volume", "type": "HDD", "size": 10, "image": "bvol-img", "imagePassword": null, "bus": "VIRTIO", "licenceType": "LINUX", "cpuHotPlug": true, "cpuHotUnplug": false, "ramHotPlug": true, "ramHotUnplug": false, "nicHotPlug": true, "nicHotUnplug": true, "discVirtioHotPlug": true, "discVirtioHotUnplug": true, "discScsiHotPlug": false, "discScsiHotUnplug": false, "deviceNumber": 1 } }, "cpuFamily": "AMD_OPTERON" }, "entities": { "cdroms": { "id": "srv-1/cdroms", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/cdroms", "items": [] }, "volumes": { "id": "srv-1/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/volumes", "items": [ { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1" } ] }, "nics": { "id": "srv-1/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/nics", "items": [] } } } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_location.json0000664000175000017500000000034113153541406032257 0ustar kamikami00000000000000{ "id": "de/fkb", "type": "location", "href": "/cloudapi/v3/locations/de/fkb", "properties": { "name": "karlsruhe", "features": [ "SSD", "MULTIPLE_CPU" ] } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_create_datacenter.json0000664000175000017500000000105313153541406032245 0ustar kamikami00000000000000{ "id": "dc-1", "type": "datacenter", "href": "/cloudapi/v3/datacenters/dc-1", "metadata": { "createdDate": "2016-10-18T17:20:56Z", "createdBy": "test@test.te", "etag": "c2d3d4d9bbdc0fff7d3c5c3864a68a46", "lastModifiedDate": "2016-10-18T17:20:56Z", "lastModifiedBy": "test@test.te", "state": "BUSY" }, "properties": { "name": "Test Data Center", "description": "Test Data Center.", "location": "us/las", "version": null, "features": [] } 
}apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_update_node.json0000664000175000017500000000250013153541406031075 0ustar kamikami00000000000000{ "id": "srv-1", "type": "server", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1", "metadata" : { "createdDate": "2016-10-18T07:28:05Z", "createdBy": "test@test.test", "etag": "e7cf186125f51f3d9511754a40dcd12c", "lastModifiedDate": "2016-10-18T07:28:05Z", "lastModifiedBy": "test@test.test", "state" : "BUSY" }, "properties" : { "name" : "A test node", "cores" : 1, "ram" : 512, "bootCdrom" : null, "availabilityZone": "AUTO", "vmState": "RUNNING", "bootCdrom": null, "bootVolume": { "id": "bvol-1", "type": "volume", "href": "/cloudapi/v3/datacenters/dc-1/volumes/bvol-1" }, "cpuFamily" : "AMD_OPTERON" }, "entities" : { "cdroms": { "id": "srv-1/cdroms", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/cdroms" }, "volumes": { "id": "srv-1/volumes", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/volumes" }, "nics": { "id": "srv-1/nics", "type": "collection", "href": "/cloudapi/v3/datacenters/dc-1/servers/srv-1/nics" } } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/profitbricks/ex_describe_firewall_rule.json0000664000175000017500000000126513153541406033311 0ustar kamikami00000000000000{ "id": "fw2", "type": "firewall-rule", "href": "/cloudapi/v3/datacenters/dc-1/servers/s-3/nics/nic-2/firewallrules/fw2", "metadata": { "createdDate": "2016-10-19T09:55:10Z", "createdBy": "test@test.te", "etag": "00bb5b86562db1ed19ca38697e485160", "lastModifiedDate": "2016-10-19T09:55:10Z", "lastModifiedBy": "test@test.te", "state": "AVAILABLE" }, "properties": { "name": "HTTPs (SSL)", "protocol": "TCP", "sourceMac": null, "sourceIp": null, "targetIp": null, "icmpCode": null, "icmpType": null, "portRangeStart": 443, "portRangeEnd": 443 } }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/vpsnet/0000775000175000017500000000000013160535110024031 5ustar 
kamikami00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/vpsnet/_available_clouds_api10json_templates.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/vpsnet/_available_clouds_api10json_templates.js0000664000175000017500000001102112701023453033756 0ustar kamikami00000000000000[{"cloud":{"system_templates":[{"id":9,"label":"Ubuntu 8.04 x64"},{"id":10,"label":"CentOS 5.2 x64"},{"id":11,"label":"Gentoo 2008.0 x64"},{"id":18,"label":"Ubuntu 8.04 x64 LAMP"},{"id":19,"label":"Ubuntu 8.04 x64 MySQL"},{"id":20,"label":"Ubuntu 8.04 x64 Postfix"},{"id":21,"label":"Ubuntu 8.04 x64 Apache"},{"id":22,"label":"CentOS 5.2 x64 MySQL"},{"id":23,"label":"CentOS 5.2 x64 LAMP"},{"id":24,"label":"CentOS 5.2 x64 HAProxy"},{"id":25,"label":"CentOS 5.2 x64 Postfix"},{"id":26,"label":"CentOS 5.2 x64 Varnish"},{"id":27,"label":"CentOS 5.2 x64 Shoutcast"},{"id":28,"label":"CentOS 5.2 x64 Apache"},{"id":40,"label":"cPanel"},{"id":42,"label":"Debian 5.0 (Lenny) x64"},{"id":58,"label":"Django on Ubuntu 8.04 (x86)"},{"id":59,"label":"Drupal 5 on Ubuntu 8.04 (x86)"},{"id":60,"label":"Drupal 6 on Ubuntu 8.04 (x86)"},{"id":61,"label":"Google App Engine on Ubuntu 8.04 (x86)"},{"id":62,"label":"LAMP on Ubuntu 8.04 (x86)"},{"id":63,"label":"LAPP on Ubuntu 8.04 (x86)"},{"id":64,"label":"MediaWiki on Ubuntu 8.04 (x86)"},{"id":65,"label":"MySQL on Ubuntu 8.04 (x86)"},{"id":66,"label":"phpBB on Ubuntu 8.04 (x86)"},{"id":67,"label":"PostgreSQL on Ubuntu 8.04 (x86)"},{"id":68,"label":"Rails on Ubuntu 8.04 (x86)"},{"id":69,"label":"Tomcat on Ubuntu 8.04 (x86)"},{"id":70,"label":"Wordpress on Ubuntu 8.04 (x86)"},{"id":71,"label":"Joomla on Ubuntu 8.04 (x86)"},{"id":72,"label":"Ubuntu 8.04 Default Install (turnkey)"},{"id":128,"label":"CentOS Optimised"},{"id":129,"label":"Optimised CentOS + Apache + MySQL + PHP"},{"id":130,"label":"Optimised CentOS + Apache + MySQL + 
Ruby"},{"id":131,"label":"Optimised CentOS + Apache + MySQL + Ruby + PHP"},{"id":132,"label":"Debian Optimised"},{"id":133,"label":"Optimised Debian + Apache + MySQL + PHP"},{"id":134,"label":"Optimised Debian + NGINX + MySQL + PHP"},{"id":135,"label":"Optimised Debian + Lighttpd + MySQL + PHP"},{"id":136,"label":"Optimised Debian + Apache + MySQL + Ruby + PHP"},{"id":137,"label":"Optimised Debian + Apache + MySQL + Ruby"},{"id":138,"label":"Optimised Debian + NGINX + MySQL + Ruby + PHP"},{"id":139,"label":"Optimised Debian + NGINX + MySQL + Ruby"},{"id":140,"label":"Optimised Debian + Apache + MySQL + PHP + Magento"},{"id":141,"label":"Optimised Debian + NGINX + MySQL + PHP + Magento"},{"id":142,"label":"Optimised Debian + Lighttpd + MySQL + PHP + Wordpress"}],"id":2,"label":"USA VPS Cloud"}},{"cloud":{"system_templates":[{"id":15,"label":"Ubuntu 8.04 x64"},{"id":16,"label":"CentOS 5.2 x64"},{"id":17,"label":"Gentoo 2008.0 x64"},{"id":29,"label":"Ubuntu 8.04 x64 LAMP"},{"id":30,"label":"Ubuntu 8.04 x64 MySQL"},{"id":31,"label":"Ubuntu 8.04 x64 Postfix"},{"id":32,"label":"Ubuntu 8.04 x64 Apache"},{"id":33,"label":"CentOS 5.2 x64 MySQL"},{"id":34,"label":"CentOS 5.2 x64 LAMP"},{"id":35,"label":"CentOS 5.2 x64 HAProxy"},{"id":36,"label":"CentOS 5.2 x64 Postfix"},{"id":37,"label":"CentOS 5.2 x64 Varnish"},{"id":38,"label":"CentOS 5.2 x64 Shoutcast"},{"id":39,"label":"CentOS 5.2 x64 Apache"},{"id":41,"label":"cPanel"},{"id":43,"label":"Debian 5.0 (Lenny) x64"},{"id":44,"label":"Django on Ubuntu 8.04 (x86)"},{"id":45,"label":"Drupal 5 on Ubuntu 8.04 (x86)"},{"id":46,"label":"Drupal 6 on Ubuntu 8.04 (x86)"},{"id":47,"label":"Google App Engine on Ubuntu 8.04 (x86)"},{"id":48,"label":"LAMP on Ubuntu 8.04 (x86)"},{"id":49,"label":"LAPP on Ubuntu 8.04 (x86)"},{"id":50,"label":"MediaWiki on Ubuntu 8.04 (x86)"},{"id":51,"label":"MySQL on Ubuntu 8.04 (x86)"},{"id":52,"label":"phpBB on Ubuntu 8.04 (x86)"},{"id":53,"label":"PostgreSQL on Ubuntu 8.04 
(x86)"},{"id":54,"label":"Rails on Ubuntu 8.04 (x86)"},{"id":55,"label":"Tomcat on Ubuntu 8.04 (x86)"},{"id":56,"label":"Wordpress on Ubuntu 8.04 (x86)"},{"id":57,"label":"Joomla on Ubuntu 8.04 (x86)"},{"id":73,"label":"Ubuntu 8.04 Default Install (turnkey)"},{"id":148,"label":"CentOS Optimised"},{"id":149,"label":"Optimised CentOS + Apache + MySQL + PHP"},{"id":150,"label":"Optimised CentOS + Apache + MySQL + Ruby"},{"id":151,"label":"Optimised CentOS + Apache + MySQL + Ruby + PHP"},{"id":152,"label":"Debian Optimised"},{"id":153,"label":"Optimised Debian + Apache + MySQL + PHP"},{"id":154,"label":"Optimised Debian + NGINX + MySQL + PHP"},{"id":155,"label":"Optimised Debian + Lighttpd + MySQL + PHP"},{"id":156,"label":"Optimised Debian + Apache + MySQL + Ruby + PHP"},{"id":157,"label":"Optimised Debian + Apache + MySQL + Ruby"},{"id":158,"label":"Optimised Debian + NGINX + MySQL + Ruby + PHP"},{"id":159,"label":"Optimised Debian + NGINX + MySQL + Ruby"},{"id":160,"label":"Optimised Debian + Lighttpd + MySQL + PHP + Wordpress"}],"id":3,"label":"UK VPS Cloud"}}] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/opennebula_3_2/0000775000175000017500000000000013160535107025313 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/opennebula_3_2/instance_type_collection.xml0000664000175000017500000000114612701023453033113 0ustar kamikami00000000000000 small 1 1024 medium 4 4096 large 8 8192 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/opennebula_3_2/compute_5.xml0000664000175000017500000000141212701023453027727 0ustar kamikami00000000000000 5 Compute 5 small ACTIVE DISK hda 192.168.0.1 02:00:c0:a8:00:01 192.168.1.1 02:00:c0:a8:01:01 compute-5 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/opennebula_3_6/0000775000175000017500000000000013160535107025317 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml0000664000175000017500000000063112701023453027725 0ustar 
kamikami00000000000000 5 test-volume oneadmin READY DATABLOCK Attached storage 1000 ext3 NO YES apache-libcloud-2.2.1/libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml0000664000175000017500000000025512701023453027271 0ustar kamikami00000000000000 10 Debian 7.1 LAMP 2048 file:///images/debian/wheezy.img apache-libcloud-2.2.1/libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml0000664000175000017500000000105212701023453030014 0ustar kamikami00000000000000 15 Compute 15 Test small ACTIVE FILE hda 192.168.122.2 02:00:c0:a8:7a:02 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml0000664000175000017500000000024512701023453027275 0ustar kamikami00000000000000 15 Debian Sid 1024 file:///images/debian/sid.img apache-libcloud-2.2.1/libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml0000664000175000017500000000131712701023453027737 0ustar kamikami00000000000000 5 Compute 5 Test small ACTIVE FILE hda FILE sda 192.168.122.2 02:00:c0:a8:7a:02 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/0000775000175000017500000000000013160535107023256 5ustar kamikami00000000000000././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setMetadata_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_set0000664000175000017500000000123412701023453034062 0ustar kamikami00000000000000{ "endTime": "2013-06-26T10:05:07.630-07:00", "id": "3681664092089171723", "insertTime": "2013-06-26T10:05:03.271-07:00", "kind": "compute#operation", "name": "operation-setMetadata_post", "operationType": "insert", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-setMetadata_post", "startTime": "2013-06-26T10:05:03.315-07:00", "status": "PENDING", "targetId": "16211908079305042870", "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name/setMetadata", "user": "foo@developer.gserviceaccount.com" } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20131014_deprecate.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20131014_dep0000664000175000017500000000123712701023453033101 0ustar kamikami00000000000000{ "status": "PENDING", "kind": "compute#operation", "name": "operation-1394594316110-4f4604ad0e708-2e4622ab", "startTime": "2014-03-11T20:18:36.194-07:00", "insertTime": "2014-03-11T20:18:36.110-07:00", "targetId": "10034929421075729520", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/debian_7_wheezy_v20131014", "operationType": "setDeprecation", "progress": 0, "id": "11223768474922166090", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-1394594316110-4f4604ad0e708-2e4622ab", "user": "user@developer.gserviceaccount.com" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_targetHttpProxies_web_proxy.json0000664000175000017500000000054212701023453033264 0ustar kamikami00000000000000{ "kind": "compute#targetHttpProxy", "id": "2276970411950672658", "creationTimestamp": "2014-08-22T09:47:35.425-07:00", "name": "web-proxy", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies/web-proxy", "urlMap": "https://www.googleapis.com/compute/v1/projects/project_name/global/urlMaps/web-map" } ././@LongLink0000000000000000000000000000017500000000000011220 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_put.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthCheck0000664000175000017500000000124312701023453034103 0ustar kamikami00000000000000{ "endTime": "2013-09-03T02:20:02.194-07:00", "id": "6717642434182216609", "insertTime": "2013-09-03T02:19:55.574-07:00", "kind": "compute#operation", "name": "operation-global_httpHealthChecks_lchealthcheck_put", "operationType": "update", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_lchealthcheck_put", "startTime": "2013-09-03T02:19:55.628-07:00", "status": "DONE", "targetId": "0742691415598204878", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", "user": "user@gserviceaccount.com" }././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpoo0000664000175000017500000000140612701023453034050 0ustar kamikami00000000000000{ "creationTimestamp": "2013-09-03T00:51:05.300-07:00", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" ], "id": "13598380121688918358", "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001" ], "kind": "compute#targetPool", "name": "lctargetpool", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool" }././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_sslcertificates_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_sslcertificates0000664000175000017500000000111113153541406034226 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "5564862567931215044", "name": "operation-1472578091714-53b4d4e0f85d1-cf587a68-9d7a9200", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/sslCertificates/example", "targetId": "2064539516762881220", "status": "DONE", "user": "1294195755358-compute@developer.gserviceaccount.com", "progress": 100, "insertTime": "2016-08-30T10:28:11.948-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_sslcertificates_post" } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_createSnapshot_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_createSnap0000664000175000017500000000146112701023453033664 0ustar kamikami00000000000000{ "id": "0158330665043557584", "insertTime": "2013-12-16T13:03:51.000-08:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_disks_lcdisk_createSnapshot_post", "operationType": "createSnapshot", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_lcdisk_createSnapshot_post", "startTime": "2013-12-16T13:03:51.042-08:00", "status": "PENDING", "targetId": "07494414044179227172", "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "user": "487551519631-t6qvu2na6p4u9ptm46bsdujf0ohbdro7@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_subnetworks.json0000664000175000017500000000517713153541406030724 0ustar kamikami00000000000000{ "kind": "compute#subnetworkAggregatedList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/subnetworks", "items": { "regions/us-central1": { "subnetworks": [ { "kind": "compute#subnetwork", "id": "4297043163355844284", "creationTimestamp": "2016-03-25T05:34:27.209-07:00", "gatewayAddress": "10.128.0.1", "name": "cf-972cf02e6ad49112", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/cf", "ipCidrRange": "10.128.0.0/20", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112" } ] }, "regions/europe-west1": { "subnetworks": [ { "kind": "compute#subnetwork", "id": "447043451408125628", "creationTimestamp": "2016-03-25T05:34:27.272-07:00", "gatewayAddress": "10.132.0.1", "name": "cf-df1837b06a6f927b", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/cf", "ipCidrRange": "10.132.0.0/20", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1/subnetworks/cf-df1837b06a6f927b" } ] }, "regions/asia-east1": { "subnetworks": [ { "kind": "compute#subnetwork", "id": "1240429769038270140", "creationTimestamp": "2016-03-25T05:34:27.413-07:00", "gatewayAddress": "10.140.0.1", "name": "cf-4c2da366a0381eb9", "network": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/networks/cf", "ipCidrRange": "10.140.0.0/20", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/asia-east1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/asia-east1/subnetworks/cf-4c2da366a0381eb9" } ] }, "regions/us-east1": { "subnetworks": [ { "kind": "compute#subnetwork", "id": "648244394139881148", "creationTimestamp": "2016-03-25T05:34:27.475-07:00", "gatewayAddress": "10.142.0.1", "name": "cf-daf1e2124a902a47", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/cf", "ipCidrRange": "10.142.0.0/20", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-east1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-east1/subnetworks/cf-daf1e2124a902a47" } ] } } } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_suse-cloud_global_licenses_sles_11.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_suse-cloud_global_licenses_sles_110000664000175000017500000000025512701023453033727 0ustar kamikami00000000000000{ "kind": "compute#license", "selfLink": "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/licenses/sles-11", "name": "sles-11", "chargesUseFee": true } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json0000664000175000017500000000126612701023453032206 0ustar kamikami00000000000000{ "id": "0211151278250678078", "insertTime": "2013-06-26T16:48:17.403-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_disks_post", "operationType": "insert", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_post", "startTime": 
"2013-06-26T16:48:17.479-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_routes_lcdemoroute_delete.json0000664000175000017500000000105212701023453032730 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "17322940416642455149", "name": "operation-global_routes_lcdemoroute_delete", "operationType": "destroy", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/routes/lcdemoroute", "status": "PENDING", "user": "erjohnso@google.com", "progress": 0, "insertTime": "2014-11-25T11:00:44.049-08:00", "startTime": "2014-11-25T11:00:44.385-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_routes_lcdemoroute_delete" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_urlMaps.json0000664000175000017500000000111512701023453027106 0ustar kamikami00000000000000{ "kind": "compute#urlMapList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/urlMaps", "id": "projects/project_name/global/urlMaps", "items": [ { "kind": "compute#urlMap", "id": "4266107551250249032", "creationTimestamp": "2014-08-15T16:16:54.084-07:00", "name": "web-map", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/urlMaps/web-map", "defaultService": "https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices/web-service", "fingerprint": "JiV2ACVOAlg=" } ] } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_backendServices_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_backendServices0000664000175000017500000000120612701023453034132 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "8150500072597970926", "name": "operation_global_backendServices_post", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices/web-service", "targetId": "4582947879210482708", "status": "DONE", "user": "user@developer.gserviceaccount.com", "progress": 100, "insertTime": "2014-10-10T16:39:17.206-07:00", "startTime": "2014-10-10T16:39:17.613-07:00", "endTime": "2014-10-10T16:39:18.330-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_backendServices_post" } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-east1-b_instanceGroup_myinstancegroup.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-east1-b_instanceGroup_myinstancegr0000664000175000017500000000164613153541406033774 0ustar kamikami00000000000000{ "kind": "compute#instanceGroup", "id": "1968709502073089769", "creationTimestamp": "2016-08-11T16:53:42.413-07:00", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b", "name": "managed-instance-group-1", "description": "This instance group is controlled by Instance Group Manager 'myinstancegroup'. 
To modify instances in this group, use the Instance Group Manager API: https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers", "network": "https://content.googleapis.com/compute/v1/projects/project_name/global/networks/default", "fingerprint": "42WmSpB8rSM=", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b/instanceGroups/myinstancegroup", "size": 4, "subnetwork": "https://content.googleapis.com/compute/v1/projects/project_name/regions/us-east1/subnetworks/cf-972cf02e6ad49113" } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lb_pool_setBackup_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lb_pool_set0000664000175000017500000000146612701023453034034 0ustar kamikami00000000000000{ "endTime": "2013-09-03T01:29:07.021-07:00", "id": "04072826501537092633", "insertTime": "2013-09-03T01:29:03.082-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lb_pool_setBackup_post", "operationType": "setBackup", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lb_pool_setBackup_post", "startTime": "2013-09-03T01:29:03.145-07:00", "status": "PENDING", "targetId": "16862638289615591831", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lb-pool", "user": "user@gserviceaccount.com" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json0000664000175000017500000023032212701023453030752 0ustar kamikami00000000000000{ "id": "projects/project_name/aggregated/machineTypes", "items": { "zones/europe-west1-a": { "machineTypes": [ { 
"creationTimestamp": "2012-11-16T11:43:17.400-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "05095504563332567951", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-4-d", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-11-16T11:48:06.087-08:00", "description": "4 vCPUs, 3.6 GB RAM", "guestCpus": 4, "id": "04759000181765218034", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-4", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-11-16T11:51:04.549-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-8", "state": "DEPRECATED" }, "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "02507333096579477005", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-8-d", "zone": "europe-west1-a" }, { "creationTimestamp": 
"2012-11-16T11:44:25.985-08:00", "description": "8 vCPUs, 52 GB RAM", "guestCpus": 8, "id": "01717932668777642040", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-8", "zone": "europe-west1-a" }, { "creationTimestamp": "2013-04-25T13:32:49.088-07:00", "description": "1 vCPU (shared physical core) and 0.6 GB RAM", "guestCpus": 1, "id": "1133568312750571513", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 614, "name": "f1-micro", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/f1-micro", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-06-07T13:48:14.670-07:00", "description": "1 vCPU, 3.75 GB RAM", "guestCpus": 1, "id": "11077240422128681563", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-06-07T13:48:34.258-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", "state": "DEPRECATED" }, "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", "guestCpus": 1, "id": "10583029372018866711", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1-d", "scratchDisks": [ { "diskGb": 420 } ], "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1-d", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-11-16T11:46:10.572-08:00", "description": "2 vCPUs, 1.8 GB RAM", "guestCpus": 2, "id": "16898271314080235997", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-2", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-11-16T11:45:08.195-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-8", "state": "DEPRECATED" }, "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "07181827135536388552", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-8-d", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-06-07T13:49:19.448-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "06313284160910191442", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-2-d", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-11-16T11:49:07.563-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-4", "state": "DEPRECATED" }, "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "01151097524490134507", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-4-d", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-11-16T11:40:06.129-08:00", "description": "2 vCPUs, 13 GB RAM", "guestCpus": 2, "id": "05438694236916301519", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-2", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-06-07T13:48:56.867-07:00", "description": "2 vCPUs, 7.5 GB RAM", "guestCpus": 2, "id": "17936898073622676356", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-2", "zone": "europe-west1-a" }, { "creationTimestamp": "2013-04-25T13:32:45.550-07:00", "description": "1 vCPU (shared physical core) and 1.7 GB RAM", "guestCpus": 1, "id": "1500265464823777597", "imageSpaceGb": 0, "kind": "compute#machineType", 
"maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 1740, "name": "g1-small", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/g1-small", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-11-16T11:40:59.630-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "00770157291441082211", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-2-d", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-06-07T13:49:40.050-07:00", "description": "4 vCPUs, 15 GB RAM", "guestCpus": 4, "id": "09494636486174545828", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": "n1-standard-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-4", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-11-16T11:50:15.128-08:00", "description": "8 vCPUs, 7.2 GB RAM", "guestCpus": 8, "id": "01206886442411821831", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-8", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-11-16T11:42:08.983-08:00", "description": "4 vCPUs, 26 GB RAM", "guestCpus": 
4, "id": "11556032176405786676", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-4", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-11-16T11:47:07.825-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "15178384466070744001", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-2-d", "zone": "europe-west1-a" }, { "creationTimestamp": "2012-06-07T13:50:05.677-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "00523085164784013586", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": "n1-standard-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-4-d", "zone": "europe-west1-a" } ] }, "zones/europe-west1-b": { "machineTypes": [ { "creationTimestamp": "2012-11-16T11:43:17.400-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "05095504563332567951", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-4-d", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:45:08.195-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-8", "state": "DEPRECATED" }, "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "07181827135536388552", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-8-d", "zone": "europe-west1-b" }, { "creationTimestamp": "2013-04-25T13:32:49.088-07:00", "description": "1 vCPU (shared physical core) and 0.6 GB RAM", "guestCpus": 1, "id": "1133568312750571513", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 614, "name": "f1-micro", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/f1-micro", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:40:59.630-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "00770157291441082211", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-2-d", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:40:06.129-08:00", "description": "2 vCPUs, 13 GB RAM", "guestCpus": 2, "id": "05438694236916301519", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-2", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:47:07.825-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "15178384466070744001", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-2-d", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-06-07T13:48:56.867-07:00", "description": "2 vCPUs, 7.5 GB RAM", "guestCpus": 2, "id": "17936898073622676356", "imageSpaceGb": 10, "kind": "compute#machineType", 
"maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-2", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:50:15.128-08:00", "description": "8 vCPUs, 7.2 GB RAM", "guestCpus": 8, "id": "01206886442411821831", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-8", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:44:25.985-08:00", "description": "8 vCPUs, 52 GB RAM", "guestCpus": 8, "id": "01717932668777642040", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-8", "zone": "europe-west1-b" }, { "creationTimestamp": "2013-04-25T13:32:45.550-07:00", "description": "1 vCPU (shared physical core) and 1.7 GB RAM", "guestCpus": 1, "id": "1500265464823777597", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 1740, "name": "g1-small", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/g1-small", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-06-07T13:49:19.448-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": 
"06313284160910191442", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-2-d", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-06-07T13:50:05.677-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "00523085164784013586", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": "n1-standard-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-4-d", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:49:07.563-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-4", "state": "DEPRECATED" }, "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "01151097524490134507", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-4-d", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-06-07T13:48:34.258-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-1", "state": "DEPRECATED" }, "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", "guestCpus": 1, "id": "10583029372018866711", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1-d", "scratchDisks": [ { "diskGb": 420 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-1-d", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-06-07T13:48:14.670-07:00", "description": "1 vCPU, 3.75 GB RAM", "guestCpus": 1, "id": "11077240422128681563", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-1", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:48:06.087-08:00", "description": "4 vCPUs, 3.6 GB RAM", "guestCpus": 4, "id": "04759000181765218034", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-4", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:46:10.572-08:00", "description": "2 vCPUs, 1.8 GB RAM", "guestCpus": 2, "id": "16898271314080235997", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-2", "zone": "europe-west1-b" }, { "creationTimestamp": 
"2012-06-07T13:49:40.050-07:00", "description": "4 vCPUs, 15 GB RAM", "guestCpus": 4, "id": "09494636486174545828", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": "n1-standard-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-4", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:51:04.549-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-8", "state": "DEPRECATED" }, "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "02507333096579477005", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-8-d", "zone": "europe-west1-b" }, { "creationTimestamp": "2012-11-16T11:42:08.983-08:00", "description": "4 vCPUs, 26 GB RAM", "guestCpus": 4, "id": "11556032176405786676", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-4", "zone": "europe-west1-b" } ] }, "zones/us-central1-a": { "machineTypes": [ { "creationTimestamp": "2012-06-07T13:49:40.050-07:00", "description": "4 vCPUs, 15 GB RAM", "guestCpus": 4, "id": "09494636486174545828", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": 
"n1-standard-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:47:07.825-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "15178384466070744001", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2-d", "zone": "us-central1-a" }, { "creationTimestamp": "2013-04-25T13:32:45.550-07:00", "description": "1 vCPU (shared physical core) and 1.7 GB RAM", "guestCpus": 1, "id": "1500265464823777597", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 1740, "name": "g1-small", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/g1-small", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:49:19.448-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "06313284160910191442", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2-d", "zone": "us-central1-a" }, { "creationTimestamp": "2013-04-25T13:32:49.088-07:00", "description": "1 vCPU (shared physical core) and 0.6 GB RAM", "guestCpus": 1, "id": "1133568312750571513", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 614, "name": "f1-micro", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/f1-micro", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:49:07.563-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4", "state": "DEPRECATED" }, "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "01151097524490134507", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:48:06.087-08:00", "description": "4 vCPUs, 3.6 GB RAM", "guestCpus": 4, "id": "04759000181765218034", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:40:59.630-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "00770157291441082211", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:43:17.400-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "05095504563332567951", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:45:08.195-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8", "state": "DEPRECATED" }, "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "07181827135536388552", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:48:56.867-07:00", "description": "2 vCPUs, 7.5 GB RAM", "guestCpus": 2, "id": "17936898073622676356", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:48:34.258-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "state": "DEPRECATED" }, "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", "guestCpus": 1, "id": "10583029372018866711", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1-d", "scratchDisks": [ { "diskGb": 420 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:46:10.572-08:00", "description": "2 vCPUs, 1.8 GB RAM", "guestCpus": 2, "id": "16898271314080235997", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:50:15.128-08:00", "description": "8 vCPUs, 7.2 GB RAM", "guestCpus": 8, "id": "01206886442411821831", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, 
"maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:42:08.983-08:00", "description": "4 vCPUs, 26 GB RAM", "guestCpus": 4, "id": "11556032176405786676", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:44:25.985-08:00", "description": "8 vCPUs, 52 GB RAM", "guestCpus": 8, "id": "01717932668777642040", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:48:14.670-07:00", "description": "1 vCPU, 3.75 GB RAM", "guestCpus": 1, "id": "11077240422128681563", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:40:06.129-08:00", "description": "2 vCPUs, 13 GB RAM", "guestCpus": 2, "id": "05438694236916301519", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:50:05.677-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "00523085164784013586", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": "n1-standard-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:51:04.549-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8", "state": "DEPRECATED" }, "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "02507333096579477005", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8-d", "zone": "us-central1-a" } ] }, "zones/us-central1-b": { "machineTypes": [ { "creationTimestamp": "2013-04-25T13:32:45.550-07:00", "description": "1 vCPU (shared physical core) and 1.7 GB RAM", "guestCpus": 1, "id": "1500265464823777597", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 1740, "name": "g1-small", 
"selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/g1-small", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:40:59.630-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "00770157291441082211", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-2-d", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:51:04.549-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-8", "state": "DEPRECATED" }, "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "02507333096579477005", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-8-d", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:46:10.572-08:00", "description": "2 vCPUs, 1.8 GB RAM", "guestCpus": 2, "id": "16898271314080235997", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-2", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:44:25.985-08:00", "description": "8 vCPUs, 52 GB RAM", "guestCpus": 8, "id": "01717932668777642040", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-8", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:42:08.983-08:00", "description": "4 vCPUs, 26 GB RAM", "guestCpus": 4, "id": "11556032176405786676", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-4", "zone": "us-central1-b" }, { "creationTimestamp": "2013-04-25T13:32:49.088-07:00", "description": "1 vCPU (shared physical core) and 0.6 GB RAM", "guestCpus": 1, "id": "1133568312750571513", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 614, "name": "f1-micro", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/f1-micro", "zone": "us-central1-b" }, { "creationTimestamp": "2012-06-07T13:50:05.677-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "00523085164784013586", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", 
"memoryMb": 15360, "name": "n1-standard-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-4-d", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:47:07.825-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "15178384466070744001", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-2-d", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:43:17.400-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "05095504563332567951", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-4-d", "zone": "us-central1-b" }, { "creationTimestamp": "2012-06-07T13:48:56.867-07:00", "description": "2 vCPUs, 7.5 GB RAM", "guestCpus": 2, "id": "17936898073622676356", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2", 
"selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-2", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:48:06.087-08:00", "description": "4 vCPUs, 3.6 GB RAM", "guestCpus": 4, "id": "04759000181765218034", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-4", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:50:15.128-08:00", "description": "8 vCPUs, 7.2 GB RAM", "guestCpus": 8, "id": "01206886442411821831", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-8", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:49:07.563-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-4", "state": "DEPRECATED" }, "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "01151097524490134507", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-4-d", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:45:08.195-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-8", "state": "DEPRECATED" }, "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "07181827135536388552", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-8-d", "zone": "us-central1-b" }, { "creationTimestamp": "2012-06-07T13:49:40.050-07:00", "description": "4 vCPUs, 15 GB RAM", "guestCpus": 4, "id": "09494636486174545828", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": "n1-standard-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-4", "zone": "us-central1-b" }, { "creationTimestamp": "2012-06-07T13:49:19.448-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "06313284160910191442", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-2-d", "zone": "us-central1-b" }, { "creationTimestamp": "2012-11-16T11:40:06.129-08:00", "description": "2 vCPUs, 13 GB RAM", "guestCpus": 2, "id": "05438694236916301519", "imageSpaceGb": 10, "kind": 
"compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-2", "zone": "us-central1-b" }, { "creationTimestamp": "2012-06-07T13:48:34.258-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1", "state": "DEPRECATED" }, "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", "guestCpus": 1, "id": "10583029372018866711", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1-d", "scratchDisks": [ { "diskGb": 420 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1-d", "zone": "us-central1-b" }, { "creationTimestamp": "2012-06-07T13:48:14.670-07:00", "description": "1 vCPU, 3.75 GB RAM", "guestCpus": 1, "id": "11077240422128681563", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1", "zone": "us-central1-b" } ] }, "zones/us-central2-a": { "machineTypes": [ { "creationTimestamp": "2012-06-07T13:49:40.050-07:00", "description": "4 vCPUs, 15 GB RAM", "guestCpus": 4, "id": "09494636486174545828", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": "n1-standard-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-4", "zone": "us-central2-a" }, { "creationTimestamp": 
"2012-06-07T13:48:56.867-07:00", "description": "2 vCPUs, 7.5 GB RAM", "guestCpus": 2, "id": "17936898073622676356", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-2", "zone": "us-central2-a" }, { "creationTimestamp": "2012-06-07T13:48:14.670-07:00", "description": "1 vCPU, 3.75 GB RAM", "guestCpus": 1, "id": "11077240422128681563", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:48:06.087-08:00", "description": "4 vCPUs, 3.6 GB RAM", "guestCpus": 4, "id": "04759000181765218034", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-4", "zone": "us-central2-a" }, { "creationTimestamp": "2013-04-25T13:32:45.550-07:00", "description": "1 vCPU (shared physical core) and 1.7 GB RAM", "guestCpus": 1, "id": "1500265464823777597", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 1740, "name": "g1-small", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/g1-small", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:47:07.825-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "15178384466070744001", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-2-d", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:40:59.630-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "00770157291441082211", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-2-d", "zone": "us-central2-a" }, { "creationTimestamp": "2013-04-25T13:32:49.088-07:00", "description": "1 vCPU (shared physical core) and 0.6 GB RAM", "guestCpus": 1, "id": "1133568312750571513", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 614, "name": "f1-micro", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/f1-micro", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:42:08.983-08:00", "description": "4 vCPUs, 26 GB RAM", "guestCpus": 4, "id": "11556032176405786676", "imageSpaceGb": 10, "kind": "compute#machineType", 
"maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-4", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:51:04.549-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-8", "state": "DEPRECATED" }, "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "02507333096579477005", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-8-d", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:44:25.985-08:00", "description": "8 vCPUs, 52 GB RAM", "guestCpus": 8, "id": "01717932668777642040", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-8", "zone": "us-central2-a" }, { "creationTimestamp": "2012-06-07T13:50:05.677-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "00523085164784013586", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": 
"n1-standard-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-4-d", "zone": "us-central2-a" }, { "creationTimestamp": "2012-06-07T13:49:19.448-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "06313284160910191442", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-2-d", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:50:15.128-08:00", "description": "8 vCPUs, 7.2 GB RAM", "guestCpus": 8, "id": "01206886442411821831", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-8", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:45:08.195-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-8", "state": "DEPRECATED" }, "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "07181827135536388552", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } 
], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-8-d", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:43:17.400-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "05095504563332567951", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-4-d", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:46:10.572-08:00", "description": "2 vCPUs, 1.8 GB RAM", "guestCpus": 2, "id": "16898271314080235997", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-2", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:40:06.129-08:00", "description": "2 vCPUs, 13 GB RAM", "guestCpus": 2, "id": "05438694236916301519", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-2", "zone": "us-central2-a" }, { "creationTimestamp": "2012-11-16T11:49:07.563-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-4", "state": "DEPRECATED" }, "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "01151097524490134507", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-4-d", "zone": "us-central2-a" }, { "creationTimestamp": "2012-06-07T13:48:34.258-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", "state": "DEPRECATED" }, "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", "guestCpus": 1, "id": "10583029372018866711", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1-d", "scratchDisks": [ { "diskGb": 420 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1-d", "zone": "us-central2-a" } ] } }, "kind": "compute#machineTypeAggregatedList", "nextPageToken": "ChhQRVJfUFJPSkVDVF9NQUNISU5FX1RZUEUSGjYwMDUzMTk1NTY3NS5uMS1zdGFuZGFyZC04", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/machineTypes" }././@LongLink0000000000000000000000000000021700000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_lcforwardingrule_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_fo0000664000175000017500000000151012701023453034065 0ustar kamikami00000000000000{ "endTime": 
"2013-09-03T00:17:43.917-07:00", "id": "09064254309855814339", "insertTime": "2013-09-03T00:17:36.062-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_forwardingRules_lcforwardingrule_delete", "operationType": "delete", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_forwardingRules_lcforwardingrule_delete", "startTime": "2013-09-03T00:17:36.168-07:00", "status": "DONE", "targetId": "10901665092293158938", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", "user": "user@gserviceaccount.com" }././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroupManagers.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroupManagers.j0000664000175000017500000000225013153541406033602 0ustar kamikami00000000000000{ "kind": "compute#instanceGroupManagerList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroupManagers", "id": "projects/project_name/zones/us-central1-a/instanceGroupManagers", "items": [ { "kind": "compute#instanceGroupManager", "id": "8588434319274454491", "creationTimestamp": "2016-07-18T10:34:44.679-07:00", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "name": "myinstancegroup", "instanceTemplate": "https://content.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates/my-instance-template1", "instanceGroup": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myinstancegroup", "baseInstanceName": "myinstancegroup", "fingerprint": "5bKcxzAnGOg=", 
"currentActions": { "none": 2, "creating": 0, "recreating": 0, "deleting": 0, "abandoning": 0, "restarting": 0, "refreshing": 0 }, "targetSize": 2, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroupManagers/myinstancegroup" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_asia-east1.json0000664000175000017500000000227313153541406027634 0ustar kamikami00000000000000{ "kind": "compute#region", "id": "1220", "creationTimestamp": "2014-05-30T18:35:16.514-07:00", "name": "asia-east1", "description": "asia-east1", "status": "UP", "zones": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/asia-east1-a", "https://www.googleapis.com/compute/v1/projects/project_name/zones/asia-east1-b" ], "quotas": [ { "metric": "CPUS", "limit": 24.0, "usage": 0.0 }, { "metric": "DISKS_TOTAL_GB", "limit": 10240.0, "usage": 0.0 }, { "metric": "STATIC_ADDRESSES", "limit": 7.0, "usage": 0.0 }, { "metric": "IN_USE_ADDRESSES", "limit": 23.0, "usage": 0.0 }, { "metric": "SSD_TOTAL_GB", "limit": 2048.0, "usage": 0.0 }, { "metric": "LOCAL_SSD_TOTAL_GB", "limit": 10240.0, "usage": 0.0 }, { "metric": "INSTANCE_GROUPS", "limit": 100.0, "usage": 0.0 }, { "metric": "INSTANCE_GROUP_MANAGERS", "limit": 50.0, "usage": 0.0 }, { "metric": "INSTANCES", "limit": 240.0, "usage": 0.0 }, { "metric": "AUTOSCALERS", "limit": 50.0, "usage": 0.0 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/asia-east1" } ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node_stop.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node_0000664000175000017500000000122612701023453034105 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "18431811683007150988", "name": "operation-stopnode", "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "stop", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/stopped-node", "targetId": "12335588484913203363", "status": "PENDING", "user": "erjohnso@google.com", "progress": 0, "insertTime": "2015-01-30T06:55:11.503-08:00", "startTime": "2015-01-30T06:55:11.847-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-stopnode" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_routes.json0000664000175000017500000000466612701023453027022 0ustar kamikami00000000000000{ "kind": "compute#routeList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/routes", "id": "projects/project_name/global/routes", "items": [ { "kind": "compute#route", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/routes/default-route-17d11bbbba01ce80", "id": "15220239546867835355", "creationTimestamp": "2014-01-21T10:30:55.592-08:00", "name": "default-route-17d11bbbba01ce80", "description": "Default route to the virtual network.", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "destRange": "10.240.0.0/16", "priority": 1000, "nextHopNetwork": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default" }, { "kind": "compute#route", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/routes/default-route-e1808a2caeaf17fb", "id": "4898173129042082424", "creationTimestamp": "2014-01-21T10:30:55.584-08:00", "name": "default-route-e1808a2caeaf17fb", "description": "Default route to the Internet.", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "destRange": "0.0.0.0/0", "priority": 1000, "nextHopGateway": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/gateways/default-internet-gateway" }, { "kind": "compute#route", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/routes/lcdemoroute", "id": "14575183394193523469", "creationTimestamp": "2014-11-25T11:00:45.062-08:00", "name": "lcdemoroute", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "tags": [ "tag1", "tag2" ], "destRange": "192.168.25.0/24", "priority": 1000, "nextHopInstance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-f/instances/libcloud-100", "warnings": [ { "code": "NEXT_HOP_CANNOT_IP_FORWARD", "message": "Next hop instance 'https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-f/instances/libcloud-100' cannot forward ip traffic. The next hop instance must have canIpForward set.", "data": [ { "key": "instance", "value": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-f/instances/libcloud-100" } ] } ] } ] } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.js0000664000175000017500000000270112701023453033272 0ustar kamikami00000000000000{ "canIpForward": false, "creationTimestamp": "2013-12-13T10:54:07.687-08:00", "disks": [ { "boot": true, "deviceName": "lcnode-000", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcnode-000", "type": "PERSISTENT" } ], "id": "17170905942674172532", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", 
"kind": "compute#metadata" }, "name": "lcnode-000", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "173.255.114.35", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.160.66" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-000", "status": "RUNNING", "tags": { "fingerprint": "42WmSpB8rSM=" }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_forwardingRules_post.json0000664000175000017500000000106512701023453031711 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "33333333333333333333", "name": "operation_global_forwardingRules_post", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/forwardingRules/http-rule", "status": "PENDING", "user": "user@developer.gserviceaccount.com", "progress": 0, "insertTime": "2014-10-27T17:10:54.102-07:00", "startTime": "2014-10-27T17:10:54.531-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_forwardingRules_post" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_backendServices_post.json0000664000175000017500000000106612701023453031630 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "8150500075597870926", "name": "operation-global_backendServices_post", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices/web-service", "status": "PENDING", "user": "user@developer.gserviceaccount.com", "progress": 0, "insertTime": "2014-10-10T16:39:17.206-07:00", "startTime": 
"2014-10-10T16:39:17.613-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_backendServices_post" } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_ta0000664000175000017500000000134312701023453034071 0ustar kamikami00000000000000{ "id": "7487852523793007955", "insertTime": "2013-09-03T00:51:05.064-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_post", "operationType": "insert", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_post", "startTime": "2013-09-03T00:51:05.115-07:00", "status": "DONE", "targetId": "13598380121688918358", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" }././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_google-containers_global_images.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_google-containers_global_images.js0000664000175000017500000001356612701023453034020 0ustar kamikami00000000000000{ "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images", "id": "projects/google-containers/global/images", "items": [ { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140522", "id": "12928746743513706688", 
"creationTimestamp": "2014-05-21T15:30:01.045-07:00", "name": "container-vm-v20140522", "description": "Google container VM image, GlueCon 2014 release", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140522" }, "status": "READY", "archiveSizeBytes": "396961200", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140624", "id": "17637324906060386740", "creationTimestamp": "2014-06-24T17:59:38.240-07:00", "name": "container-vm-v20140624", "description": "Google container VM image, Google I/O 2014 release", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140624" }, "status": "READY", "archiveSizeBytes": "391233093", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140710", "id": "17658123443453052547", "creationTimestamp": "2014-07-15T17:29:02.737-07:00", "name": "container-vm-v20140710", "description": "Google container VM image", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140710" }, "status": "READY", "archiveSizeBytes": "428253862", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140731", "id": "7675862044791361451", "creationTimestamp": "2014-08-01T09:41:06.107-07:00", "name": "container-vm-v20140731", 
"description": "Google container VM image", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140731" }, "status": "READY", "archiveSizeBytes": "403951165", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140826", "id": "5337588031210316441", "creationTimestamp": "2014-08-26T16:19:17.533-07:00", "name": "container-vm-v20140826", "description": "Google container VM image", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140826" }, "status": "READY", "archiveSizeBytes": "420545090", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140925", "id": "7326458020538492469", "creationTimestamp": "2014-09-25T14:23:38.865-07:00", "name": "container-vm-v20140925", "description": "Google container VM image", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140925" }, "status": "READY", "archiveSizeBytes": "432272421", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140929", "id": "17245742661521590081", "creationTimestamp": "2014-09-29T13:49:23.330-07:00", "name": "container-vm-v20140929", "description": "Google container VM image", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": 
"DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140929" }, "status": "READY", "archiveSizeBytes": "422162733", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20141016", "id": "857872787891330870", "creationTimestamp": "2014-10-22T13:40:01.068-07:00", "name": "container-vm-v20141016", "description": "Google container VM image", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "432768815", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20141208", "id": "8037634834499556312", "creationTimestamp": "2014-12-09T15:57:34.413-08:00", "name": "container-vm-v20141208", "description": "Google container VM image", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "413371239", "diskSizeGb": "10" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_snapshots.json0000664000175000017500000000237212701023453027513 0ustar kamikami00000000000000{ "id": "projects/project_name/global/snapshots", "items": [ { "creationTimestamp": "2013-12-16T13:03:51.345-08:00", "description": "", "diskSizeGb": "1", "id": "17482266715940883688", "kind": "compute#snapshot", "name": "lcsnapshot", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots/lcsnapshot", "sourceDisk": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "sourceDiskId": "-2511816066479461182", "status": "READY" }, { "creationTimestamp": "2013-12-16T12:48:12.557-08:00", "description": "", "diskSizeGb": "10", "id": "3341332334980930052", "kind": "compute#snapshot", "name": "libcloud-demo-snapshot", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots/libcloud-demo-snapshot", "sourceDisk": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-snap-template", "sourceDiskId": "-6245698478147030397", "status": "READY" } ], "kind": "compute#snapshotList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a.json0000664000175000017500000000060312701023453027736 0ustar kamikami00000000000000{ "creationTimestamp": "2013-02-05T16:19:23.269-08:00", "description": "us-central1-a", "id": "13462829244527433283", "kind": "compute#zone", "name": "us-central1-a", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "status": "UP" }././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwa0000664000175000017500000000143312701023453034034 0ustar kamikami00000000000000{ "id": "09064254309855814339", "insertTime": "2013-09-03T00:17:36.062-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_forwardingRules_lcforwardingrule_delete", "operationType": "delete", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_forwardingRules_lcforwardingrule_delete", "startTime": "2013-09-03T00:17:36.168-07:00", "status": "PENDING", "targetId": "10901665092293158938", "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", "user": "user@gserviceaccount.com" }././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_node_name_setMetadata_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_no0000664000175000017500000000123312701023453034151 0ustar kamikami00000000000000{ "endTime": "2013-06-26T10:05:07.630-07:00", "id": "3681664092089171723", "insertTime": "2013-06-26T10:05:03.271-07:00", "kind": "compute#operation", "name": "operation-setMetadata_post", "operationType": "insert", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-setMetadata_post", "startTime": "2013-06-26T10:05:03.315-07:00", "status": "DONE", "targetId": "16211908079305042870", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name/setMetadata", "user": "foo@developer.gserviceaccount.com" } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node.0000664000175000017500000000270712701023453034031 0ustar kamikami00000000000000{ "canIpForward": false, "creationTimestamp": "2013-12-13T10:45:23.351-08:00", "disks": [ { "boot": true, "deviceName": "persistent-disk-0", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "type": "PERSISTENT" } ], "id": "4006034190819017667", "kind": "compute#instance", "machineType": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "stopped-node", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.236.58.15", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.72.75" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/stopped-node", "status": "TERMINATED", "tags": { "fingerprint": "42WmSpB8rSM=" }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_forwardingRules_http_rule.json0000664000175000017500000000073612701023453032736 0ustar kamikami00000000000000{ "kind": "compute#forwardingRule", "id": "16224243838919174114", "creationTimestamp": "2014-08-22T11:15:26.174-07:00", "name": "http-rule", "IPAddress": "192.0.2.1", "IPProtocol": "TCP", "portRange": "80-80", "target": "https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies/web-proxy", "description": "global forwarding rule", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/forwardingRules/http-rule" } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_libcloud-lb-demo-lb-tp.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_libcloud-lb0000664000175000017500000000162412701023453033717 0ustar kamikami00000000000000{ "creationTimestamp": "2013-09-02T22:25:45.817-07:00", "healthChecks": [ 
"https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" ], "id": "16862638289615591831", "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-002", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000" ], "kind": "compute#targetPool", "name": "libcloud-lb-demo-lb-tp", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" }././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.jso0000664000175000017500000000065612701023453033743 0ustar kamikami00000000000000{ "address": "173.255.113.20", "creationTimestamp": "2013-06-26T12:21:40.625-07:00", "description": "", "id": "01531551729918243104", "kind": "compute#address", "name": "lcaddress", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", "status": "RESERVED" }././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpoo0000664000175000017500000000140312701023453034045 0ustar kamikami00000000000000{ "id": "13500662190763995965", 
"insertTime": "2013-09-03T00:51:06.799-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lctargetpool_delete", "operationType": "delete", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_delete", "startTime": "2013-09-03T00:51:06.840-07:00", "status": "PENDING", "targetId": "13598380121688918358", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json0000664000175000017500000000116412701023453033205 0ustar kamikami00000000000000{ "id": "8983098895755095934", "insertTime": "2013-06-26T10:04:53.453-07:00", "kind": "compute#operation", "name": "operation-global_firewalls_lcfirewall_delete", "operationType": "delete", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_delete", "startTime": "2013-06-26T10:04:53.508-07:00", "status": "PENDING", "targetId": "0565629596395414121", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", "user": "897001307951@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_de0000664000175000017500000000140712701023453033332 0ustar kamikami00000000000000{ "id": "17469711273432628502", "insertTime": "2013-06-26T16:13:40.579-07:00", "kind": "compute#operation", "name": 
"operation-zones_us-central1-a_instances_lcnode-001_delete", "operationType": "delete", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-001_delete", "startTime": "2013-06-26T16:13:40.620-07:00", "status": "PENDING", "targetId": "16630486471904253898", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_delet0000664000175000017500000000117512701023453033766 0ustar kamikami00000000000000{ "id": "1159296103027566387", "insertTime": "2013-09-02T22:18:02.509-07:00", "kind": "compute#operation", "name": "operation-global_httpHealthChecks_lchealthcheck_delete", "operationType": "delete", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_lchealthcheck_delete", "startTime": "2013-09-02T22:18:02.558-07:00", "status": "PENDING", "targetId": "06860603312991823381", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", "user": "user@gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_asia-east1-b.json0000664000175000017500000000055013153541406027537 0ustar kamikami00000000000000{ "kind": "compute#zone", "id": "2220", "creationTimestamp": "2014-05-30T18:35:16.575-07:00", "name": "asia-east1-a", "description": "asia-east1-a", "status": "UP", "region": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/asia-east1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/asia-east1-a" } ././@LongLink0000000000000000000000000000020700000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_ta0000664000175000017500000000140212701023453034065 0ustar kamikami00000000000000{ "id": "13500662190763995965", "insertTime": "2013-09-03T00:51:06.799-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lctargetpool_delete", "operationType": "delete", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_delete", "startTime": "2013-09-03T00:51:06.840-07:00", "status": "DONE", "targetId": "13598380121688918358", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" }././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_insert.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_insert.j0000664000175000017500000000115513153541406034042 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "myname", "zone": "us-central1-a", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones_us_central1_a/instanceGroups/myname", "operationType": "compute.instanceGroups.insert", 
"progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us_central1_a_instanceGroups_myname_insert", "user": "1264195755357-compute@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json0000664000175000017500000000072012701023453033554 0ustar kamikami00000000000000{ "checkIntervalSec": 10, "creationTimestamp": "2013-09-02T22:18:01.180-07:00", "description": "test healthcheck", "healthyThreshold": 3, "host": "lchost", "id": "06860603312991823381", "kind": "compute#httpHealthCheck", "name": "lchealthcheck", "port": 8000, "requestPath": "/lc", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/lchealthcheck", "timeoutSec": 10, "unhealthyThreshold": 4 }././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.js0000664000175000017500000000270112701023453033273 0ustar kamikami00000000000000{ "canIpForward": false, "creationTimestamp": "2013-12-13T10:54:08.639-08:00", "disks": [ { "boot": true, "deviceName": "lcnode-001", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcnode-001", "type": "PERSISTENT" } ], "id": "09356229693786319079", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "lcnode-001", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "173.255.117.19", "type": 
"ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.168.208" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", "status": "RUNNING", "tags": { "fingerprint": "42WmSpB8rSM=" }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroupManagers_myinstancegroup.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroupManagers_m0000664000175000017500000000171613153541406033674 0ustar kamikami00000000000000{ "kind": "compute#instanceGroupManager", "id": "8604381270851510464", "creationTimestamp": "2016-07-18T15:54:39.153-07:00", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "name": "myinstancegroup", "description": "my description for myinstancegroup", "instanceTemplate": "https://content.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates/my-instance-template1", "instanceGroup": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myinstancegroup", "baseInstanceName": "base-foo", "fingerprint": "Q21hYveq9do=", "currentActions": { "none": 4, "creating": 0, "recreating": 0, "deleting": 0, "abandoning": 0, "restarting": 0, "refreshing": 0 }, "targetSize": 4, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroupManagers/myinstancegroup" } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_coreos-cloud_global_images_family_coreos.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_coreos-cloud_global_images_family_0000664000175000017500000000075413153541406034064 0ustar kamikami00000000000000{ "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-522-3-0-v20141226", "id": "14171939663085407486", "creationTimestamp": "2014-12-26T15:04:01.237-08:00", "name": "coreos-beta-522-3-0-v20141226", "description": "CoreOS beta 522.3.0", "family": "coreos", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "220932284", "diskSizeGb": "9" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_subnetworks.json0000664000175000017500000000141513153541406032505 0ustar kamikami00000000000000{ "kind": "compute#subnetworkList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks", "id": "projects/project_name/regions/us-central1/subnetworks", "items": [ { "kind": "compute#subnetwork", "id": "4297043163355844284", "creationTimestamp": "2016-03-25T05:34:27.209-07:00", "gatewayAddress": "10.128.0.1", "name": "cf-972cf02e6ad49112", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/cf", "ipCidrRange": "10.128.0.0/20", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_networks_custom_network.json0000664000175000017500000000140113153541406032505 0ustar kamikami00000000000000{ "kind": "compute#network", "id": "5125152985904090792", "creationTimestamp": "2016-03-25T05:34:15.077-07:00", "name": 
"custom-network", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/custom-network", "autoCreateSubnetworks": false, "subnetworks": [ "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112", "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-east1/subnetworks/cf-daf1e2124a902a47", "https://www.googleapis.com/compute/v1/projects/project_name/regions/asia-east1/subnetworks/cf-4c2da366a0381eb9", "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1/subnetworks/cf-df1837b06a6f927b" ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_networks_cf.json0000664000175000017500000000135013153541406030015 0ustar kamikami00000000000000{ "kind": "compute#network", "id": "5125152985904090792", "creationTimestamp": "2016-03-25T05:34:15.077-07:00", "name": "cf", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/cf", "autoCreateSubnetworks": true, "subnetworks": [ "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112", "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-east1/subnetworks/cf-daf1e2124a902a47", "https://www.googleapis.com/compute/v1/projects/project_name/regions/asia-east1/subnetworks/cf-4c2da366a0381eb9", "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1/subnetworks/cf-df1837b06a6f927b" ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_backendServices-web-service.json0000664000175000017500000000234112701023453032771 0ustar kamikami00000000000000{ "kind": "compute#backendServiceList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices", "id": "projects/project_name/global/backendServices", "items": [ { "kind": "compute#backendService", "id": "12158223670162062306", "creationTimestamp": 
"2014-08-14T14:37:36.728-07:00", "name": "web-service", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices/web-service", "backends": [ { "description": "", "group": "https://www.googleapis.com/resourceviews/v1beta1/projects/project_name/zones/us-central1-b/resourceViews/us-resources", "balancingMode": "RATE", "maxRate": 100, "capacityScaler": 1.0 }, { "description": "", "group": "https://www.googleapis.com/resourceviews/v1beta1/projects/project_name/zones/europe-west1-b/resourceViews/eu-resources", "balancingMode": "RATE", "maxRate": 100, "capacityScaler": 1.0 } ], "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check" ], "timeoutSec": 30, "port": 80, "protocol": "HTTP", "fingerprint": "Do4_wUywpJU=", "portName": "" } ] } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.jso0000664000175000017500000000270012701023453033552 0ustar kamikami00000000000000{ "canIpForward": false, "creationTimestamp": "2013-12-13T10:45:23.351-08:00", "disks": [ { "boot": true, "deviceName": "persistent-disk-0", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", "type": "PERSISTENT" } ], "id": "4006034190819017667", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "node-name", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.236.58.15", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", 
"network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.72.75" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "status": "RUNNING", "tags": { "fingerprint": "42WmSpB8rSM=" }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_rhel-cloud_global_images.json0000664000175000017500000000311612701023453032762 0ustar kamikami00000000000000{ "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/rhel-cloud/global/images", "id": "projects/rhel-cloud/global/images", "items": [ { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/rhel-cloud/global/images/rhel-7-v20141108", "id": "9656001643961289198", "creationTimestamp": "2014-11-10T14:15:28.054-08:00", "name": "rhel-7-v20141108", "description": "Red Hat Enterprise Linux 7.0 x86_64 built on 2014-11-08", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/rhel-cloud/global/images/rhel-7-v20141205" }, "status": "READY", "archiveSizeBytes": "1201321944", "diskSizeGb": "10", "licenses": [ "https://content.googleapis.com/compute/v1/projects/rhel-cloud/global/licenses/rhel-7-server" ] }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/rhel-cloud/global/images/rhel-7-v20141205", "id": "9718957591079040023", "creationTimestamp": "2014-12-08T17:07:02.804-08:00", "name": "rhel-7-v20141205", "description": "Red Hat Enterprise Linux 7.0 x86_64 built on 2014-12-05", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "1204146084", 
"diskSizeGb": "10", "licenses": [ "https://content.googleapis.com/compute/v1/projects/rhel-cloud/global/licenses/rhel-7-server" ] } ] } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node_setMachineType.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node_0000664000175000017500000000125412701023453034106 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "18431811683007150988", "name": "operation-setMachineType", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "setMachineType", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/stopped-node", "targetId": "12335588484913203363", "status": "PENDING", "user": "erjohnso@google.com", "progress": 0, "insertTime": "2015-01-30T06:55:11.503-08:00", "startTime": "2015-01-30T06:55:11.847-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-setMachineType" } ././@LongLink0000000000000000000000000000021700000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_targetInstances_lctargetinstance_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_ta0000664000175000017500000000140612701023453033777 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "16305469717066123402", "name": "operation-zones_us-central1-a_targetInstances_lctargetinstance_delete", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/targetInstances/lctargetinstance", "targetId": 
"8092539649535704539", "status": "DONE", "user": "erjohnso@google.com", "progress": 100, "insertTime": "2014-11-14T13:05:18.564-08:00", "startTime": "2014-11-14T13:05:18.868-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_targetInstances_lctargetinstance_delete" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_basic-check.json0000664000175000017500000000065712701023453033137 0ustar kamikami00000000000000{ "checkIntervalSec": 5, "creationTimestamp": "2013-08-19T14:42:28.947-07:00", "description": "", "healthyThreshold": 2, "host": "", "id": "7660832580304455442", "kind": "compute#httpHealthCheck", "name": "basic-check", "port": 80, "requestPath": "/", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check", "timeoutSec": 5, "unhealthyThreshold": 2 }././@LongLink0000000000000000000000000000022300000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node_name_deleteAccessConfig_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000132412701023453034000 0ustar kamikami00000000000000{ "endTime": "2013-06-26T16:13:08.382-07:00", "id": "1858155812259649243", "insertTime": "2013-06-26T16:12:51.492-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node_name_deleteAccessConfig_post", "operationType": "delete", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node_name_deleteAccessConfig_post", "startTime": "2013-06-26T16:12:51.537-07:00", "status": "PENDING", "targetId": "16630486471904253898", "user": "foo@developer.gserviceaccount.com", "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_snapshots_lcsnapshot_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_snapshots_lcsna0000664000175000017500000000130312701023453034237 0ustar kamikami00000000000000{ "endTime": "2013-12-16T13:04:11.565-08:00", "id": "5994251357251874363", "insertTime": "2013-12-16T13:04:03.831-08:00", "kind": "compute#operation", "name": "operation-global_snapshots_lcsnapshot_delete", "operationType": "delete", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_snapshots_lcsnapshot_delete", "startTime": "2013-12-16T13:04:03.924-08:00", "status": "DONE", "targetId": "17482266715940883688", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots/lcsnapshot", "user": "487551519631-t6qvu2na6p4u9ptm46bsdujf0ohbdro7@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_instanceGroupManagers.json0000664000175000017500000001556713153541406032641 0ustar kamikami00000000000000{ "kind": "compute#instanceGroupManagerAggregatedList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/instanceGroupManagers", "items": { "regions/us-central1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'regions/us-central1' on this page.", "data": [ { "key": "scope", "value": "regions/us-central1" } ] } }, "regions/europe-west1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'regions/europe-west1' on this page.", "data": [ { "key": "scope", "value": "regions/europe-west1" } ] } }, "regions/us-west1": { "warning": { "code": "NO_RESULTS_ON_PAGE", 
"message": "There are no results for scope 'regions/us-west1' on this page.", "data": [ { "key": "scope", "value": "regions/us-west1" } ] } }, "regions/asia-east1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'regions/asia-east1' on this page.", "data": [ { "key": "scope", "value": "regions/asia-east1" } ] } }, "regions/us-east1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'regions/us-east1' on this page.", "data": [ { "key": "scope", "value": "regions/us-east1" } ] } }, "zones/us-central1-a": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/us-central1-a' on this page.", "data": [ { "key": "scope", "value": "zones/us-central1-a" } ] } }, "zones/us-central1-b": { "instanceGroupManagers": [ { "kind": "compute#instanceGroupManager", "id": "8604381270851510464", "creationTimestamp": "2016-07-18T15:54:39.153-07:00", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b", "name": "myinstancegroup", "description": "my description for myinstancegroup", "instanceTemplate": "https://content.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates/my-instance-template1", "instanceGroup": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instanceGroups/myinstancegroup", "baseInstanceName": "base-foo", "fingerprint": "Q21hYveq9do=", "currentActions": { "none": 4, "creating": 0, "recreating": 0, "deleting": 0, "abandoning": 0, "restarting": 0, "refreshing": 0 }, "targetSize": 4, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instanceGroupManagers/myinstancegroup" } ] }, "zones/us-central1-c": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/us-central1-c' on this page.", "data": [ { "key": "scope", "value": "zones/us-central1-c" } ] } }, "zones/us-central1-f": { 
"warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/us-central1-f' on this page.", "data": [ { "key": "scope", "value": "zones/us-central1-f" } ] } }, "zones/europe-west1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/europe-west1-b' on this page.", "data": [ { "key": "scope", "value": "zones/europe-west1-b" } ] } }, "zones/europe-west1-c": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/europe-west1-c' on this page.", "data": [ { "key": "scope", "value": "zones/europe-west1-c" } ] } }, "zones/europe-west1-d": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/europe-west1-d' on this page.", "data": [ { "key": "scope", "value": "zones/europe-west1-d" } ] } }, "zones/us-west1-a": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/us-west1-a' on this page.", "data": [ { "key": "scope", "value": "zones/us-west1-a" } ] } }, "zones/us-west1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/us-west1-b' on this page.", "data": [ { "key": "scope", "value": "zones/us-west1-b" } ] } }, "zones/asia-east1-a": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/asia-east1-a' on this page.", "data": [ { "key": "scope", "value": "zones/asia-east1-a" } ] } }, "zones/asia-east1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/asia-east1-b' on this page.", "data": [ { "key": "scope", "value": "zones/asia-east1-b" } ] } }, "zones/asia-east1-c": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/asia-east1-c' on this page.", "data": [ { "key": "scope", "value": "zones/asia-east1-c" } ] } }, "zones/us-east1-b": { "instanceGroupManagers": [ { "kind": 
"compute#instanceGroupManager", "id": "8588434319274454491", "creationTimestamp": "2016-07-18T10:34:44.679-07:00", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b", "name": "myinstancegroup", "instanceTemplate": "https://content.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates/my-instance-template1", "instanceGroup": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b/instanceGroups/myinstancegroup", "baseInstanceName": "myinstancegroup", "fingerprint": "5bKcxzAnGOg=", "currentActions": { "none": 2, "creating": 0, "recreating": 0, "deleting": 0, "abandoning": 0, "restarting": 0, "refreshing": 0 }, "targetSize": 2, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b/instanceGroupManagers/myinstancegroup" } ] }, "zones/us-east1-c": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/us-east1-c' on this page.", "data": [ { "key": "scope", "value": "zones/us-east1-c" } ] } }, "zones/us-east1-d": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/us-east1-d' on this page.", "data": [ { "key": "scope", "value": "zones/us-east1-d" } ] } } } } ././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_targetHttpProxies_web_proxy_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_targetHttpProxi0000664000175000017500000000124312701023453034210 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "12025659947133083605", "name": "operation_global_targetHttpProxies_web_proxy_delete", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies/web-proxy", "targetId": "5243939392541625113", "status": "DONE", "user": 
"user@developer.gserviceaccount.com", "progress": 100, "insertTime": "2014-10-28T12:21:47.406-07:00", "startTime": "2014-10-28T12:21:47.666-07:00", "endTime": "2014-10-28T12:21:48.419-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_targetHttpProxies_web_proxy_delete" } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_addresses_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_addresses_post.0000664000175000017500000000113012701023453034133 0ustar kamikami00000000000000{ "id": "16064059851942653139", "insertTime": "2013-06-26T12:21:40.299-07:00", "kind": "compute#operation", "name": "operation-global_addresses_post", "operationType": "insert", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_addresses_post", "startTime": "2013-06-26T12:21:40.358-07:00", "status": "DONE", "targetId": "01531551729918243104", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/addresses/lcaddressglobal", "user": "foo@developer.gserviceaccount.com" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_images_post.json0000664000175000017500000000102312701023453027773 0ustar kamikami00000000000000{ "id": "15196339658718959621", "insertTime": "2014-03-09T21:04:31.228-07:00", "kind": "compute#operation", "name": "coreos", "operationType": "insert", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_image_post", "startTime": "2014-03-09T21:04:31.291-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/coreos", "user": "897001307951@developer.gserviceaccount.com" } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_ubuntu-os-cloud_global_images.json0000664000175000017500000001317112701023453033773 0ustar kamikami00000000000000{ "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images", "id": "projects/ubuntu-os-cloud/global/images", "items": [ { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1204-precise-v20141028", "id": "15508054221909398824", "creationTimestamp": "2014-10-29T09:51:42.018-07:00", "name": "ubuntu-1204-precise-v20141028", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1204-precise-v20141212" }, "status": "READY", "archiveSizeBytes": "378614228", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1204-precise-v20141031", "id": "3216640293232429175", "creationTimestamp": "2014-11-07T00:48:18.673-08:00", "name": "ubuntu-1204-precise-v20141031", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1204-precise-v20141212" }, "status": "READY", "archiveSizeBytes": "1137275331", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1204-precise-v20141212", "id": "12756823774499736482", "creationTimestamp": "2014-12-17T11:31:59.126-08:00", "name": "ubuntu-1204-precise-v20141212", "description": "Canonical, Ubuntu, 12.04 LTS, amd64 precise image built on 2014-12-12", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", 
"archiveSizeBytes": "1140352383", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20141028", "id": "17781692821175088041", "creationTimestamp": "2014-10-29T09:51:42.072-07:00", "name": "ubuntu-1404-trusty-v20141028", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20141212" }, "status": "READY", "archiveSizeBytes": "375105326", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20141031a", "id": "6500942514398264968", "creationTimestamp": "2014-11-07T00:48:04.471-08:00", "name": "ubuntu-1404-trusty-v20141031a", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20141212" }, "status": "READY", "archiveSizeBytes": "1015791618", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20141212", "id": "14921913565797044708", "creationTimestamp": "2014-12-17T11:31:37.868-08:00", "name": "ubuntu-1404-trusty-v20141212", "description": "Canonical, Ubuntu, 14.04 LTS, amd64 trusty image built on 2014-12-12", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "981936603", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1410-utopic-v20141029", "id": "13898107879086076373", "creationTimestamp": "2014-10-29T09:51:39.609-07:00", "name": 
"ubuntu-1410-utopic-v20141029", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1410-utopic-v20141217" }, "status": "READY", "archiveSizeBytes": "348037877", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1410-utopic-v20141030a", "id": "2412636152371793564", "creationTimestamp": "2014-11-07T00:47:53.082-08:00", "name": "ubuntu-1410-utopic-v20141030a", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1410-utopic-v20141217" }, "status": "READY", "archiveSizeBytes": "1044175299", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1410-utopic-v20141217", "id": "6363233028234584116", "creationTimestamp": "2014-12-17T11:31:26.695-08:00", "name": "ubuntu-1410-utopic-v20141217", "description": "Canonical, Ubuntu, 14.10, amd64 utopic image built on 2014-12-17", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "1048366941", "diskSizeGb": "10" } ] } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroup_myinstancegroup.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroup_myinstanc0000664000175000017500000000164613153541406033771 0ustar kamikami00000000000000{ "kind": "compute#instanceGroup", "id": "1968709502073089769", "creationTimestamp": "2016-08-11T16:53:42.413-07:00", "zone": 
"https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "name": "myinstancegroup", "description": "This instance group is controlled by Instance Group Manager 'myinstancegroup'. To modify instances in this group, use the Instance Group Manager API: https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers", "network": "https://content.googleapis.com/compute/v1/projects/project_name/global/networks/default", "fingerprint": "42WmSpB8rSM=", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myinstancegroup", "size": 4, "subnetwork": "https://content.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_windows-cloud_global_images.json0000664000175000017500000000157612701023453033532 0ustar kamikami00000000000000{ "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/windows-cloud/global/images", "id": "projects/windows-cloud/global/images", "items": [ { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/windows-cloud/global/images/windows-server-2008-r2-dc-v20141120", "id": "10752443450426453317", "creationTimestamp": "2014-12-02T10:31:56.162-08:00", "name": "windows-server-2008-r2-dc-v20141120", "description": "Microsoft Windows Server 2008 R2 Datacenter Edition built on 2014-11-20", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "6974709077", "diskSizeGb": "100", "licenses": [ "https://content.googleapis.com/compute/v1/projects/windows-cloud/global/licenses/windows-server-2008-r2-dc" ] } ] } ././@LongLink0000000000000000000000000000021500000000000011213 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_instanceGroups_myname_removeInstances.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_in0000664000175000017500000000117713153541406034157 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "myname", "zone": "us-central1-a", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname", "operationType": "compute.instanceGroups.removeInstances", "progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us_central1_a_instanceGroups_myname_removeInstances", "user": "1264195755357-compute@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000021200000000000011210 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lb_pool_setBackup_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_ta0000664000175000017500000000146512701023453034076 0ustar kamikami00000000000000{ "endTime": "2013-09-03T01:29:07.021-07:00", "id": "04072826501537092633", "insertTime": "2013-09-03T01:29:03.082-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lb_pool_setBackup_post", "operationType": "setBackup", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lb_pool_setBackup_post", "startTime": "2013-09-03T01:29:03.145-07:00", "status": "DONE", "targetId": "16862638289615591831", "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lb-pool", "user": "user@gserviceaccount.com" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/setCommonInstanceMetadata_post.json0000664000175000017500000000120712701023453032304 0ustar kamikami00000000000000{ "endTime": "2013-06-26T10:05:07.630-07:00", "id": "3681664092089171723", "insertTime": "2013-06-26T10:05:03.271-07:00", "kind": "compute#operation", "name": "operation-setCommonInstanceMetadata", "operationType": "insert", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-setCommonInstanceMetadata", "startTime": "2013-06-26T10:05:03.315-07:00", "status": "PENDING", "targetId": "16211908079305042870", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/setCommonInstanceMetadata", "user": "foo@developer.gserviceaccount.com" } ././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addInstance_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpoo0000664000175000017500000000143512701023453034052 0ustar kamikami00000000000000{ "id": "04072826501537092633", "insertTime": "2013-09-03T01:29:03.082-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lctargetpool_addInstance_post", "operationType": "addInstance", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_addInstance_post", "startTime": "2013-09-03T01:29:03.145-07:00", "status": "PENDING", "targetId": "16862638289615591831", "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/setUsageExportBucket_post.json0000664000175000017500000000105112701023453031327 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "17203609782824174066", "name": "operation-setUsageExportBucket", "operationType": "setUsageExportBucket", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name", "targetId": "8116069320260064853", "status": "PENDING", "user": "erjohnso@google.com", "progress": 0, "insertTime": "2014-11-21T06:58:03.602-08:00", "startTime": "2014-11-21T06:58:04.018-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-setUsageExportBucket" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_routes_post.json0000664000175000017500000000101512701023453030050 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "17322940416642455149", "name": "operation-global_routes_post", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/routes/lcdemoroute", "status": "PENDING", "user": "erjohnso@google.com", "progress": 0, "insertTime": "2014-11-25T11:00:44.049-08:00", "startTime": "2014-11-25T11:00:44.385-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_routes_post" } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthCheck0000664000175000017500000000113312701023453034101 0ustar kamikami00000000000000{ "id": "3903393118268087410", "insertTime": "2013-09-03T02:19:54.629-07:00", "kind": 
"compute#operation", "name": "operation-global_httpHealthChecks_post", "operationType": "insert", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_post", "startTime": "2013-09-03T02:19:54.718-07:00", "status": "DONE", "targetId": "0742691415598204878", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", "user": "user@gserviceaccount.com" }././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_instanceGroups_insert.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_in0000664000175000017500000000115513153541406034153 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "myname", "zone": "us-central1-a", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones_us_central1_a/instanceGroups/myname", "operationType": "compute.instanceGroups.insert", "progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us_central1_a_instanceGroups_myname_insert", "user": "1264195755357-compute@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_vo0000664000175000017500000000135512701023453034166 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "14265294323024381703", "name": "operation-volume-auto-delete", "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "setDiskAutoDelete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/dev-test", "targetId": "4313186599918690450", "status": "DONE", "user": "user@developer.gserviceaccount.com", "progress": 100, "insertTime": "2014-03-13T21:50:57.612-07:00", "startTime": "2014-03-13T21:50:57.717-07:00", "endTime": "2014-03-13T21:50:58.047-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-volume-auto-delete" } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_stop.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_sto0000664000175000017500000000122312701023453034072 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "18431811683007150988", "name": "operation-stopnode", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "stop", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "targetId": "12335588484913203363", "status": "PENDING", "user": "erjohnso@google.com", "progress": 0, "insertTime": "2015-01-30T06:55:11.503-08:00", "startTime": "2015-01-30T06:55:11.847-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-stopnode" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-east1-b.json0000664000175000017500000000054013153541406027250 0ustar kamikami00000000000000{ "kind": "compute#zone", "id": "2231", "creationTimestamp": "2015-09-08T16:57:06.746-07:00", "name": "us-east1-b", "description": "us-east1-b", "status": "UP", "region": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-east1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b" } ././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000146012701023453034001 0ustar kamikami00000000000000{ "endTime": "2013-06-26T10:06:00.917-07:00", "id": "6999931397447918763", "insertTime": "2013-06-26T10:05:40.350-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node-name_delete", "operationType": "delete", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_delete", "startTime": "2013-06-26T10:05:40.405-07:00", "status": "DONE", "targetId": "07410051435384876224", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setMachineType.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_set0000664000175000017500000000127712701023453034071 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "18431811683007150988", "name": "operation-setMachineType-notstopped", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "setMachineType", "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "targetId": "12335588484913203363", "status": "PENDING", "user": "erjohnso@google.com", "progress": 0, "insertTime": "2015-01-30T06:55:11.503-08:00", "startTime": "2015-01-30T06:55:11.847-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-setMachineType-notstopped" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances.json0000664000175000017500000000240312701023453033154 0ustar kamikami00000000000000{ "kind": "compute#targetInstanceList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/targetInstances", "id": "projects/project_name/zones/us-central1-a/targetInstances", "items": [ { "kind": "compute#targetInstance", "id": "8092539649535704539", "creationTimestamp": "2014-08-07T12:46:10.372-07:00", "name": "hello", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "natPolicy": "NO_NAT", "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/targetInstances/hello" }, { "kind": "compute#targetInstance", "id": "9539205115599811578", "creationTimestamp": "2014-08-07T13:09:19.634-07:00", "name": "lctargetinstance", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "natPolicy": "NO_NAT", "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/targetInstances/lctargetinstance" } ] } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_setMachineType.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_se0000664000175000017500000000125312701023453034146 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "18431811683007150988", "name": "operation-setMachineType", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "setMachineType", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/stopped-node", "targetId": "12335588484913203363", "status": "DONE", "user": "erjohnso@google.com", "progress": 100, "insertTime": "2015-01-30T06:55:11.503-08:00", "startTime": "2015-01-30T06:55:11.847-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-setMachineType" } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_att0000664000175000017500000000143012701023453033627 0ustar kamikami00000000000000{ "id": "7455886659787654716", "insertTime": "2013-06-26T16:48:27.691-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node-name_attachDisk_post", "operationType": "attachDisk", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_attachDisk_post", "startTime": "2013-06-26T16:48:27.762-07:00", "status": "PENDING", "targetId": "1845312225624811608", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "user": 
"897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetw0000664000175000017500000000123412701023453034270 0ustar kamikami00000000000000{ "endTime": "2013-06-26T10:05:12.607-07:00", "id": "4914541423567262393", "insertTime": "2013-06-26T10:05:11.102-07:00", "kind": "compute#operation", "name": "operation-global_networks_lcnetwork_delete", "operationType": "delete", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_networks_lcnetwork_delete", "startTime": "2013-06-26T10:05:11.273-07:00", "status": "DONE", "targetId": "16211908079305042870", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/lcnetwork", "user": "897001307951@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_disks.json0000664000175000017500000003461013153541406027445 0ustar kamikami00000000000000{ "id": "projects/project_name/aggregated/disks", "items": { "zones/europe-west1-a": { "disks": [ { "creationTimestamp": "2013-12-13T10:43:33.753-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "0819226106922408217", "kind": "compute#disk", "name": "libcloud-demo-europe-boot-disk", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-boot-disk", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567788", 
"status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" }, { "creationTimestamp": "2013-12-13T10:43:20.420-08:00", "id": "30789070506648158", "kind": "compute#disk", "name": "libcloud-demo-europe-attach-disk", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-attach-disk", "sizeGb": "1", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" }, { "creationTimestamp": "2013-12-13T10:43:07.390-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "01221310665639400697", "kind": "compute#disk", "name": "libcloud-demo-europe-np-node", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-np-node", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567788", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" }, { "creationTimestamp": "2013-12-13T10:43:53.598-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "17495188440080825940", "kind": "compute#disk", "name": "libcloud-demo-europe-multiple-nodes-000", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-multiple-nodes-000", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567788", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" } ] }, "zones/europe-west1-b": { "warning": { "code": 
"NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/europe-west1-b" } ], "message": "There are no results for scope 'zones/europe-west1-b' on this page." } }, "zones/us-central1-a": { "disks": [ { "creationTimestamp": "2013-12-13T10:45:20.308-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "0265567475385851075", "kind": "compute#disk", "name": "node-name", "description": "I'm a happy little disk", "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-standard", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567788", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }, { "creationTimestamp": "2013-12-13T10:45:42.139-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "08045379695757218000", "kind": "compute#disk", "name": "lcdisk", "description": "I'm a happy little disk", "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-ssd", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567789", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }, { "creationTimestamp": "2013-12-13T10:54:07.687-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", 
"id": "08045379695757218002", "kind": "compute#disk", "name": "lcnode-000", "description": "I'm a happy little persistent disk", "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-standard", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcnode-000", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567789", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }, { "creationTimestamp": "2013-12-13T10:54:07.687-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "08045379695757218000", "kind": "compute#disk", "name": "lcnode-001", "description": "I'm a happy little persistent disk", "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-standard", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcnode-001", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567791", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } ] }, "zones/us-central1-b": { "disks": [ { "creationTimestamp": "2013-09-04T11:03:54.122-07:00", "description": "Persistent boot disk created from https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130723.", "id": "8658241308250794904", "kind": "compute#disk", "name": "test1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/disks/test1", "sizeGb": "10", "status": "READY", "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" }, { "creationTimestamp": "2013-12-13T10:54:07.687-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "08045379695757218001", "kind": "compute#disk", "name": "libcloud-lb-demo-www-000", "description": "I'm a happy little persistent disk", "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/diskTypes/pd-standard", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/disks/libcloud-lb-demo-www-000", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567791", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" }, { "creationTimestamp": "2013-12-13T10:54:07.687-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "08045379695757218001", "kind": "compute#disk", "name": "libcloud-lb-demo-www-001", "description": "I'm a happy little persistent disk", "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/diskTypes/pd-standard", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/disks/libcloud-lb-demo-www-001", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567791", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" } ] }, "zones/us-central2-a": { "disks": [ { "creationTimestamp": "2013-12-13T10:41:59.430-08:00", "description": "Image: 
https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "3371304879167251249", "kind": "compute#disk", "name": "libcloud-demo-boot-disk", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-boot-disk", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567788", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" }, { "creationTimestamp": "2013-12-13T10:42:15.355-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "12650345960824309663", "kind": "compute#disk", "name": "libcloud-demo-multiple-nodes-000", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-multiple-nodes-000", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567788", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" }, { "creationTimestamp": "2013-12-13T10:41:52.533-08:00", "id": "01867312924613359214", "kind": "compute#disk", "name": "libcloud-demo-attach-disk", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-attach-disk", "sizeGb": "1", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" }, { "creationTimestamp": "2013-12-13T10:42:15.949-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "12498700959701905851", "kind": "compute#disk", "name": "libcloud-demo-multiple-nodes-001", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-multiple-nodes-001", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567788", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" }, { "creationTimestamp": "2013-12-13T10:41:44.063-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "345757781195247006", "kind": "compute#disk", "name": "libcloud-demo-np-node", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-np-node", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567788", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" }, { "creationTimestamp": "2013-12-13T10:45:42.139-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "08045379695757218000", "kind": "compute#disk", "name": "lcdisk", "description": "I'm a happy little disk", "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/diskTypes/pd-ssd", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/lcdisk", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567789", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" } ] } }, "kind": "compute#diskAggregatedList", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/aggregated/disks" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json0000664000175000017500000000115712701023453032757 0ustar kamikami00000000000000{ "id": "4914541423567262393", "insertTime": "2013-06-26T10:05:11.102-07:00", "kind": "compute#operation", "name": "operation-global_networks_lcnetwork_delete", "operationType": "delete", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_networks_lcnetwork_delete", "startTime": "2013-06-26T10:05:11.273-07:00", "status": "PENDING", "targetId": "16211908079305042870", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/lcnetwork", "user": "897001307951@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_routes_lcdemoroute.json0000664000175000017500000000176212701023453031416 0ustar kamikami00000000000000{ "kind": "compute#route", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/routes/lcdemoroute", "id": "14575183394193523469", "creationTimestamp": "2014-11-25T11:00:45.062-08:00", "name": "lcdemoroute", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "tags": [ "tag1", "tag2" ], "destRange": "192.168.25.0/24", "priority": 1000, "nextHopInstance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-f/instances/libcloud-100", "warnings": [ { "code": "NEXT_HOP_CANNOT_IP_FORWARD", "message": "Next hop instance 'https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-f/instances/libcloud-100' cannot forward ip traffic. 
The next hop instance must have canIpForward set.", "data": [ { "key": "instance", "value": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-f/instances/libcloud-100" } ] } ] } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_di0000664000175000017500000000141112701023453033763 0ustar kamikami00000000000000{ "endTime": "2013-06-26T16:48:25.375-07:00", "id": "0211151278250678078", "insertTime": "2013-06-26T16:48:17.403-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_disks_post", "operationType": "insert", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_post", "startTime": "2013-06-26T16:48:17.479-07:00", "status": "DONE", "targetId": "03196637868764498730", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_networks.json0000664000175000017500000000337013153541406027351 0ustar kamikami00000000000000{ "kind": "compute#networkList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks", "id": "projects/project_name/global/networks", "items": [ { "kind": "compute#network", "id": "5125152985904090792", "creationTimestamp": "2016-03-25T05:34:15.077-07:00", "name": "cf", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/cf", "autoCreateSubnetworks": true, "subnetworks": [ 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112", "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-east1/subnetworks/cf-daf1e2124a902a47", "https://www.googleapis.com/compute/v1/projects/project_name/regions/asia-east1/subnetworks/cf-4c2da366a0381eb9", "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1/subnetworks/cf-df1837b06a6f927b" ] }, { "kind": "compute#network", "id": "7887441312352916157", "creationTimestamp": "2016-04-30T10:33:06.252-07:00", "name": "custom", "description": "Custom network", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/custom", "autoCreateSubnetworks": false, "subnetworks": [ "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/subnet1" ] }, { "kind": "compute#network", "id": "2672023774255449680", "creationTimestamp": "2014-01-21T10:30:55.392-08:00", "name": "default", "description": "Default network for the project", "IPv4Range": "10.240.0.0/16", "gatewayIPv4": "10.240.0.1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default" } ] } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_urlMaps_web_map_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_urlMaps_web_map0000664000175000017500000000117612701023453034162 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "3333333333333333333", "name": "operation_global_urlMaps_web_map_delete", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/urlMaps/web-map", "targetId": "1955555555986139870", "status": "DONE", "user": "user@developer.gserviceaccount.com", "progress": 100, "insertTime": "2014-10-28T12:36:28.927-07:00", 
"startTime": "2014-10-28T12:36:29.146-07:00", "endTime": "2014-10-28T12:36:29.693-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_urlMaps_web_map_delete" } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname.j0000664000175000017500000000067313153541406034030 0ustar kamikami00000000000000{ "kind": "compute#instanceGroup", "id": "5837905299775594184", "creationTimestamp": "2016-09-09T13:48:39.700-07:00", "name": "myname", "description": "", "fingerprint": "42WmSpB8rSM=", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname", "size": 0 } ././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpoo0000664000175000017500000000145712701023453034056 0ustar kamikami00000000000000{ "id": "14738174613993796821", "insertTime": "2013-09-03T01:28:32.889-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post", "operationType": "removeHealthCheck", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post", "startTime": "2013-09-03T01:28:32.942-07:00", "status": "PENDING", "targetId": 
"16862638289615591831", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_diskTypes.json0000664000175000017500000000213612701023453032000 0ustar kamikami00000000000000{ "kind": "compute#diskTypeList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes", "id": "projects/project_name/zones/us-central1-a/diskTypes", "items": [ { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.529-07:00", "name": "pd-ssd", "description": "SSD Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-ssd", "defaultDiskSizeGb": "100" }, { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.530-07:00", "name": "pd-standard", "description": "Standard Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-standard", "defaultDiskSizeGb": "500" } ] } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lb_pool.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lb_pool.jso0000664000175000017500000000146312701023453033750 0ustar kamikami00000000000000{ "kind": "compute#targetPool", "id": "11474672125700394323", "creationTimestamp": "2014-11-24T12:52:13.366-08:00", "name": "lb-pool", "region": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" ], "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000" ], "sessionAffinity": "NONE", "failoverRatio": 0.1, "backupPool": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/backup-pool", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lb-pool" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json0000664000175000017500000000102412701023453032462 0ustar kamikami00000000000000{ "creationTimestamp": "2013-06-26T10:06:04.007-07:00", "id": "16109451798967042451", "kind": "compute#disk", "name": "lcdisk", "description": "I'm a happy little SSD", "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-ssd", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "sizeGb": "10", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json0000664000175000017500000000327012701023453030715 0ustar kamikami00000000000000{ "id": "projects/project_name/global/httpHealthChecks", "items": [ { "checkIntervalSec": 5, "creationTimestamp": "2013-08-19T14:42:28.947-07:00", "description": "", "healthyThreshold": 2, "host": "", "id": "7660832580304455442", "kind": "compute#httpHealthCheck", "name": "basic-check", "port": 80, "requestPath": "/", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check", "timeoutSec": 5, "unhealthyThreshold": 2 }, { "checkIntervalSec": 10, 
"creationTimestamp": "2013-12-13T10:52:46.800-08:00", "healthyThreshold": 3, "host": "lchost", "id": "022194976205566532", "kind": "compute#httpHealthCheck", "name": "lchealthcheck", "description": "test healthcheck", "port": 9000, "requestPath": "/lc", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/lchealthcheck", "timeoutSec": 10, "unhealthyThreshold": 4 }, { "checkIntervalSec": 5, "creationTimestamp": "2013-12-13T10:51:42.762-08:00", "healthyThreshold": 2, "id": "08359377740909791076", "kind": "compute#httpHealthCheck", "name": "libcloud-lb-demo-healthcheck", "port": 80, "requestPath": "/", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck", "timeoutSec": 5, "unhealthyThreshold": 2 } ], "kind": "compute#httpHealthCheckList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_backendServices-empty.json0000664000175000017500000000030412701023453031711 0ustar kamikami00000000000000{ "kind": "compute#backendServiceList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices", "id": "projects/project_name/global/backendServices" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_urlMaps_web_map_delete.json0000664000175000017500000000112212701023453032120 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "3333333333333333333", "name": "operation_global_urlMaps_web_map_delete", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/urlMaps/web-map", "targetId": "1955555555986139870", "status": "PENDING", "user": "user@developer.gserviceaccount.com", "progress": 0, "insertTime": "2014-10-28T12:36:28.927-07:00", "startTime": "2014-10-28T12:36:29.146-07:00", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_urlMaps_web_map_delete" } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_backup_pool.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_backup_pool0000664000175000017500000000140512701023453034022 0ustar kamikami00000000000000{ "creationTimestamp": "2013-09-03T00:51:05.300-07:00", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" ], "id": "13598380121688918358", "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001" ], "kind": "compute#targetPool", "name": "backup-pool", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/backup-pool" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions-paged-2.json0000664000175000017500000000223312701023453027030 0ustar kamikami00000000000000{ "kind": "compute#regionList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions", "id": "projects/project_name/regions", "items": [ { "kind": "compute#region", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "id": "1000", "creationTimestamp": "2014-04-11T13:47:12.495-07:00", "name": "us-central1", "description": "us-central1", "status": "UP", "zones": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" ], 
"quotas": [ { "metric": "CPUS", "limit": 24.0, "usage": 1.0 }, { "metric": "DISKS_TOTAL_GB", "limit": 5120.0, "usage": 60.0 }, { "metric": "STATIC_ADDRESSES", "limit": 7.0, "usage": 1.0 }, { "metric": "IN_USE_ADDRESSES", "limit": 23.0, "usage": 1.0 }, { "metric": "SSD_TOTAL_GB", "limit": 1024.0, "usage": 0.0 }, { "metric": "LOCAL_SSD_TOTAL_GB", "limit": 1500.0, "usage": 0.0 } ] } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_image_post.json0000664000175000017500000000114612701023453034141 0ustar kamikami00000000000000{ "endTime": "2014-03-09T21:04:33.291-07:00", "id": "15196339658718959621", "insertTime": "2014-03-09T21:04:31.228-07:00", "kind": "compute#operation", "name": "coreos", "operationType": "insert", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_image_post", "startTime": "2014-03-09T21:04:31.291-07:00", "status": "DONE", "targetId": "12551176716147327315", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/coreos", "user": "897001307951@developer.gserviceaccount.com" } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-nopubip-001.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-n0000664000175000017500000000301313153541406033544 0ustar kamikami00000000000000{ "canIpForward": false, "creationTimestamp": "2013-12-13T10:51:25.165-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-lb-demo-www-001", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/disks/libcloud-lb-demo-www-001", "type": "PERSISTENT" } ], "id": "11523404878663997348", "kind": "compute#instance", "machineType": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1", "metadata": { "fingerprint": "09vSzO6KXcw=", "items": [ { "key": "startup-script", "value": "apt-get -y update && apt-get -y install apache2 && hostname > /var/www/index.html" } ], "kind": "compute#metadata" }, "name": "libcloud-lb-nopubip-001", "networkInterfaces": [ { "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.94.66" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-nopubip-001", "status": "RUNNING", "tags": { "fingerprint": "XI0he92M8l8=", "items": [ "libcloud-lb-demo-www" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_opensuse-cloud_global_images.json0000664000175000017500000000632412701023453033675 0ustar kamikami00000000000000{ "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images", "id": "projects/opensuse-cloud/global/images", "items": [ { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-13-1-v20140609", "id": "5330388859130445578", "creationTimestamp": "2014-06-09T11:45:56.997-07:00", "name": "opensuse-13-1-v20140609", "description": "", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-13-1-v20140627", "deprecated": "2014-06-27T00:00:00Z" }, "status": "READY", "archiveSizeBytes": "872777232", "diskSizeGb": "8" }, { "kind": "compute#image", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-13-1-v20140627", "id": "635369188275126205", "creationTimestamp": "2014-06-27T08:44:37.896-07:00", "name": "opensuse-13-1-v20140627", "description": "", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-13-1-v20140711", "deprecated": "2014-07-11T00:00:00Z" }, "status": "READY", "archiveSizeBytes": "878545758", "diskSizeGb": "8" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-13-1-v20140711", "id": "414150705420767734", "creationTimestamp": "2014-07-11T14:04:35.210-07:00", "name": "opensuse-13-1-v20140711", "description": "", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-13-1-v20141102", "deprecated": "2014-11-02T00:00:00Z" }, "status": "READY", "archiveSizeBytes": "878437869", "diskSizeGb": "8" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-13-1-v20141102", "id": "4629398803823711760", "creationTimestamp": "2014-11-02T04:34:26.263-08:00", "name": "opensuse-13-1-v20141102", "description": "openSUSE 13.1 (built on 2014-11-02)", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "881210631", "diskSizeGb": "8" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-13-2-v20141205", "id": "7015477126275748573", "creationTimestamp": "2014-12-09T05:36:16.085-08:00", "name": "opensuse-13-2-v20141205", "description": "openSUSE 13.2 (built on 2014-12-05)", 
"sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "1091494581", "diskSizeGb": "8" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json0000664000175000017500000000447612701023453031507 0ustar kamikami00000000000000{ "id": "projects/project_name/aggregated/forwardingRules", "items": { "regions/europe-west1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "regions/europe-west1" } ], "message": "There are no results for scope 'regions/europe-west1' on this page." } }, "regions/us-central1": { "forwardingRules": [ { "IPAddress": "108.59.86.60", "IPProtocol": "TCP", "creationTimestamp": "2013-12-13T10:51:47.602-08:00", "id": "0401221837226610637", "kind": "compute#forwardingRule", "name": "libcloud-lb-demo-lb", "portRange": "80-80", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/libcloud-lb-demo-lb", "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" }, { "IPAddress": "173.255.114.35", "IPProtocol": "TCP", "creationTimestamp": "2013-12-13T10:52:57.170-08:00", "id": "06342111469679701315", "kind": "compute#forwardingRule", "name": "lcforwardingrule", "description": "test forwarding rule", "portRange": "8000-8500", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" } ] }, "regions/us-central2": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "regions/us-central2" } ], "message": 
"There are no results for scope 'regions/us-central2' on this page." } } }, "kind": "compute#forwardingRuleAggregatedList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/forwardingRules" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1.json0000664000175000017500000000231613153541406030040 0ustar kamikami00000000000000{ "kind": "compute#region", "id": "1000", "creationTimestamp": "2014-05-30T18:35:16.413-07:00", "name": "us-central1", "description": "us-central1", "status": "UP", "zones": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" ], "quotas": [ { "metric": "CPUS", "limit": 1050.0, "usage": 30.0 }, { "metric": "DISKS_TOTAL_GB", "limit": 20000.0, "usage": 344.0 }, { "metric": "STATIC_ADDRESSES", "limit": 10.0, "usage": 2.0 }, { "metric": "IN_USE_ADDRESSES", "limit": 1050.0, "usage": 11.0 }, { "metric": "SSD_TOTAL_GB", "limit": 2048.0, "usage": 500.0 }, { "metric": "LOCAL_SSD_TOTAL_GB", "limit": 10240.0, "usage": 0.0 }, { "metric": "INSTANCE_GROUPS", "limit": 100.0, "usage": 0.0 }, { "metric": "INSTANCE_GROUP_MANAGERS", "limit": 50.0, "usage": 0.0 }, { "metric": "INSTANCES", "limit": 10500.0, "usage": 11.0 }, { "metric": "AUTOSCALERS", "limit": 50.0, "usage": 0.0 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_gce-nvme_global_images.json0000664000175000017500000000373612701023453032435 0ustar kamikami00000000000000{ "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/gce-nvme/global/images", "id": "projects/gce-nvme/global/images", "items": [ { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/gce-nvme/global/images/nvme-backports-debian-7-wheezy-v20140904", "id": "11933993573261788709", 
"creationTimestamp": "2014-11-05T20:09:29.302-08:00", "name": "nvme-backports-debian-7-wheezy-v20140904", "description": "NVMe optimized Debian GNU/Linux 7.6 (wheezy) amd64 with backports kernel and SSH packages built on 2014-09-04", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "158683343", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/gce-nvme/global/images/nvme-backports-debian-7-wheezy-v20140926", "id": "16689990597272015714", "creationTimestamp": "2014-10-20T22:01:24.039-07:00", "name": "nvme-backports-debian-7-wheezy-v20140926", "description": "NVMe optimized Debian GNU/Linux 7.6 (wheezy) amd64 with backports kernel and SSH packages built on 2014-09-26", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "221456136", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/gce-nvme/global/images/nvme-backports-debian-7-wheezy-v20141108", "id": "9219956677399420856", "creationTimestamp": "2014-11-14T10:55:03.809-08:00", "name": "nvme-backports-debian-7-wheezy-v20141108", "description": "NVMe optimized Debian GNU/Linux 7.6 (wheezy) amd64 with backports kernel and SSH packages built on 2014-11-08", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "162245079", "diskSizeGb": "10" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules.json0000664000175000017500000000277712701023453033303 0ustar kamikami00000000000000{ "id": "projects/project_name/regions/us-central1/forwardingRules", "items": [ { "IPAddress": "173.255.119.224", "IPProtocol": "TCP", "creationTimestamp": "2013-09-03T00:17:25.544-07:00", "id": "10901665092293158938", "kind": "compute#forwardingRule", "name": "lcforwardingrule", "portRange": "8000-8500", 
"region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" }, { "IPAddress": "173.255.119.185", "IPProtocol": "TCP", "creationTimestamp": "2013-09-02T22:25:50.575-07:00", "id": "15826316229163619337", "kind": "compute#forwardingRule", "name": "libcloud-lb-demo-lb", "portRange": "80-80", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/libcloud-lb-demo-lb", "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" } ], "kind": "compute#forwardingRuleList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules" }././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_setCommonInstanceMetadata.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_setCommonInstanceMetad0000664000175000017500000000120512701023453034122 0ustar kamikami00000000000000{ "endTime": "2013-06-26T10:05:07.630-07:00", "id": "3681664092089171723", "insertTime": "2013-06-26T10:05:03.271-07:00", "kind": "compute#operation", "name": "operation-setCommonInstanceMetadat", "operationType": "insert", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-setCommonInstanceMetadata", "startTime": "2013-06-26T10:05:03.315-07:00", "status": "DONE", "targetId": "16211908079305042870", "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/setCommonInstanceMetadata", "user": "foo@developer.gserviceaccount.com" } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroup_myinstancegroup2.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroup_myinstanc0000664000175000017500000000132613153541406033764 0ustar kamikami00000000000000{ "kind": "compute#instanceGroup", "id": "1968709502073089768", "creationTimestamp": "2016-08-26T16:53:42.413-07:00", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "name": "myinstancegroup2", "description": "myinstancegroup2", "network": "https://content.googleapis.com/compute/v1/projects/project_name/global/networks/default", "fingerprint": "42WmSpB8rSN=", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myinstancegroup2", "size": 4, "subnetwork": "https://content.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json0000664000175000017500000000354112701023453032011 0ustar kamikami00000000000000{ "id": "projects/project_name/zones/us-central1-a/instances", "items": [ { "canIpForward": false, "creationTimestamp": "2013-12-13T10:45:23.351-08:00", "disks": [ { "boot": true, "deviceName": "persistent-disk-0", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", "type": "PERSISTENT" } ], "id": "4006034190819017667", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": 
"42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "node-name", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.236.58.15", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.72.75" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "status": "RUNNING", "tags": { "fingerprint": "42WmSpB8rSM=" }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } ], "kind": "compute#instanceList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances" }././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances_lctargetinstance.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances_lctarge0000664000175000017500000000100112701023453033676 0ustar kamikami00000000000000{ "kind": "compute#targetInstance", "id": "9539205115599811578", "creationTimestamp": "2014-08-07T13:09:19.634-07:00", "name": "lctargetinstance", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "natPolicy": "NO_NAT", "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/targetInstances/lctargetinstance" } ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_addresses_lcaddressglobal_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_addresses_lcadd0000664000175000017500000000117312701023453034146 0ustar kamikami00000000000000{ "id": "7128783508312083402", "insertTime": "2013-06-26T12:21:44.075-07:00", "kind": "compute#operation", "name": "operation-global_addresses_lcaddressglobal_delete", "operationType": "delete", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_addresses_lcaddressglobal_delete", "startTime": "2013-06-26T12:21:44.110-07:00", "status": "DONE", "targetId": "01531551729918243104", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/addresses/lcaddressglobal", "user": "foo@developer.gserviceaccount.com" } ././@LongLink0000000000000000000000000000020100000000000011206 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_setMachineType_notstopped.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_se0000664000175000017500000000176012701023453034151 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "1122640775725896976", "name": "operation-setMachineType-notstopped", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "setMachineType", "targetLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "targetId": "10940055899777330894", "status": "DONE", "user": "erjohnso@google.com", "progress": 100, "insertTime": "2016-03-07T08:09:34.445-08:00", "startTime": "2016-03-07T08:09:34.679-08:00", "endTime": "2016-03-07T08:09:35.161-08:00", "error": { "errors": [ { "code": "RESOURCE_NOT_READY", "message": "The resource 
'projects/project_name/zones/us-central1-a/instances/node-name' is not ready" } ] }, "httpErrorStatusCode": 400, "httpErrorMessage": "BAD REQUEST", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-setMachineType-notstopped" } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfir0000664000175000017500000000123412701023453034207 0ustar kamikami00000000000000{ "endTime": "2013-06-26T20:52:10.075-07:00", "id": "6526551968265354277", "insertTime": "2013-06-26T20:52:00.355-07:00", "kind": "compute#operation", "name": "operation-global_firewalls_lcfirewall_put", "operationType": "update", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_put", "startTime": "2013-06-26T20:52:00.410-07:00", "status": "DONE", "targetId": "10942695305090163011", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", "user": "897001307951@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.0000664000175000017500000000121112701023453034146 0ustar kamikami00000000000000{ "endTime": "2013-06-26T20:51:12.108-07:00", "id": "16789512465352307784", "insertTime": "2013-06-26T20:51:06.068-07:00", "kind": "compute#operation", "name": "operation-global_firewalls_post", "operationType": "insert", "progress": 100, "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_post", "startTime": "2013-06-26T20:51:06.128-07:00", "status": "DONE", "targetId": "10942695305090163011", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", "user": "897001307951@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json0000664000175000017500000000074512701023453031667 0ustar kamikami00000000000000{ "allowed": [ { "IPProtocol": "tcp", "ports": [ "4567" ] } ], "creationTimestamp": "2013-06-26T10:04:43.773-07:00", "id": "0565629596395414121", "kind": "compute#firewall", "name": "lcfirewall", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", "sourceTags": [ "libcloud" ] }././@LongLink0000000000000000000000000000022300000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node_name_deleteAccessConfig_done.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000132312701023453033777 0ustar kamikami00000000000000{ "endTime": "2013-06-26T16:13:08.382-07:00", "id": "1858155812259649243", "insertTime": "2013-06-26T16:12:51.492-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node_name_deleteAccessConfig_post", "operationType": "delete", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node_name_deleteAccessConfig_post", "startTime": "2013-06-26T16:12:51.537-07:00", "status": "DONE", "targetId": "16630486471904253898", "user": "foo@developer.gserviceaccount.com", "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroupManagers_insert.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroupManagers_i0000664000175000017500000000163013153541406033663 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "5029726187056811377", "name": "operation-zones_us-central1-a_instanceGroupManagers_insert_post", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "compute.instanceGroupManagers.insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroupManagers/managed-instance-group-1", "targetId": "1549031063416753526", "status": "PENDING", "user": "999999-compute@developer.gserviceaccount.com", "progress": 90, "insertTime": "2016-08-12T09:04:46.114-07:00", "startTime": "2016-08-12T09:04:46.117-07:00", "endTime": "2016-08-12T09:04:46.117-07:00", "selfLink" : "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instanceGroupManagers_insert_post" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/project.json0000664000175000017500000000357412701023453025624 0ustar kamikami00000000000000{ "kind": "compute#project", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name", "id": "8116069320260064853", "creationTimestamp": "2014-01-21T10:30:53.390-08:00", "name": "project_name", "description": "", "commonInstanceMetadata": { "kind": "compute#metadata", "fingerprint": "3zEcGBxH6Vs=", "items": [ { "key": "sshKeys", "value": "ABCDEF" }, { "key": "startup-script", "value": "#!/bin/bash\n\nAUTO_SCRIPT=$(curl -s http://metadata/computeMetadata/v1/instance/attributes/my-auto-script -H 
\"Metadata-Flavor: Google\")\nCHECK=${AUTO_SCRIPT:-disabled}\n\nif [ \"${CHECK}\" = \"enabled\" -a -f /etc/debian_version ]; then\n export DEBIAN_FRONTEND=noninteractive\n apt-get -q -y update\n apt-get -q -y install git vim tmux\n fi\nexit 0\n" } ] }, "quotas": [ { "metric": "SNAPSHOTS", "limit": 1000, "usage": 1 }, { "metric": "NETWORKS", "limit": 5, "usage": 3 }, { "metric": "FIREWALLS", "limit": 100, "usage": 6 }, { "metric": "IMAGES", "limit": 100, "usage": 1 }, { "metric": "STATIC_ADDRESSES", "limit": 7, "usage": 1 }, { "metric": "ROUTES", "limit": 100, "usage": 2 }, { "metric": "FORWARDING_RULES", "limit": 50, "usage": 0 }, { "metric": "TARGET_POOLS", "limit": 50, "usage": 0 }, { "metric": "HEALTH_CHECKS", "limit": 50, "usage": 1 }, { "metric": "IN_USE_ADDRESSES", "limit": 23, "usage": 0 }, { "metric": "TARGET_INSTANCES", "limit": 50, "usage": 3 }, { "metric": "TARGET_HTTP_PROXIES", "limit": 50, "usage": 0 }, { "metric": "URL_MAPS", "limit": 50, "usage": 1 }, { "metric": "BACKEND_SERVICES", "limit": 50, "usage": 1 } ], "usageExportLocation": { "bucketName": "gs://graphite-usage-reports", "reportNamePrefix": "graphite-report" } } ././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_backendServices_web_service_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_backendServices0000664000175000017500000000124312701023453034133 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "3333333333333333333", "name": "operation_global_backendServices_web_service_delete", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices/web-service", "targetId": "15555555555223232737", "status": "DONE", "user": "user@developer.gserviceaccount.com", "progress": 100, "insertTime": "2014-10-28T12:51:20.402-07:00", "startTime": 
"2014-10-28T12:51:20.623-07:00", "endTime": "2014-10-28T12:51:21.218-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_backendServices_web_service_delete" } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_del0000664000175000017500000000140412701023453033604 0ustar kamikami00000000000000{ "id": "6999931397447918763", "insertTime": "2013-06-26T10:05:40.350-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node-name_delete", "operationType": "delete", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_delete", "startTime": "2013-06-26T10:05:40.405-07:00", "status": "PENDING", "targetId": "07410051435384876224", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_targetHttpProxies_web_proxy_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_targetHttpProxies_web_proxy_delete.j0000664000175000017500000000116712701023453034072 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "33333333333333333333", "name": "operation_global_targetHttpProxies_web_proxy_delete", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies/web-proxy", "targetId": 
"5243939392541625113", "status": "PENDING", "user": "user@developer.gserviceaccount.com", "progress": 0, "insertTime": "2014-10-28T12:21:47.406-07:00", "startTime": "2014-10-28T12:21:47.666-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_targetHttpProxies_web_proxy_delete" } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_set0000664000175000017500000000135612701023453034067 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "14265294323024381703", "name": "operation-volume-auto-delete", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "setDiskAutoDelete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/dev-test", "targetId": "4313186599918690450", "status": "PENDING", "user": "user@developer.gserviceaccount.com", "progress": 0, "insertTime": "2014-03-13T21:50:57.612-07:00", "startTime": "2014-03-13T21:50:57.717-07:00", "endTime": "2014-03-13T21:50:58.047-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-volume-auto-delete" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_coreos-cloud_global_images.json0000664000175000017500000014547213153541406033343 0ustar kamikami00000000000000{ "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images", "id": "projects/coreos-cloud/global/images", "items": [ { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-282-0-0-v20140410", "id": "4545075671331449642", 
"creationTimestamp": "2014-04-10T13:37:09.105-07:00", "name": "coreos-alpha-282-0-0-v20140410", "description": "CoreOS (alpha)", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "191704931", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-298-0-0-v20140425", "id": "13394839002167516366", "creationTimestamp": "2014-04-25T16:05:41.718-07:00", "name": "coreos-alpha-298-0-0-v20140425", "description": "CoreOS alpha 298.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "194267903", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-310-1-0-v20140508", "id": "11111206691445863910", "creationTimestamp": "2014-05-07T17:20:35.575-07:00", "name": "coreos-alpha-310-1-0-v20140508", "description": "CoreOS alpha 310.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "196010234", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-315-0-0-v20140512", "id": "16022942869504160", "creationTimestamp": "2014-05-12T16:24:23.130-07:00", "name": "coreos-alpha-315-0-0-v20140512", "description": "CoreOS alpha 
315.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "195832144", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-317-0-0-v20140515", "id": "11739259381666430485", "creationTimestamp": "2014-05-15T10:42:51.748-07:00", "name": "coreos-alpha-317-0-0-v20140515", "description": "CoreOS alpha 317.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "195591890", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-324-1-0-v20140522", "id": "3998011925663216170", "creationTimestamp": "2014-05-22T11:10:59.683-07:00", "name": "coreos-alpha-324-1-0-v20140522", "description": "CoreOS alpha 324.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "198854961", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-324-2-0-v20140528", "id": "6833110226481787934", "creationTimestamp": "2014-05-28T12:04:45.280-07:00", "name": "coreos-alpha-324-2-0-v20140528", "description": "CoreOS alpha 324.2.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": 
"DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "198872299", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-324-3-0-v20140530", "id": "2096033640183904088", "creationTimestamp": "2014-05-30T10:10:54.644-07:00", "name": "coreos-alpha-324-3-0-v20140530", "description": "CoreOS alpha 324.3.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "198897147", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-324-4-0-v20140607", "id": "13657407932096700402", "creationTimestamp": "2014-06-06T17:48:24.952-07:00", "name": "coreos-alpha-324-4-0-v20140607", "description": "CoreOS alpha 324.4.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "198687188", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-324-5-0-v20140607", "id": "2289397358548509631", "creationTimestamp": "2014-06-07T15:11:14.415-07:00", "name": "coreos-alpha-324-5-0-v20140607", "description": "CoreOS alpha 324.5.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": 
"https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "198701072", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-338-0-0-v20140604", "id": "6110535785379416957", "creationTimestamp": "2014-06-04T15:57:45.096-07:00", "name": "coreos-alpha-338-0-0-v20140604", "description": "CoreOS alpha 338.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "204832142", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-342-0-0-v20140608", "id": "14338602172513809268", "creationTimestamp": "2014-06-08T10:50:15.283-07:00", "name": "coreos-alpha-342-0-0-v20140608", "description": "CoreOS alpha 342.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "204558347", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-342-1-0-v20140608", "id": "13330227154553534732", "creationTimestamp": "2014-06-08T14:57:01.770-07:00", "name": "coreos-alpha-342-1-0-v20140608", "description": "CoreOS alpha 342.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, 
"status": "READY", "archiveSizeBytes": "204688415", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-343-0-0-v20140609", "id": "1988715371441632844", "creationTimestamp": "2014-06-09T14:29:28.178-07:00", "name": "coreos-alpha-343-0-0-v20140609", "description": "CoreOS alpha 343.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "204553796", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-349-0-0-v20140616", "id": "16583233600096079991", "creationTimestamp": "2014-06-16T15:42:37.127-07:00", "name": "coreos-alpha-349-0-0-v20140616", "description": "CoreOS alpha 349.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "204556764", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-353-0-0-v20140621", "id": "6651180993237136697", "creationTimestamp": "2014-06-20T17:12:53.636-07:00", "name": "coreos-alpha-353-0-0-v20140621", "description": "CoreOS alpha 353.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "204893692", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-361-0-0-v20140627", "id": "16595596722360750984", "creationTimestamp": "2014-06-27T11:18:42.680-07:00", "name": "coreos-alpha-361-0-0-v20140627", "description": "CoreOS alpha 361.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "204875098", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-367-0-0-v20140703", "id": "14830409198434884170", "creationTimestamp": "2014-07-03T15:13:10.342-07:00", "name": "coreos-alpha-367-0-0-v20140703", "description": "CoreOS alpha 367.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "202900963", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-367-1-0-v20140713", "id": "4506608247976466482", "creationTimestamp": "2014-07-12T19:19:03.327-07:00", "name": "coreos-alpha-367-1-0-v20140713", "description": "CoreOS alpha 367.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "202819993", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-379-2-0-v20140715", "id": 
"10214880843477717813", "creationTimestamp": "2014-07-15T16:26:03.323-07:00", "name": "coreos-alpha-379-2-0-v20140715", "description": "CoreOS alpha 379.2.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "204225959", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-379-3-0-v20140716", "id": "9717320542649493270", "creationTimestamp": "2014-07-16T10:06:14.830-07:00", "name": "coreos-alpha-379-3-0-v20140716", "description": "CoreOS alpha 379.3.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "204475873", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-386-1-0-v20140723", "id": "8762908517570017855", "creationTimestamp": "2014-07-23T13:21:42.787-07:00", "name": "coreos-alpha-386-1-0-v20140723", "description": "CoreOS alpha 386.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "214809962", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-394-0-0-v20140801", "id": "13449834147524152564", "creationTimestamp": "2014-08-01T11:20:09.394-07:00", "name": "coreos-alpha-394-0-0-v20140801", 
"description": "CoreOS alpha 394.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "215030100", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-399-0-0-v20140806", "id": "9424565426493971034", "creationTimestamp": "2014-08-05T17:45:57.966-07:00", "name": "coreos-alpha-399-0-0-v20140806", "description": "CoreOS alpha 399.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "218427609", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-402-2-0-v20140807", "id": "17172174175513198150", "creationTimestamp": "2014-08-07T16:54:36.859-07:00", "name": "coreos-alpha-402-2-0-v20140807", "description": "CoreOS alpha 402.2.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "218464656", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-410-0-0-v20140818", "id": "5447152525517666471", "creationTimestamp": "2014-08-18T11:28:12.278-07:00", "name": "coreos-alpha-410-0-0-v20140818", "description": "CoreOS alpha 410.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, 
"deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "218443034", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-423-0-0-v20140828", "id": "3564803995591182122", "creationTimestamp": "2014-08-28T15:39:30.525-07:00", "name": "coreos-alpha-423-0-0-v20140828", "description": "CoreOS alpha 423.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "210763062", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-431-0-0-v20140905", "id": "2005848009016889709", "creationTimestamp": "2014-09-05T13:53:53.863-07:00", "name": "coreos-alpha-431-0-0-v20140905", "description": "CoreOS alpha 431.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "208605209", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-435-0-0-v20140910", "id": "13332314615344703276", "creationTimestamp": "2014-09-10T11:54:21.707-07:00", "name": "coreos-alpha-435-0-0-v20140910", "description": "CoreOS alpha 435.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": 
"https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "210142911", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-438-0-0-v20140913", "id": "5230953556156640067", "creationTimestamp": "2014-09-13T12:12:44.675-07:00", "name": "coreos-alpha-438-0-0-v20140913", "description": "CoreOS alpha 438.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "210475454", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-440-0-0-v20140915", "id": "5334314307925303424", "creationTimestamp": "2014-09-15T15:21:20.116-07:00", "name": "coreos-alpha-440-0-0-v20140915", "description": "CoreOS alpha 440.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "210345834", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-444-0-0-v20140919", "id": "13961453531244096097", "creationTimestamp": "2014-09-19T13:43:08.003-07:00", "name": "coreos-alpha-444-0-0-v20140919", "description": "CoreOS alpha 444.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, 
"status": "READY", "archiveSizeBytes": "210601371", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-452-0-0-v20140926", "id": "15205532858154654172", "creationTimestamp": "2014-09-26T13:13:30.539-07:00", "name": "coreos-alpha-452-0-0-v20140926", "description": "CoreOS alpha 452.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "211112758", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-457-0-0-v20141002", "id": "9789565658376148526", "creationTimestamp": "2014-10-01T18:49:01.683-07:00", "name": "coreos-alpha-457-0-0-v20141002", "description": "CoreOS alpha 457.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "211116656", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-459-0-0-v20141003", "id": "12778368443622282257", "creationTimestamp": "2014-10-03T15:37:32.621-07:00", "name": "coreos-alpha-459-0-0-v20141003", "description": "CoreOS alpha 459.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "211079895", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-471-1-0-v20141016", "id": "5868321226342539344", "creationTimestamp": "2014-10-15T17:58:49.120-07:00", "name": "coreos-alpha-471-1-0-v20141016", "description": "CoreOS alpha 471.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "212426664", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-472-0-0-v20141017", "id": "4576009004678324232", "creationTimestamp": "2014-10-17T13:25:52.653-07:00", "name": "coreos-alpha-472-0-0-v20141017", "description": "CoreOS alpha 472.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "209948512", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-490-0-0-v20141104", "id": "13920393620518010014", "creationTimestamp": "2014-11-04T14:22:35.647-08:00", "name": "coreos-alpha-490-0-0-v20141104", "description": "CoreOS alpha 490.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "214899846", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-493-0-0-v20141107", "id": 
"6901289773390612149", "creationTimestamp": "2014-11-06T17:07:25.804-08:00", "name": "coreos-alpha-493-0-0-v20141107", "description": "CoreOS alpha 493.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "214900059", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-494-0-0-v20141108", "id": "16984733952836989634", "creationTimestamp": "2014-11-08T08:14:39.763-08:00", "name": "coreos-alpha-494-0-0-v20141108", "description": "CoreOS alpha 494.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "217341810", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-505-1-0-v20141119", "id": "6377208203517836508", "creationTimestamp": "2014-11-19T11:58:34.839-08:00", "name": "coreos-alpha-505-1-0-v20141119", "description": "CoreOS alpha 505.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "220202141", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-507-0-0-v20141121", "id": "2812520923267400303", "creationTimestamp": "2014-11-20T16:53:33.724-08:00", "name": "coreos-alpha-507-0-0-v20141121", 
"description": "CoreOS alpha 507.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "220243128", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-509-0-0-v20141122", "id": "17634437931750438301", "creationTimestamp": "2014-11-22T12:07:41.631-08:00", "name": "coreos-alpha-509-0-0-v20141122", "description": "CoreOS alpha 509.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "215896616", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-509-1-0-v20141124", "id": "15997978649119901106", "creationTimestamp": "2014-11-24T13:54:40.347-08:00", "name": "coreos-alpha-509-1-0-v20141124", "description": "CoreOS alpha 509.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "220158301", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-522-0-0-v20141205", "id": "3688346254254255171", "creationTimestamp": "2014-12-05T11:12:11.016-08:00", "name": "coreos-alpha-522-0-0-v20141205", "description": "CoreOS alpha 522.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, 
"deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "220671632", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-522-1-0-v20141211", "id": "17994276440547647719", "creationTimestamp": "2014-12-11T12:59:25.259-08:00", "name": "coreos-alpha-522-1-0-v20141211", "description": "CoreOS alpha 522.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "220746106", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-522-2-0-v20141217", "id": "14750233033823203541", "creationTimestamp": "2014-12-17T11:07:40.413-08:00", "name": "coreos-alpha-522-2-0-v20141217", "description": "CoreOS alpha 522.2.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219" }, "status": "READY", "archiveSizeBytes": "220706464", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-534-1-0-v20141219", "id": "9382201742109997553", "creationTimestamp": "2014-12-18T16:02:57.257-08:00", "name": "coreos-alpha-534-1-0-v20141219", "description": "CoreOS alpha 534.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": 
"https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-536-0-0-v20141220" }, "status": "READY", "archiveSizeBytes": "150760714", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-536-0-0-v20141220", "id": "17499586025310826725", "creationTimestamp": "2014-12-20T13:19:46.596-08:00", "name": "coreos-alpha-536-0-0-v20141220", "description": "CoreOS alpha 536.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-540-0-0-v20141223" }, "status": "READY", "archiveSizeBytes": "150727446", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-540-0-0-v20141223", "id": "2686834707529846588", "creationTimestamp": "2014-12-23T13:10:58.507-08:00", "name": "coreos-alpha-540-0-0-v20141223", "description": "CoreOS alpha 540.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "147379024", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-310-1-0-v20140508", "id": "2504761896178375059", "creationTimestamp": "2014-05-08T16:21:25.030-07:00", "name": "coreos-beta-310-1-0-v20140508", "description": "CoreOS beta 310.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "196007489", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-324-3-0-v20140602", "id": "5956689618368737465", "creationTimestamp": "2014-06-02T13:23:43.465-07:00", "name": "coreos-beta-324-3-0-v20140602", "description": "CoreOS beta 324.3.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "198895988", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-324-5-0-v20140609", "id": "11953277661482892448", "creationTimestamp": "2014-06-09T09:49:34.235-07:00", "name": "coreos-beta-324-5-0-v20140609", "description": "CoreOS beta 324.5.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "198718133", "diskSizeGb": "6" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-353-0-0-v20140625", "id": "11516237452648076812", "creationTimestamp": "2014-06-25T13:29:04.367-07:00", "name": "coreos-beta-353-0-0-v20140625", "description": "CoreOS beta 353.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "204844190", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-367-1-0-v20140715", "id": 
"12967199568595368851", "creationTimestamp": "2014-07-15T16:24:00.178-07:00", "name": "coreos-beta-367-1-0-v20140715", "description": "CoreOS beta 367.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "202795742", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-410-0-0-v20140825", "id": "5879404002456449175", "creationTimestamp": "2014-08-25T12:43:37.337-07:00", "name": "coreos-beta-410-0-0-v20140825", "description": "CoreOS beta 410.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "218397098", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-440-0-0-v20140918", "id": "16470992918251712233", "creationTimestamp": "2014-09-18T15:25:30.677-07:00", "name": "coreos-beta-440-0-0-v20140918", "description": "CoreOS beta 440.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "210355603", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-444-2-0-v20140926", "id": "12871847181887001345", "creationTimestamp": "2014-09-26T08:59:23.897-07:00", "name": "coreos-beta-444-2-0-v20140926", 
"description": "CoreOS beta 444.2.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "210521891", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-444-3-0-v20141002", "id": "7978986993043599036", "creationTimestamp": "2014-10-01T18:58:31.452-07:00", "name": "coreos-beta-444-3-0-v20141002", "description": "CoreOS beta 444.3.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "210748822", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-444-4-0-v20141007", "id": "3564014796395385926", "creationTimestamp": "2014-10-07T15:07:46.523-07:00", "name": "coreos-beta-444-4-0-v20141007", "description": "CoreOS beta 444.4.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "210707776", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-444-5-0-v20141016", "id": "13297038239143916423", "creationTimestamp": "2014-10-15T19:47:19.726-07:00", "name": "coreos-beta-444-5-0-v20141016", "description": "CoreOS beta 444.5.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { 
"state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "210865263", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-0-0-v20141117", "id": "8506154037185657987", "creationTimestamp": "2014-11-17T10:16:15.255-08:00", "name": "coreos-beta-494-0-0-v20141117", "description": "CoreOS beta 494.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "217341172", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-1-0-v20141124", "id": "2468958217438571789", "creationTimestamp": "2014-11-24T14:02:13.599-08:00", "name": "coreos-beta-494-1-0-v20141124", "description": "CoreOS beta 494.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "217657127", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-4-0-v20141204", "id": "3901953085416533827", "creationTimestamp": "2014-12-04T15:52:43.520-08:00", "name": "coreos-beta-494-4-0-v20141204", "description": "CoreOS beta 494.4.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": 
"https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211" }, "status": "READY", "archiveSizeBytes": "217129760", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-494-5-0-v20141211", "id": "11721688361939601432", "creationTimestamp": "2014-12-11T13:07:37.057-08:00", "name": "coreos-beta-494-5-0-v20141211", "description": "CoreOS beta 494.5.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-522-2-0-v20141218" }, "status": "READY", "archiveSizeBytes": "217091382", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-522-2-0-v20141218", "id": "18164147672853893958", "creationTimestamp": "2014-12-18T13:29:11.177-08:00", "name": "coreos-beta-522-2-0-v20141218", "description": "CoreOS beta 522.2.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-522-3-0-v20141226" }, "status": "READY", "archiveSizeBytes": "220704959", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-beta-522-3-0-v20141226", "id": "14171939663085407486", "creationTimestamp": "2014-12-26T15:04:01.237-08:00", "name": "coreos-beta-522-3-0-v20141226", "description": "CoreOS beta 522.3.0", "family": "coreos", "guestOsFeatures": [ { "type": "VIRTIO_SCSI_MULTIQUEUE" } ], "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "220932284", "diskSizeGb": "9" }, { "kind": "compute#image", 
"selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-367-1-0-v20140724", "id": "2599882482782401961", "creationTimestamp": "2014-07-24T09:50:20.940-07:00", "name": "coreos-stable-367-1-0-v20140724", "description": "CoreOS stable 367.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" }, "status": "READY", "archiveSizeBytes": "202820713", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-410-0-0-v20140902", "id": "5505931863348151915", "creationTimestamp": "2014-09-02T09:51:46.932-07:00", "name": "coreos-stable-410-0-0-v20140902", "description": "CoreOS stable 410.0.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" }, "status": "READY", "archiveSizeBytes": "218443267", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-410-1-0-v20140926", "id": "8454778862121230636", "creationTimestamp": "2014-09-26T09:02:19.616-07:00", "name": "coreos-stable-410-1-0-v20140926", "description": "CoreOS stable 410.1.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" }, "status": "READY", "archiveSizeBytes": "218502022", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-410-2-0-v20141002", "id": "1371462217027433294", "creationTimestamp": "2014-10-01T21:02:17.237-07:00", "name": "coreos-stable-410-2-0-v20141002", "description": "CoreOS stable 410.2.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" }, "status": "READY", "archiveSizeBytes": "218492705", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-4-0-v20141010", "id": "12833596236536500344", "creationTimestamp": "2014-10-10T12:03:27.815-07:00", "name": "coreos-stable-444-4-0-v20141010", "description": "CoreOS stable 444.4.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" }, "status": "READY", "archiveSizeBytes": "210658089", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016", "id": "10607414105577455345", "creationTimestamp": "2014-10-16T13:19:45.855-07:00", "name": "coreos-stable-444-5-0-v20141016", "description": "CoreOS stable 444.5.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-494-3-0-v20141203" }, "status": "READY", "archiveSizeBytes": "210821109", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-494-3-0-v20141203", "id": "15950760641457393522", "creationTimestamp": "2014-12-03T10:58:23.402-08:00", "name": "coreos-stable-494-3-0-v20141203", "description": "CoreOS stable 494.3.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-494-4-0-v20141204" }, "status": "READY", "archiveSizeBytes": "216979469", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-494-4-0-v20141204", "id": "15925813888167964156", "creationTimestamp": "2014-12-04T12:34:55.496-08:00", "name": "coreos-stable-494-4-0-v20141204", "description": "CoreOS stable 494.4.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-494-5-0-v20141215" }, "status": "READY", "archiveSizeBytes": "217085384", "diskSizeGb": "9" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-494-5-0-v20141215", "id": "8254035885037496682", "creationTimestamp": "2014-12-15T11:57:55.509-08:00", "name": "coreos-stable-494-5-0-v20141215", "description": "CoreOS stable 494.5.0", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "217069504", "diskSizeGb": "9" } ] } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_addresses_lcaddressglobal_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_addresses_lcaddressglobal_delete.jso0000664000175000017500000000117412701023453034016 0ustar kamikami00000000000000{ "id": "7128783508312083402", "insertTime": "2013-06-26T12:21:44.075-07:00", "kind": "compute#operation", "name": "operation-global_addresses_lcaddressglobal_delete", "operationType": "delete", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_addresses_lcaddressglobal_delete", "startTime": "2013-06-26T12:21:44.110-07:00", "status": "PENDING", "targetId": "01531551729918243104", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/addresses/lcaddressglobal", "user": "foo@developer.gserviceaccount.com" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json0000664000175000017500000003645512701023453032465 0ustar kamikami00000000000000{ "id": "projects/project_name/zones/us-central1-a/machineTypes", "items": [ { "creationTimestamp": "2013-04-25T13:32:49.088-07:00", "description": "1 vCPU (shared physical core) and 0.6 GB RAM", "guestCpus": 1, "id": "1133568312750571513", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 614, "name": "f1-micro", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/f1-micro", "zone": "us-central1-a" }, { "creationTimestamp": "2013-04-25T13:32:45.550-07:00", "description": "1 vCPU (shared physical core) and 1.7 GB RAM", "guestCpus": 1, "id": "1500265464823777597", "imageSpaceGb": 0, "kind": "compute#machineType", "maximumPersistentDisks": 4, "maximumPersistentDisksSizeGb": "3072", "memoryMb": 1740, "name": "g1-small", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/g1-small", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:46:10.572-08:00", "description": "2 vCPUs, 1.8 GB RAM", "guestCpus": 2, "id": "16898271314080235997", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:47:07.825-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "15178384466070744001", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 1843, "name": "n1-highcpu-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:48:06.087-08:00", "description": "4 vCPUs, 3.6 GB RAM", "guestCpus": 4, "id": "04759000181765218034", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:49:07.563-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4", "state": "DEPRECATED" }, "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "01151097524490134507", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3686, "name": "n1-highcpu-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:50:15.128-08:00", "description": "8 vCPUs, 7.2 GB RAM", "guestCpus": 8, "id": "01206886442411821831", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:51:04.549-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8", "state": "DEPRECATED" }, "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "02507333096579477005", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7373, "name": "n1-highcpu-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:40:06.129-08:00", "description": "2 vCPUs, 13 GB RAM", "guestCpus": 2, "id": "05438694236916301519", "imageSpaceGb": 10, "kind": 
"compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:40:59.630-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "00770157291441082211", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 13312, "name": "n1-highmem-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:42:08.983-08:00", "description": "4 vCPUs, 26 GB RAM", "guestCpus": 4, "id": "11556032176405786676", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": "n1-highmem-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:43:17.400-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "05095504563332567951", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 26624, "name": 
"n1-highmem-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:44:25.985-08:00", "description": "8 vCPUs, 52 GB RAM", "guestCpus": 8, "id": "01717932668777642040", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8", "zone": "us-central1-a" }, { "creationTimestamp": "2012-11-16T11:45:08.195-08:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8", "state": "DEPRECATED" }, "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "07181827135536388552", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 53248, "name": "n1-highmem-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:48:14.670-07:00", "description": "1 vCPU, 3.75 GB RAM", "guestCpus": 1, "id": "11077240422128681563", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:48:34.258-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", 
"replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "state": "DEPRECATED" }, "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", "guestCpus": 1, "id": "10583029372018866711", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1-d", "scratchDisks": [ { "diskGb": 420 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:48:56.867-07:00", "description": "2 vCPUs, 7.5 GB RAM", "guestCpus": 2, "id": "17936898073622676356", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:49:19.448-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2", "state": "DEPRECATED" }, "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", "guestCpus": 2, "id": "06313284160910191442", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 7680, "name": "n1-standard-2-d", "scratchDisks": [ { "diskGb": 870 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:49:40.050-07:00", "description": "4 vCPUs, 15 GB RAM", "guestCpus": 4, "id": "09494636486174545828", "imageSpaceGb": 10, "kind": "compute#machineType", 
"maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": "n1-standard-4", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:50:05.677-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4", "state": "DEPRECATED" }, "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", "guestCpus": 4, "id": "00523085164784013586", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 15360, "name": "n1-standard-4-d", "scratchDisks": [ { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4-d", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:50:42.334-07:00", "description": "8 vCPUs, 30 GB RAM", "guestCpus": 8, "id": "04084282969223214132", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 30720, "name": "n1-standard-8", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-8", "zone": "us-central1-a" }, { "creationTimestamp": "2012-06-07T13:51:19.936-07:00", "deprecated": { "deprecated": "2013-12-02T20:00:00-08:00", "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-8", "state": "DEPRECATED" }, "description": "8 vCPUs, 30 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", "guestCpus": 8, "id": "00035824420671580077", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 30720, "name": 
"n1-standard-8-d", "scratchDisks": [ { "diskGb": 1770 }, { "diskGb": 1770 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-8-d", "zone": "us-central1-a" } ], "kind": "compute#machineTypeList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_addresses_post.json0000664000175000017500000000106312701023453030507 0ustar kamikami00000000000000{ "id": "16064059851942653139", "insertTime": "2013-06-26T12:21:40.299-07:00", "kind": "compute#operation", "name": "operation-global_addresses_post", "operationType": "insert", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_addresses_post", "startTime": "2013-06-26T12:21:40.358-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/addresses/lcaddressglobal", "user": "foo@developer.gserviceaccount.com" } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_det0000664000175000017500000000143012701023453033613 0ustar kamikami00000000000000{ "id": "3921383727105838816", "insertTime": "2013-06-26T16:48:35.357-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node-name_detachDisk_post", "operationType": "detachDisk", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_detachDisk_post", "startTime": "2013-06-26T16:48:35.398-07:00", "status": "PENDING", "targetId": "1845312225624811608", "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_www-pool.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_www-pool.js0000664000175000017500000000150012701023453033726 0ustar kamikami00000000000000{ "creationTimestamp": "2013-08-19T14:43:25.289-07:00", "description": "", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check" ], "id": "09965129111508633746", "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/www1", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/www2", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/www3" ], "kind": "compute#targetPool", "name": "www-pool", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/www-pool" }././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_rhel-cloud_global_licenses_rhel_server.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_rhel-cloud_global_licenses_rhel_se0000664000175000017500000000027412701023453034055 0ustar kamikami00000000000000{ "kind": "compute#license", "selfLink": "https://www.googleapis.com/compute/v1/projects/windows-cloud/global/licenses/rhel-7-server", "name": "rhel-7-server", "chargesUseFee": true } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_sslcertificates.json0000664000175000017500000000114313153541406030660 0ustar kamikami00000000000000{ "kind": "compute#sslCertificateList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/sslCertificates", "id": "projects/project_name/global/sslCertificates", "items": [ { "kind": "compute#sslCertificate", "id": "2064539516762881220", "creationTimestamp": "2016-08-30T10:28:11.926-07:00", "name": "example", "description": "my example ssl cert.", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/sslCertificates/example-cert", "certificate": "-----BEGIN CERTIFICATE-----\nfoobar==\n-----END CERTIFICATE-----\n" } ] } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_setNamedPorts.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_s0000664000175000017500000000117313153541406034116 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "myname", "zone": "us-central1-a", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname", "operationType": "compute.instanceGroups.setNamedPorts", "progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us_central1_a_instanceGroups_myname_setNamedPorts", "user": "1264195755357-compute@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_backendServices_no_backends.json0000664000175000017500000000074412701023453033113 0ustar kamikami00000000000000{ "kind": "compute#backendService", "id": "12158223690162062306", "creationTimestamp": "2014-08-14T14:37:36.728-07:00", 
"name": "web-service", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices/web-service", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check" ], "timeoutSec": 30, "port": 80, "protocol": "HTTP", "fingerprint": "5qm-QyYGyzw=", "portName": "" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_networks_default.json0000664000175000017500000000046012701023453031045 0ustar kamikami00000000000000{ "IPv4Range": "10.240.0.0/16", "creationTimestamp": "2013-06-19T12:37:13.233-07:00", "gatewayIPv4": "10.240.0.1", "id": "08257021638942464470", "kind": "compute#network", "name": "default", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default" }././@LongLink0000000000000000000000000000020000000000000011205 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthCheck0000664000175000017500000000117412701023453034106 0ustar kamikami00000000000000{ "id": "1159296103027566387", "insertTime": "2013-09-02T22:18:02.509-07:00", "kind": "compute#operation", "name": "operation-global_httpHealthChecks_lchealthcheck_delete", "operationType": "delete", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_lchealthcheck_delete", "startTime": "2013-09-02T22:18:02.558-07:00", "status": "DONE", "targetId": "06860603312991823381", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", "user": "user@gserviceaccount.com" }././@LongLink0000000000000000000000000000014600000000000011216 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_routes_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_routes_post.jso0000664000175000017500000000104412701023453034217 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "17322940416642455149", "name": "operation-global_routes_lcdemoroute_post", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/routes/lcdemoroute", "status": "DONE", "user": "erjohnso@google.com", "progress": 100, "insertTime": "2014-11-25T11:00:44.049-08:00", "startTime": "2014-11-25T11:00:44.385-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_routes_lcdemoroute_post" } ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standa0000664000175000017500000000073612701023453033556 0ustar kamikami00000000000000{ "creationTimestamp": "2012-06-07T13:48:14.670-07:00", "description": "1 vCPU, 3.75 GB RAM", "guestCpus": 1, "id": "11077240422128681563", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", "zone": "europe-west1-a" }././@LongLink0000000000000000000000000000017000000000000011213 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_subnetworks_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_su0000664000175000017500000000137613153541406034127 0ustar kamikami00000000000000{ "id": "16064059851942653139", "insertTime": "2013-06-26T12:21:40.299-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_subnetworks_post", "operationType": "insert", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_subnetworks_post", "startTime": "2013-06-26T12:21:40.358-07:00", "status": "DONE", "targetId": "01531551729918243104", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112", "user": "897001307951@developer.gserviceaccount.com" } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_instanceTemplates_insert.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_instanceTemplat0000664000175000017500000000113713153541406034202 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "my_instance_template1", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates/my_instance_template1", "operationType": "compute.instanceTemplates.insert", "progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_instanceTemplates_my_instance_template1_insert", "user": "1264195755357-compute@developer.gserviceaccount.com" } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions.json0000664000175000017500000000574312701023453025624 0ustar kamikami00000000000000{ "id": "projects/project_name/regions", "items": [ { "creationTimestamp": "2013-04-19T17:58:16.641-07:00", "description": "europe-west1", "id": "0827308347805275727", "kind": "compute#region", "name": "europe-west1", "quotas": [ { "limit": 24.0, "metric": "CPUS", "usage": 0.0 }, { "limit": 5120.0, "metric": "DISKS_TOTAL_GB", "usage": 0.0 }, { "limit": 7.0, "metric": "STATIC_ADDRESSES", "usage": 0.0 }, { "limit": 23.0, "metric": "IN_USE_ADDRESSES", "usage": 0.0 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1", "status": "UP", "zones": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a", "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b" ] }, { "creationTimestamp": "2013-04-19T18:17:05.050-07:00", "description": "us-central1", "id": "06713580496607310378", "kind": "compute#region", "name": "us-central1", "quotas": [ { "limit": 24.0, "metric": "CPUS", "usage": 3.0 }, { "limit": 5120.0, "metric": "DISKS_TOTAL_GB", "usage": 10.0 }, { "limit": 7.0, "metric": "STATIC_ADDRESSES", "usage": 0.0 }, { "limit": 23.0, "metric": "IN_USE_ADDRESSES", "usage": 4.0 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "status": "UP", "zones": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" ] }, { "creationTimestamp": "2013-04-19T18:19:05.482-07:00", "description": "us-central2", "id": "04157375529195793136", "kind": "compute#region", "name": "us-central2", "quotas": [ { "limit": 24.0, "metric": "CPUS", "usage": 0.0 }, { "limit": 5120.0, "metric": "DISKS_TOTAL_GB", "usage": 0.0 }, { "limit": 7.0, "metric": "STATIC_ADDRESSES", "usage": 0.0 }, { "limit": 23.0, 
"metric": "IN_USE_ADDRESSES", "usage": 0.0 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central2", "status": "UP", "zones": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" ] } ], "kind": "compute#regionList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions" }././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_setUsageExportBucket.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_setUsageExportBucket.j0000664000175000017500000000105012701023453034064 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "17203609782824174066", "name": "operation-setUsageExportBucket", "operationType": "setUsageExportBucket", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name", "targetId": "8116069320260064853", "status": "DONE", "user": "erjohnso@google.com", "progress": 100, "insertTime": "2014-11-21T06:58:03.602-08:00", "startTime": "2014-11-21T06:58:04.018-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-setUsageExportBucket" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions-paged-1.json0000664000175000017500000000421712701023453027033 0ustar kamikami00000000000000{ "kind": "compute#regionList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions", "id": "projects/project_name/regions", "items": [ { "kind": "compute#region", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/asia-east1", "id": "1220", "creationTimestamp": "2014-04-11T13:47:12.495-07:00", "name": "asia-east1", "description": "asia-east1", "status": "UP", "zones": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/asia-east1-a" ], "quotas": [ { "metric": "CPUS", "limit": 24.0, 
"usage": 0.0 }, { "metric": "DISKS_TOTAL_GB", "limit": 5120.0, "usage": 0.0 }, { "metric": "STATIC_ADDRESSES", "limit": 7.0, "usage": 0.0 }, { "metric": "IN_USE_ADDRESSES", "limit": 23.0, "usage": 0.0 }, { "metric": "SSD_TOTAL_GB", "limit": 1024.0, "usage": 0.0 }, { "metric": "LOCAL_SSD_TOTAL_GB", "limit": 1500.0, "usage": 0.0 } ] }, { "kind": "compute#region", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1", "id": "1100", "creationTimestamp": "2014-04-11T13:47:12.495-07:00", "name": "europe-west1", "description": "europe-west1", "status": "UP", "zones": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a", "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b" ], "quotas": [ { "metric": "CPUS", "limit": 24.0, "usage": 0.0 }, { "metric": "DISKS_TOTAL_GB", "limit": 5120.0, "usage": 0.0 }, { "metric": "STATIC_ADDRESSES", "limit": 7.0, "usage": 0.0 }, { "metric": "IN_USE_ADDRESSES", "limit": 23.0, "usage": 0.0 }, { "metric": "SSD_TOTAL_GB", "limit": 1024.0, "usage": 0.0 }, { "metric": "LOCAL_SSD_TOTAL_GB", "limit": 1500.0, "usage": 0.0 } ] } ], "nextPageToken": "CjQIz5W-w6HRxAI6KQoCGAEKAiAACgIYAQoCIAAKAhgTCg4qDGV1cm9wZS13ZXN0MQoDIMwI" } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lb_pool_getHealth.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lb_pool_get0000664000175000017500000000042212701023453034007 0ustar kamikami00000000000000{ "kind": "compute#targetPoolInstanceHealth", "healthStatus": [ { "ipAddress": "130.99.99.99", "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/libcloud-lb-demo-www-000", "healthState": "UNHEALTHY" } ] } ././@LongLink0000000000000000000000000000020600000000000011213 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000141112701023453033775 0ustar kamikami00000000000000{ "id": "10507122129283663728", "insertTime": "2013-06-26T15:03:02.766-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node-name_reset_post", "operationType": "reset", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_reset_post", "startTime": "2013-06-26T15:03:02.813-07:00", "status": "DONE", "targetId": "1845312225624811608", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_diskTypes_pd_standard.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_diskTypes_pd_standard.0000664000175000017500000000067312701023453033657 0ustar kamikami00000000000000{ "creationTimestamp": "2014-06-02T11:07:28.530-07:00", "defaultDiskSizeGb": "500", "description": "Standard Persistent Disk", "kind": "compute#diskType", "name": "pd-standard", "selfLink": "https://www.googleapis.com/compute/v1/projects/gifted-electron-225/zones/europe-west1-a/diskTypes/pd-standard", "validDiskSize": "10GB-10240GB", "zone": "https://www.googleapis.com/compute/v1/projects/gifted-electron-225/zones/europe-west1-a" } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_startnode.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_st0000664000175000017500000000123012701023453034160 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "18431811683007150988", "name": "operation-startnode", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "start", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/stopped-node", "targetId": "12335588484913203363", "status": "DONE", "user": "erjohnso@google.com", "progress": 100, "insertTime": "2015-01-30T06:55:11.503-08:00", "startTime": "2015-01-30T06:55:11.847-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-startnode" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_sslcertificates_example.json0000664000175000017500000000057113153541406032377 0ustar kamikami00000000000000{ "kind": "compute#sslCertificate", "id": "2064539516762881220", "creationTimestamp": "2016-08-30T10:28:11.926-07:00", "name": "example", "description": "my example ssl cert.", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/sslCertificates/example", "certificate": "-----BEGIN CERTIFICATE-----\nfoobar==\n-----END CERTIFICATE-----\n" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json0000664000175000017500000000140312701023453033253 0ustar kamikami00000000000000{ "id": "1510575454210533141", "insertTime": "2013-06-26T20:57:34.366-07:00", "kind": "compute#operation", "name": "operation-zones_europe-west1-a_instances_post", "operationType": "insert", "progress": 0, "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/operations/operation-zones_europe-west1-a_instances_post", "startTime": "2013-06-26T20:57:34.453-07:00", "status": "PENDING", "targetId": "14308265828754333159", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" }././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-east1_subnetworks_cf_972cf02e6ad49113.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-east1_subnetworks_cf_972cf02e6ad0000664000175000017500000000102213153541406033317 0ustar kamikami00000000000000{ "kind": "compute#subnetwork", "id": "4297043163355844284", "creationTimestamp": "2016-10-01T05:34:27.209-07:00", "gatewayAddress": "10.128.0.1", "name": "cf-972cf02e6ad49113", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/cf", "ipCidrRange": "10.128.0.0/20", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-east1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49113" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-east1-b_instanceGroupManagers.json0000664000175000017500000000223113153541406033626 0ustar kamikami00000000000000{ "kind": "compute#instanceGroupManagerList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b/instanceGroupManagers", "id": "projects/project_name/zones/us-east1-b/instanceGroupManagers", "items": [ { "kind": "compute#instanceGroupManager", "id": "8588434319274454491", "creationTimestamp": "2016-07-18T10:34:44.679-07:00", "zone": 
"https://content.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b", "name": "myinstancegroup", "instanceTemplate": "https://content.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates/my-instance-template1", "instanceGroup": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b/instanceGroups/myinstancegroup", "baseInstanceName": "myinstancegroup", "fingerprint": "5bKcxzAnGOg=", "currentActions": { "none": 2, "creating": 0, "recreating": 0, "deleting": 0, "abandoning": 0, "restarting": 0, "refreshing": 0 }, "targetSize": 2, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b/instanceGroupManagers/myinstancegroup" } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones.json0000664000175000017500000000711612701023453025310 0ustar kamikami00000000000000{ "id": "projects/project_name/zones", "items": [ { "kind": "compute#zone", "selfLink": "https://www.googleapis.com/compute/v1/projects/verb-test/zones/asia-east1-a", "id": "2220", "creationTimestamp": "2014-05-30T18:35:16.575-07:00", "name": "asia-east1-a", "description": "asia-east1-a", "status": "UP", "region": "https://www.googleapis.com/compute/v1/projects/verb-test/regions/asia-east1" }, { "creationTimestamp": "2013-02-05T16:19:23.254-08:00", "description": "europe-west1-a", "id": "13416642339679437530", "kind": "compute#zone", "maintenanceWindows": [ { "beginTime": "2014-01-18T12:00:00.000-08:00", "description": "maintenance zone", "endTime": "2014-02-02T12:00:00.000-08:00", "name": "2014-01-18-planned-outage" } ], "name": "europe-west1-a", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a", "status": "UP" }, { "creationTimestamp": "2013-02-05T16:19:23.254-08:00", "description": "europe-west1-b", "id": "20623650177407096", "kind": "compute#zone", "maintenanceWindows": 
[ { "beginTime": "2014-03-15T12:00:00.000-07:00", "description": "maintenance zone", "endTime": "2014-03-30T12:00:00.000-07:00", "name": "2014-03-15-planned-outage" } ], "name": "europe-west1-b", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b", "status": "UP" }, { "creationTimestamp": "2013-02-05T16:19:23.269-08:00", "description": "us-central1-a", "id": "13462829244527433283", "kind": "compute#zone", "name": "us-central1-a", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "status": "UP" }, { "creationTimestamp": "2013-02-05T16:19:23.269-08:00", "description": "us-central1-b", "id": "1045862591201432620", "kind": "compute#zone", "name": "us-central1-b", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b", "status": "UP" }, { "creationTimestamp": "2013-02-05T16:19:23.257-08:00", "deprecated": { "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b", "state": "DEPRECATED" }, "description": "us-central2-a", "id": "1001467574647549152", "kind": "compute#zone", "maintenanceWindows": [ { "beginTime": "2013-12-31T12:00:00.000-08:00", "description": "maintenance zone", "endTime": "2014-07-01T12:00:00.000-07:00", "name": "2013-12-31-planned-outage" } ], "name": "us-central2-a", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a", "status": "UP" } ], "kind": "compute#zoneList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones" } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_addresses_lcaddressglobal.json0000664000175000017500000000051512701023453032650 0ustar kamikami00000000000000{ "address": "173.99.99.99", "creationTimestamp": "2013-06-26T12:21:40.625-07:00", "description": "", "id": "01531551729918243104", "kind": "compute#address", "name": "lcaddressglobal", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/addresses/lcaddressglobal", "status": "RESERVED" } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standar0000664000175000017500000000073412701023453033534 0ustar kamikami00000000000000{ "creationTimestamp": "2012-06-07T13:48:14.670-07:00", "description": "1 vCPU, 3.75 GB RAM", "guestCpus": 1, "id": "11077240422128681563", "imageSpaceGb": 10, "kind": "compute#machineType", "maximumPersistentDisks": 16, "maximumPersistentDisksSizeGb": "10240", "memoryMb": 3840, "name": "n1-standard-1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "zone": "us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_backendServices_web_service.json0000664000175000017500000000167713153541406033155 0ustar kamikami00000000000000{ "kind": "compute#backendService", "id": "1814698108461677231", "creationTimestamp": "2014-08-15T16:14:43.729-07:00", "name": "web-service", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices/web-service", "backends": [ { "description": "", "group": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myinstancegroup", "balancingMode": "RATE", "maxRate": 100, "capacityScaler": 1.0 }, { "description": "", "group": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myinstancegroup", "balancingMode": "RATE", "maxRate": 150, "capacityScaler": 1.0 } ], "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check" ], "timeoutSec": 30, "port": 80, "protocol": "HTTP", "fingerprint": "ha9VAg-MJ5M=", "portName": "" } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instanceGroup_myinstancegroup.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instanceGroup_myinstanc0000664000175000017500000000165713153541406033774 0ustar kamikami00000000000000{ "kind": "compute#instanceGroup", "id": "1968709502073089769", "creationTimestamp": "2016-08-11T16:53:42.413-07:00", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b", "name": "managed-instance-group-1", "description": "This instance group is controlled by Instance Group Manager 'myinstancegroup'. 
To modify instances in this group, use the Instance Group Manager API: https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers", "network": "https://content.googleapis.com/compute/v1/projects/project_name/global/networks/default", "fingerprint": "42WmSpB8rSM=", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instanceGroups/myinstancegroup", "size": 4, "subnetwork": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112" } ././@LongLink0000000000000000000000000000021300000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000150512701023453034001 0ustar kamikami00000000000000{ "endTime": "2013-06-26T16:48:41.278-07:00", "id": "3921383727105838816", "insertTime": "2013-06-26T16:48:35.357-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node-name_detachDisk_post", "operationType": "detachDisk", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_detachDisk_post", "startTime": "2013-06-26T16:48:35.398-07:00", "status": "DONE", "targetId": "1845312225624811608", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_sslcertificates_post.json0000664000175000017500000000111213153541406031721 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "5564862567931215044", "name": 
"operation-1472578091714-53b4d4e0f85d1-cf587a68-9d7a9200", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/sslCertificates/example", "targetId": "2064539516762881220", "status": "PENDING", "user": "1294195755358-compute@developer.gserviceaccount.com", "progress": 0, "insertTime": "2016-08-30T10:28:11.948-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_sslcertificates_post" } ././@LongLink0000000000000000000000000000022100000000000011210 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addInstance_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_ta0000664000175000017500000000151212701023453034067 0ustar kamikami00000000000000{ "endTime": "2013-09-03T01:29:07.021-07:00", "id": "04072826501537092633", "insertTime": "2013-09-03T01:29:03.082-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lctargetpool_addInstance_post", "operationType": "addInstance", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_addInstance_post", "startTime": "2013-09-03T01:29:03.145-07:00", "status": "DONE", "targetId": "16862638289615591831", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_addInstances.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_a0000664000175000017500000000117113153541406034072 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "myname", "zone": "us-central1-a", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname", "operationType": "compute.instanceGroups.addInstances", "progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us_central1_a_instanceGroups_myname_addInstances", "user": "1264195755357-compute@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000022000000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node_name_addAccessConfig_done.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000131512701023453034000 0ustar kamikami00000000000000{ "endTime": "2013-06-26T16:13:08.382-07:00", "id": "1858155812259649243", "insertTime": "2013-06-26T16:12:51.492-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node_name_addAccessConfig_post", "operationType": "insert", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node_name_addAccessConfig_post", "startTime": "2013-06-26T16:12:51.537-07:00", "status": "DONE", "targetId": "16630486471904253898", "user": "foo@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } 
././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_targetHttpProxies_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_targetHttpProxi0000664000175000017500000000121212701023453034204 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "7514147734702416613", "name": "operation_global_targetHttpProxies_post", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies/web-proxy", "targetId": "5242670162541625113", "status": "DONE", "user": "user@developer.gserviceaccount.com", "progress": 100, "insertTime": "2014-10-27T16:22:40.726-07:00", "startTime": "2014-10-27T16:22:41.027-07:00", "endTime": "2014-10-27T16:22:41.657-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_targetHttpProxies_post" } ././@LongLink0000000000000000000000000000017700000000000011222 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeInstance_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpoo0000664000175000017500000000144512701023453034053 0ustar kamikami00000000000000{ "id": "1815686149437875016", "insertTime": "2013-09-03T01:28:53.049-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lctargetpool_removeInstance_post", "operationType": "removeInstance", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_removeInstance_post", "startTime": "2013-09-03T01:28:53.109-07:00", "status": "PENDING", "targetId": 
"16862638289615591831", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" } ././@LongLink0000000000000000000000000000021000000000000011206 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_createSnapshot_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_di0000664000175000017500000000153612701023453033773 0ustar kamikami00000000000000{ "endTime": "2013-12-16T13:04:01.580-08:00", "id": "0158330665043557584", "insertTime": "2013-12-16T13:03:51.000-08:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_disks_lcdisk_createSnapshot_post", "operationType": "createSnapshot", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_lcdisk_createSnapshot_post", "startTime": "2013-12-16T13:03:51.042-08:00", "status": "DONE", "targetId": "07494414044179227172", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "user": "487551519631-t6qvu2na6p4u9ptm46bsdujf0ohbdro7@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000022400000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_ta0000664000175000017500000000152312701023453034071 0ustar kamikami00000000000000{ "endTime": "2013-09-03T01:28:49.271-07:00", "id": "17341029456963557514", "insertTime": "2013-09-03T01:28:40.774-07:00", "kind": "compute#operation", "name": 
"operation-regions_us-central1_targetPools_lctargetpool_addHealthCheck_post", "operationType": "addHealthCheck", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_addHealthCheck_post", "startTime": "2013-09-03T01:28:40.838-07:00", "status": "DONE", "targetId": "16862638289615591831", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_d0000664000175000017500000000115513153541406034077 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "myname", "zone": "us-central1-a", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname", "operationType": "compute.instanceGroups.delete", "progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us_central1_a_instanceGroups_myname_delete", "user": "1264195755357-compute@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.jso0000664000175000017500000000135712701023453033637 0ustar kamikami00000000000000{ "id": "06887337364510109333", 
"insertTime": "2013-06-26T10:06:11.835-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_disks_lcdisk_delete", "operationType": "delete", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_lcdisk_delete", "startTime": "2013-06-26T10:06:12.006-07:00", "status": "PENDING", "targetId": "16109451798967042451", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node_name_getSerialOutput.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node_name_get0000664000175000017500000000033612701023453033704 0ustar kamikami00000000000000{ "kind": "compute#serialPortOutput", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name/serialPort", "contents": "This is some serial\r\noutput for you." 
} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_instanceTemplates.json0000664000175000017500000000550013153541406031155 0ustar kamikami00000000000000{ "id": "projects/project_name/global/instanceTemplates", "items": [ { "creationTimestamp": "2016-07-18T09:53:22.323-07:00", "description": "", "id": "8161922600535111533", "kind": "compute#instanceTemplate", "name": "my-instance-template1", "properties": { "canIpForward": false, "disks": [ { "autoDelete": true, "boot": true, "deviceName": "my-instance-template1", "initializeParams": { "diskSizeGb": "10", "diskType": "pd-standard", "sourceImage": "projects/project_name/global/images/my-new-image1" }, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "type": "PERSISTENT" } ], "machineType": "n1-standard-1", "metadata": { "fingerprint": "Jt9ALJ07B8Q=", "kind": "compute#metadata" }, "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "type": "ONE_TO_ONE_NAT" } ], "network": "https://content.googleapis.com/compute/v1/projects/project_name/global/networks/default" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE", "preemptible": false }, "serviceAccounts": [ { "email": "default", "scopes": [ "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring.write", "https://www.googleapis.com/auth/servicecontrol", "https://www.googleapis.com/auth/service.management" ] } ] }, "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates/my-instance-template1" } ], "kind": "compute#instanceTemplateList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json0000664000175000017500000000053012701023453031427 0ustar kamikami00000000000000{ "IPv4Range": "10.11.0.0/16", 
"creationTimestamp": "2013-06-26T10:05:03.500-07:00", "gatewayIPv4": "10.11.0.1", "description": "A custom network", "id": "16211908079305042870", "kind": "compute#network", "name": "lcnetwork", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/lcnetwork" } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_custom_node.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_custom_node.j0000664000175000017500000000270512701023453034035 0ustar kamikami00000000000000{ "canIpForward": false, "creationTimestamp": "2013-12-13T10:45:23.351-08:00", "disks": [ { "boot": true, "deviceName": "persistent-disk-0", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "type": "PERSISTENT" } ], "id": "4006034190819017667", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "custom-node", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.236.58.15", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.72.75" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/custom-node", "status": "TERMINATED", "tags": { "fingerprint": "42WmSpB8rSM=" }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_images_family_notfound.json0000664000175000017500000000051313153541406032213 0ustar kamikami00000000000000{ "error": { "code": 404, "errors": [ { "domain": "global", "message": "The resource 'projects/project-name/global/images/family/coreos' was not found", "reason": "notFound" } ], "message": "The resource 'projects/project-name/global/images/family/coreos' was not found" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_urlMaps_post.json0000664000175000017500000000103512701023453030154 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "14014132704089638847", "name": "operation-global_urlMaps_post", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/urlMaps/test-map2", "status": "PENDING", "user": "user@developer.gserviceaccount.com", "progress": 0, "insertTime": "2014-10-27T15:21:17.438-07:00", "startTime": "2014-10-27T15:21:17.631-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_urlMaps_post" } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_subnetworks_cf_972cf02e6ad49112.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_subnetworks_cf_972cf02e0000664000175000017500000000105013153541406033421 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#subnetwork", "id": "4297043163355844284", "creationTimestamp": "2016-03-25T05:34:27.209-07:00", "gatewayAddress": "10.128.0.1", "name": "cf-972cf02e6ad49112", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/cf", "ipCidrRange": "10.128.0.0/20", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112" } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_libcloud-lb-demo-healthcheck.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_libcloud-lb-demo-he0000664000175000017500000000065712701023453033537 0ustar kamikami00000000000000{ "checkIntervalSec": 5, "creationTimestamp": "2013-09-02T22:25:44.759-07:00", "healthyThreshold": 2, "id": "16372093408499501663", "kind": "compute#httpHealthCheck", "name": "libcloud-lb-demo-healthcheck", "port": 80, "requestPath": "/", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck", "timeoutSec": 5, "unhealthyThreshold": 2 }././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_set0000664000175000017500000000141712701023453033637 0ustar kamikami00000000000000{ "id": "8115150846190320932", "insertTime": "2013-06-26T21:20:03.962-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node-name_setTags_post", "operationType": "setTags", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_setTags_post", "startTime": "2013-06-26T21:20:04.103-07:00", "status": "PENDING", "targetId": "1845312225624811608", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "user": "897001307951@developer.gserviceaccount.com", "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_ad0000664000175000017500000000140412701023453034047 0ustar kamikami00000000000000{ "id": "7128783508312083402", "insertTime": "2013-06-26T12:21:44.075-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_addresses_lcaddress_delete", "operationType": "delete", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_lcaddress_delete", "startTime": "2013-06-26T12:21:44.110-07:00", "status": "DONE", "targetId": "01531551729918243104", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", "user": "897001307951@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_firewalls_post.json0000664000175000017500000000106612701023453030525 0ustar kamikami00000000000000{ "id": "16789512465352307784", "insertTime": "2013-06-26T20:51:06.068-07:00", "kind": "compute#operation", "name": "operation-global_firewalls_post", "operationType": "insert", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_post", "startTime": "2013-06-26T20:51:06.128-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", "user": "897001307951@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000021000000000000011206 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000147412701023453034006 0ustar kamikami00000000000000{ "endTime": "2013-06-26T21:20:10.487-07:00", "id": "8115150846190320932", "insertTime": "2013-06-26T21:20:03.962-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node-name_setTags_post", "operationType": "setTags", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_setTags_post", "startTime": "2013-06-26T21:20:04.103-07:00", "status": "DONE", "targetId": "1845312225624811608", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json0000664000175000017500000000072412701023453031750 0ustar kamikami00000000000000{ "creationTimestamp": "2013-12-16T13:03:51.345-08:00", "description": "", "diskSizeGb": "10", "id": "17482266715940883688", "kind": "compute#snapshot", "name": "lcsnapshot", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots/lcsnapshot", "sourceDisk": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "sourceDiskId": "-2511816066479461182", "status": "READY" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_instances.json0000664000175000017500000004244612701023453030320 0ustar kamikami00000000000000{ "id": "projects/project_name/aggregated/instances", "items": { "zones/europe-west1-a": { 
"instances": [ { "canIpForward": false, "creationTimestamp": "2013-12-13T10:43:58.782-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-demo-europe-multiple-nodes-000", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-multiple-nodes-000", "type": "PERSISTENT" } ], "id": "10947706194464948790", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "libcloud-demo-europe-multiple-nodes-000", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "192.158.28.252", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.122.85" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-multiple-nodes-000", "status": "RUNNING", "tags": { "fingerprint": "W7t6ZyTyIrc=", "items": [ "libcloud" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" }, { "canIpForward": false, "creationTimestamp": "2013-12-13T10:43:37.267-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-demo-europe-boot-disk", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-boot-disk", "type": "PERSISTENT" } ], "id": "517678477070693411", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", 
"metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "libcloud-demo-europe-persist-node", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.251.128.32", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.240.204" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-persist-node", "status": "RUNNING", "tags": { "fingerprint": "EbZdwVRtKyg=", "items": [ "libcloud", "newtag" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" }, { "canIpForward": false, "creationTimestamp": "2013-12-13T10:43:12.706-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-demo-europe-np-node", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-np-node", "type": "PERSISTENT" }, { "deviceName": "libcloud-demo-europe-attach-disk", "index": 1, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-attach-disk", "type": "PERSISTENT" } ], "id": "3421745795082776097", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "libcloud-demo-europe-np-node", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.251.128.10", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.221.125" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", "status": "RUNNING", "tags": { "fingerprint": "W7t6ZyTyIrc=", "items": [ "libcloud" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" } ] }, "zones/europe-west1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/europe-west1-b" } ], "message": "There are no results for scope 'zones/europe-west1-b' on this page." } }, "zones/us-central1-a": { "instances": [ { "canIpForward": false, "creationTimestamp": "2013-12-13T10:45:23.351-08:00", "disks": [ { "boot": true, "deviceName": "persistent-disk-0", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", "type": "PERSISTENT" } ], "id": "4006034190819017667", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "node-name", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.236.58.15", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.72.75" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "status": "RUNNING", "tags": { "fingerprint": "42WmSpB8rSM=" }, "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } ] }, "zones/us-central1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/us-central1-b" } ], "message": "There are no results for scope 'zones/us-central1-b' on this page." } }, "zones/us-central2-a": { "instances": [ { "canIpForward": false, "creationTimestamp": "2013-12-13T10:42:03.180-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-demo-boot-disk", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-boot-disk", "type": "PERSISTENT" } ], "id": "2184470466384636715", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "libcloud-demo-persist-node", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "173.255.120.70", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.235.148" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/instances/libcloud-demo-persist-node", "status": "RUNNING", "tags": { "fingerprint": "EbZdwVRtKyg=", "items": [ "libcloud", "newtag" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" }, { "canIpForward": false, "creationTimestamp": "2013-12-13T10:41:47.059-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-demo-np-node", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-np-node", "type": "PERSISTENT" }, { "deviceName": "libcloud-demo-attach-disk", "index": 1, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-attach-disk", "type": "PERSISTENT" } ], "id": "18059053700460342373", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "libcloud-demo-np-node", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "173.255.120.58", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.45.206" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/instances/libcloud-demo-np-node", "status": "RUNNING", "tags": { "fingerprint": "W7t6ZyTyIrc=", "items": [ "libcloud" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" }, { "canIpForward": false, "creationTimestamp": "2013-12-13T10:42:24.841-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-demo-multiple-nodes-000", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-multiple-nodes-000", "type": "PERSISTENT" } ], "id": "4196532528539285480", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": 
"compute#metadata" }, "name": "libcloud-demo-multiple-nodes-000", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "173.255.120.211", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.218.251" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/instances/libcloud-demo-multiple-nodes-000", "status": "RUNNING", "tags": { "fingerprint": "W7t6ZyTyIrc=", "items": [ "libcloud" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" }, { "canIpForward": false, "creationTimestamp": "2013-12-13T10:42:19.041-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-demo-multiple-nodes-001", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-multiple-nodes-001", "type": "PERSISTENT" } ], "id": "1066146046261788296", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "libcloud-demo-multiple-nodes-001", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "173.255.120.207", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.24.29" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/instances/libcloud-demo-multiple-nodes-001", "status": "RUNNING", 
"tags": { "fingerprint": "W7t6ZyTyIrc=", "items": [ "libcloud" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" } ] } }, "kind": "compute#instanceAggregatedList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/instances" }././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_put.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_put.j0000664000175000017500000000116612701023453033731 0ustar kamikami00000000000000{ "id": "6717642434182216609", "insertTime": "2013-09-03T02:19:55.574-07:00", "kind": "compute#operation", "name": "operation-global_httpHealthChecks_lchealthcheck_put", "operationType": "update", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_lchealthcheck_put", "startTime": "2013-09-03T02:19:55.628-07:00", "status": "PENDING", "targetId": "0742691415598204878", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", "user": "user@gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_post.json0000664000175000017500000000106712701023453031764 0ustar kamikami00000000000000{ "id": "3903393118268087410", "insertTime": "2013-09-03T02:19:54.629-07:00", "kind": "compute#operation", "name": "operation-global_httpHealthChecks_post", "operationType": "insert", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_post", "startTime": "2013-09-03T02:19:54.718-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", "user": "user@gserviceaccount.com" 
}././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_de0000664000175000017500000000140512701023453033327 0ustar kamikami00000000000000{ "id": "3319596145594427549", "insertTime": "2013-06-26T16:13:12.903-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_lcnode-000_delete", "operationType": "delete", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-000_delete", "startTime": "2013-06-26T16:13:12.948-07:00", "status": "PENDING", "targetId": "5390075309006132922", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-000", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000021600000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/_zones_us_central1_a_instanceGroupManagers_myinstancegroup_listManagedInstances.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/_zones_us_central1_a_instanceGroupManagers_0000664000175000017500000000163313153541406034020 0ustar kamikami00000000000000{ "managedInstances": [ { "currentAction": "NONE", "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/base-foo-2vld", "instanceStatus": "RUNNING" }, { "currentAction": "NONE", "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/base-foo-il30", "instanceStatus": "RUNNING" }, { "currentAction": "NONE", "instance": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/base-foo-lnnr", "instanceStatus": "RUNNING" }, { "currentAction": "NONE", "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/base-foo-s41w", "instanceStatus": "RUNNING" } ] } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_suse-cloud_global_licenses_sles_12.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_suse-cloud_global_licenses_sles_120000664000175000017500000000025512701023453033730 0ustar kamikami00000000000000{ "kind": "compute#license", "selfLink": "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/licenses/sles-12", "name": "sles-12", "chargesUseFee": true } ././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_fo0000664000175000017500000000144112701023453034070 0ustar kamikami00000000000000{ "endTime": "2013-09-03T00:17:33.965-07:00", "id": "0651769405845333112", "insertTime": "2013-09-03T00:17:25.381-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_forwardingRules_post", "operationType": "insert", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_forwardingRules_post", "startTime": "2013-09-03T00:17:25.434-07:00", "status": "DONE", "targetId": "10901665092293158938", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", "user": 
"user@gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools.json0000664000175000017500000000715712701023453032426 0ustar kamikami00000000000000{ "id": "projects/project_name/regions/us-central1/targetPools", "items": [ { "creationTimestamp": "2013-09-03T00:51:05.300-07:00", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" ], "id": "13598380121688918358", "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001" ], "kind": "compute#targetPool", "name": "lctargetpool", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool" }, { "creationTimestamp": "2013-09-02T22:25:45.817-07:00", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" ], "id": "16862638289615591831", "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-002", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000" ], "kind": "compute#targetPool", "name": "libcloud-lb-demo-lb-tp", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" }, { "kind": "compute#targetPool", "id": "17914960036329768493", "creationTimestamp": 
"2014-11-26T08:37:28.831-08:00", "name": "backup-pool", "description": "", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/default-health-check" ], "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-002" ], "sessionAffinity": "CLIENT_IP", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/backup-pool" }, { "kind": "compute#targetPool", "id": "11474672125700394323", "creationTimestamp": "2014-11-24T12:52:13.366-08:00", "name": "lb-pool", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" ], "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000" ], "sessionAffinity": "NONE", "failoverRatio": 0.1, "backupPool": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/backup-pool", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lb-pool" } ], "kind": "compute#targetPoolList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json0000664000175000017500000000051212701023453033625 0ustar kamikami00000000000000{ "IPv4Range": "10.10.0.0/16", "creationTimestamp": "2013-06-26T09:48:15.703-07:00", "gatewayIPv4": "10.10.0.1", "id": "17172579178188075621", "kind": "compute#network", "name": "libcloud-demo-network", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/networks/libcloud-demo-network" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json0000664000175000017500000000130612701023453033053 0ustar kamikami00000000000000{ "id": "1858155812259649243", "insertTime": "2013-06-26T16:12:51.492-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_post", "operationType": "insert", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_post", "startTime": "2013-06-26T16:12:51.537-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_listInstances.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_l0000664000175000017500000000122313153541406034103 0ustar kamikami00000000000000{ "id": "projects/project_name/zones/us-central1-a/instanceGroups/myname/listInstances", "items": [ { "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "status": "RUNNING" }, { "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", "status": "RUNNING" } ], "kind": "compute#instanceGroupsListInstances", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname/listInstances" } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot_delete.json0000664000175000017500000000122612701023453033270 0ustar kamikami00000000000000{ "id": "5994251357251874363", "insertTime": "2013-12-16T13:04:03.831-08:00", "kind": "compute#operation", "name": "operation-global_snapshots_lcsnapshot_delete", "operationType": "delete", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_snapshots_lcsnapshot_delete", "startTime": "2013-12-16T13:04:03.924-08:00", "status": "PENDING", "targetId": "17482266715940883688", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots/lcsnapshot", "user": "487551519631-t6qvu2na6p4u9ptm46bsdujf0ohbdro7@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_subnetworks_post.json0000664000175000017500000000133113153541406033547 0ustar kamikami00000000000000{ "id": "16064059851942653139", "insertTime": "2013-06-26T12:21:40.299-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_subnetworks_post", "operationType": "insert", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_subnetworks_post", "startTime": "2013-06-26T12:21:40.358-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112", "user": "897001307951@developer.gserviceaccount.com" } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_instanceTemplates_my_instance_template1.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_instanceTemplates_my_instance_templa0000664000175000017500000000416713153541406034150 0ustar kamikami00000000000000{ "creationTimestamp": "2016-07-18T09:53:22.323-07:00", "description": "", "id": "8161922600535111533", "kind": "compute#instanceTemplate", "name": "my-instance-template1", "properties": { "canIpForward": false, "disks": [ { "autoDelete": true, "boot": true, "deviceName": "my-instance-template1", "initializeParams": { "diskSizeGb": "10", "diskType": "pd-standard", "sourceImage": "projects/project_name/global/images/my-new-image1" }, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "type": "PERSISTENT" } ], "machineType": "n1-standard-1", "metadata": { "fingerprint": "Jt9ALJ07B8Q=", "kind": "compute#metadata" }, "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "type": "ONE_TO_ONE_NAT" } ], "network": "https://content.googleapis.com/compute/v1/projects/project_name/global/networks/default" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE", "preemptible": false }, "serviceAccounts": [ { "email": "default", "scopes": [ "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring.write", "https://www.googleapis.com/auth/servicecontrol", "https://www.googleapis.com/auth/service.management" ] } ] }, "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates/my-instance-template1" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/generic_disk.json0000664000175000017500000000124612701023453026576 0ustar kamikami00000000000000{ "creationTimestamp": "2013-12-13T10:54:04.074-08:00", "description": "Image: 
https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "3535838963674672928", "kind": "compute#disk", "name": "genericdisk", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/genericdisk", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567788", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json0000664000175000017500000001361712701023453032220 0ustar kamikami00000000000000{ "id": "projects/project_name/zones/europe-west1-a/instances", "items": [ { "canIpForward": false, "creationTimestamp": "2013-12-13T10:43:58.782-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-demo-europe-multiple-nodes-000", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-multiple-nodes-000", "type": "PERSISTENT" } ], "id": "10947706194464948790", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "libcloud-demo-europe-multiple-nodes-000", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "192.158.28.252", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.122.85" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-multiple-nodes-000", "status": "RUNNING", "tags": { "fingerprint": "W7t6ZyTyIrc=", "items": [ "libcloud" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" }, { "canIpForward": false, "creationTimestamp": "2013-12-13T10:43:12.706-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-demo-europe-np-node", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-np-node", "type": "PERSISTENT" }, { "deviceName": "libcloud-demo-europe-attach-disk", "index": 1, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-attach-disk", "type": "PERSISTENT" } ], "id": "3421745795082776097", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "libcloud-demo-europe-np-node", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.251.128.10", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.221.125" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", "status": "RUNNING", "tags": { "fingerprint": "W7t6ZyTyIrc=", "items": [ "libcloud" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" }, { "canIpForward": false, 
"creationTimestamp": "2013-12-13T10:43:37.267-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-demo-europe-boot-disk", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-boot-disk", "type": "PERSISTENT" } ], "id": "517678477070693411", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "libcloud-demo-europe-persist-node", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.251.128.32", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.240.204" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-persist-node", "status": "RUNNING", "tags": { "fingerprint": "EbZdwVRtKyg=", "items": [ "libcloud", "newtag" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" } ], "kind": "compute#instanceList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_urlMaps_web_map.json0000664000175000017500000000057712701023453030613 0ustar kamikami00000000000000{ "kind": "compute#urlMap", "id": "4266107551250249032", "creationTimestamp": "2014-08-15T16:16:54.084-07:00", "name": "web-map", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/urlMaps/web-map", "defaultService": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices/web-service", "fingerprint": "JiV2ACVOAlg=" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_europe-west1.json0000664000175000017500000000215713153541406030245 0ustar kamikami00000000000000{ "kind": "compute#region", "id": "1100", "creationTimestamp": "2014-05-30T18:35:16.413-07:00", "name": "europe-west1", "description": "europe-west1", "status": "UP", "zones": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b" ], "quotas": [ { "metric": "CPUS", "limit": 24.0, "usage": 0.0 }, { "metric": "DISKS_TOTAL_GB", "limit": 10240.0, "usage": 0.0 }, { "metric": "STATIC_ADDRESSES", "limit": 7.0, "usage": 0.0 }, { "metric": "IN_USE_ADDRESSES", "limit": 23.0, "usage": 0.0 }, { "metric": "SSD_TOTAL_GB", "limit": 2048.0, "usage": 0.0 }, { "metric": "LOCAL_SSD_TOTAL_GB", "limit": 10240.0, "usage": 0.0 }, { "metric": "INSTANCE_GROUPS", "limit": 100.0, "usage": 0.0 }, { "metric": "INSTANCE_GROUP_MANAGERS", "limit": 50.0, "usage": 0.0 }, { "metric": "INSTANCES", "limit": 240.0, "usage": 0.0 }, { "metric": "AUTOSCALERS", "limit": 50.0, "usage": 0.0 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json0000664000175000017500000000226012701023453032066 0ustar kamikami00000000000000{ "id": "projects/project_name/regions/us-central1/addresses", "items": [ { "address": "108.59.82.4", "creationTimestamp": "2013-06-26T09:48:31.184-07:00", "description": "", "id": "17634862894218443422", "kind": "compute#address", "name": "libcloud-demo-address", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/libcloud-demo-address", "status": "RESERVED" }, { "address": "173.255.114.104", 
"creationTimestamp": "2013-06-04T16:28:43.764-07:00", "description": "", "id": "11879548153827627972", "kind": "compute#address", "name": "testaddress", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/testaddress", "status": "RESERVED" } ], "kind": "compute#addressList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json0000664000175000017500000000322712701023453031140 0ustar kamikami00000000000000{ "id": "projects/project_name/zones/us-central1-a/disks", "items": [ { "creationTimestamp": "2013-12-13T10:45:42.139-08:00", "id": "08045379695757218000", "kind": "compute#disk", "name": "lcdisk", "description": "I'm a happy little SSD", "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-ssd", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "sizeGb": "10", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }, { "creationTimestamp": "2013-12-13T10:45:20.308-08:00", "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "id": "0265567475385851075", "kind": "compute#disk", "name": "node-name", "description": "I'm a happy little disk", "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-standard", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceImageId": "17312518942796567788", "status": "READY", "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } ], "kind": "compute#diskList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks" } ././@LongLink0000000000000000000000000000022000000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node_name_addAccessConfig_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000131612701023453034001 0ustar kamikami00000000000000{ "endTime": "2013-06-26T16:13:08.382-07:00", "id": "1858155812259649243", "insertTime": "2013-06-26T16:12:51.492-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node_name_addAccessConfig_post", "operationType": "insert", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node_name_addAccessConfig_post", "startTime": "2013-06-26T16:12:51.537-07:00", "status": "PENDING", "targetId": "16630486471904253898", "user": "foo@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_del0000664000175000017500000000140512701023453033766 0ustar kamikami00000000000000{ "id": "7128783508312083402", "insertTime": "2013-06-26T12:21:44.075-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_addresses_lcaddress_delete", "operationType": "delete", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", 
"selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_lcaddress_delete", "startTime": "2013-06-26T12:21:44.110-07:00", "status": "PENDING", "targetId": "01531551729918243104", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", "user": "897001307951@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_sn-node-name.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_sn-node-name.0000664000175000017500000000311613153541406033463 0ustar kamikami00000000000000{ "canIpForward": false, "creationTimestamp": "2013-12-13T10:45:23.351-08:00", "disks": [ { "boot": true, "deviceName": "persistent-disk-0", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", "type": "PERSISTENT" } ], "id": "4006034190819017667", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", "metadata": { "fingerprint": "42WmSpB8rSM=", "kind": "compute#metadata" }, "name": "sn-node-name", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.236.58.15", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "subnetwork": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112", "networkIP": "10.240.72.75" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/sn-node-name", "status": "RUNNING", "tags": { "fingerprint": "42WmSpB8rSM=" }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_diskTypes_pd_standard.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_diskTypes_pd_standard.j0000664000175000017500000000066412701023453033627 0ustar kamikami00000000000000{ "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.529-07:00", "name": "pd-standard", "description": "Standard Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-standard", "defaultDiskSizeGb": "500" } ././@LongLink0000000000000000000000000000020400000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_instanceGroups_myname_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_in0000664000175000017500000000115513153541406034153 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "myname", "zone": "us-central1-a", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname", "operationType": "compute.instanceGroups.delete", "progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us_central1_a_instanceGroups_myname_delete", "user": 
"1264195755357-compute@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node_start.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node_0000664000175000017500000000123112701023453034101 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "18431811683007150988", "name": "operation-startnode", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "start", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/stopped-node", "targetId": "12335588484913203363", "status": "PENDING", "user": "erjohnso@google.com", "progress": 0, "insertTime": "2015-01-30T06:55:11.503-08:00", "startTime": "2015-01-30T06:55:11.847-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-startnode" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-east1.json0000664000175000017500000000213713153541406027345 0ustar kamikami00000000000000{ "kind": "compute#region", "id": "1230", "creationTimestamp": "2014-09-03T16:13:49.013-07:00", "name": "us-east1", "description": "us-east1", "status": "UP", "zones": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-east1-b" ], "quotas": [ { "metric": "CPUS", "limit": 24.0, "usage": 0.0 }, { "metric": "DISKS_TOTAL_GB", "limit": 10240.0, "usage": 0.0 }, { "metric": "STATIC_ADDRESSES", "limit": 7.0, "usage": 0.0 }, { "metric": "IN_USE_ADDRESSES", "limit": 23.0, "usage": 0.0 }, { "metric": "SSD_TOTAL_GB", "limit": 2048.0, "usage": 0.0 }, { "metric": "LOCAL_SSD_TOTAL_GB", "limit": 10240.0, "usage": 0.0 }, { "metric": "INSTANCE_GROUPS", "limit": 100.0, "usage": 0.0 }, { "metric": "INSTANCE_GROUP_MANAGERS", "limit": 50.0, "usage": 0.0 
}, { "metric": "INSTANCES", "limit": 240.0, "usage": 0.0 }, { "metric": "AUTOSCALERS", "limit": 50.0, "usage": 0.0 } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-east1" } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instanceGroupManagers_myinstancegroup.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instanceGroupManagers_m0000664000175000017500000000171613153541406033675 0ustar kamikami00000000000000{ "kind": "compute#instanceGroupManager", "id": "8604381270851510464", "creationTimestamp": "2016-07-18T15:54:39.153-07:00", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b", "name": "myinstancegroup", "description": "my description for myinstancegroup", "instanceTemplate": "https://content.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates/my-instance-template1", "instanceGroup": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instanceGroups/myinstancegroup", "baseInstanceName": "base-foo", "fingerprint": "Q21hYveq9do=", "currentActions": { "none": 4, "creating": 0, "recreating": 0, "deleting": 0, "abandoning": 0, "restarting": 0, "refreshing": 0 }, "targetSize": 4, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instanceGroupManagers/myinstancegroup" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_forwardingRules.json0000664000175000017500000000215312701023453030643 0ustar kamikami00000000000000{ "kind": "compute#forwardingRuleList", "id": "projects/project_name/global/forwardingRules", "items": [ { "kind": "compute#forwardingRule", "id": "16224943838916174114", "creationTimestamp": "2014-08-22T11:15:26.174-07:00", "name": "http-rule", "IPAddress": "192.0.2.1", "IPProtocol": "TCP", "portRange": "80-80", "target": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies/web-proxy", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/forwardingRules/http-rule" }, { "kind": "compute#forwardingRule", "id": "16224943838916174115", "creationTimestamp": "2014-08-22T11:15:26.174-07:00", "name": "http-rule2", "IPAddress": "192.0.2.2", "IPProtocol": "TCP", "portRange": "80-80", "target": "https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies/web-proxy", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/forwardingRules/http-rule2" } ], "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/forwardingRules" } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_urlMaps_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_urlMaps_post.js0000664000175000017500000000115312701023453034143 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "14014131794489638887", "name": "operation-global_urlMaps_post", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/urlMaps/web-map", "targetId": "4266107551250249032", "status": "DONE", "user": "user@developer.gserviceaccount.com", "progress": 100, "insertTime": "2014-10-27T15:21:17.438-07:00", "startTime": "2014-10-27T15:21:17.631-07:00", "endTime": "2014-10-27T15:21:18.422-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_urlMaps_post" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_disktypes.json0000664000175000017500000001076312701023453030345 0ustar kamikami00000000000000{ "kind": "compute#diskTypeAggregatedList", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/aggregated/diskTypes", "items": { "zones/us-central1-a": { "diskTypes": [ { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.529-07:00", "name": "pd-ssd", "description": "SSD Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-ssd", "defaultDiskSizeGb": "100" }, { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.529-07:00", "name": "local-ssd", "description": "Local SSD", "validDiskSize": "375GB-375", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/local-ssd", "defaultDiskSizeGb": "375" }, { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.530-07:00", "name": "pd-standard", "description": "Standard Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-standard", "defaultDiskSizeGb": "500" } ] }, "zones/us-central1-b": { "diskTypes": [ { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.529-07:00", "name": "pd-ssd", "description": "SSD Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/diskTypes/pd-ssd", "defaultDiskSizeGb": "100" }, { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.530-07:00", "name": "pd-standard", "description": "Standard Persistent Disk", 
"validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/diskTypes/pd-standard", "defaultDiskSizeGb": "500" } ] }, "zones/europe-west1-a": { "diskTypes": [ { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.529-07:00", "name": "pd-ssd", "description": "SSD Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/diskTypes/pd-ssd", "defaultDiskSizeGb": "100" }, { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.530-07:00", "name": "pd-standard", "description": "Standard Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/diskTypes/pd-standard", "defaultDiskSizeGb": "500" } ] }, "zones/europe-west1-b": { "diskTypes": [ { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.529-07:00", "name": "pd-ssd", "description": "SSD Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/diskTypes/pd-ssd", "defaultDiskSizeGb": "100" }, { "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.530-07:00", "name": "pd-standard", "description": "Standard Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b", "selfLink": 
"https://content.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/diskTypes/pd-standard", "defaultDiskSizeGb": "500" } ] } } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_autoscalers.json0000664000175000017500000002140213153541406030650 0ustar kamikami00000000000000{ "items": { "regions/asia-east1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "regions/asia-east1" } ], "message": "There are no results for scope 'regions/asia-east1' on this page." } }, "regions/europe-west1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "regions/europe-west1" } ], "message": "There are no results for scope 'regions/europe-west1' on this page." } }, "regions/us-central1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "regions/us-central1" } ], "message": "There are no results for scope 'regions/us-central1' on this page." } }, "regions/us-east1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "regions/us-east1" } ], "message": "There are no results for scope 'regions/us-east1' on this page." } }, "regions/us-west1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "regions/us-west1" } ], "message": "There are no results for scope 'regions/us-west1' on this page." } }, "zones/asia-east1-a": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/asia-east1-a" } ], "message": "There are no results for scope 'zones/asia-east1-a' on this page." } }, "zones/asia-east1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/asia-east1-b" } ], "message": "There are no results for scope 'zones/asia-east1-b' on this page." 
} }, "zones/asia-east1-c": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/asia-east1-c" } ], "message": "There are no results for scope 'zones/asia-east1-c' on this page." } }, "zones/europe-west1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/europe-west1-b" } ], "message": "There are no results for scope 'zones/europe-west1-b' on this page." } }, "zones/europe-west1-c": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/europe-west1-c" } ], "message": "There are no results for scope 'zones/europe-west1-c' on this page." } }, "zones/europe-west1-d": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/europe-west1-d" } ], "message": "There are no results for scope 'zones/europe-west1-d' on this page." } }, "zones/us-central1-a": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/us-central1-a" } ], "message": "There are no results for scope 'zones/us-central1-a' on this page." } }, "zones/us-central1-b": { "autoscalers": [ { "autoscalingPolicy": { "coolDownPeriodSec": 60, "cpuUtilization": { "utilizationTarget": 0.6 }, "maxNumReplicas": 10, "minNumReplicas": 2 }, "creationTimestamp": "2016-07-20T10:29:22.850-07:00", "id": "7161434716533557789", "kind": "compute#autoscaler", "name": "my-autoscaler", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/autoscalers/my-autoscaler", "target": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instanceGroupManagers/myinstancegroup", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" } ] }, "zones/us-central1-c": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/us-central1-c" } ], "message": "There are no results for scope 'zones/us-central1-c' on this page." 
} }, "zones/us-central1-f": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/us-central1-f" } ], "message": "There are no results for scope 'zones/us-central1-f' on this page." } }, "zones/us-east1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/us-east1-b" } ], "message": "There are no results for scope 'zones/us-east1-b' on this page." } }, "zones/us-east1-c": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/us-east1-c" } ], "message": "There are no results for scope 'zones/us-east1-c' on this page." } }, "zones/us-east1-d": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/us-east1-d" } ], "message": "There are no results for scope 'zones/us-east1-d' on this page." } }, "zones/us-west1-a": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/us-west1-a" } ], "message": "There are no results for scope 'zones/us-west1-a' on this page." } }, "zones/us-west1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "zones/us-west1-b" } ], "message": "There are no results for scope 'zones/us-west1-b' on this page." 
} } }, "kind": "compute#autoscalerAggregatedList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/autoscalers" } ././@LongLink0000000000000000000000000000021300000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_instanceGroups_myname_setNamedPorts.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_in0000664000175000017500000000117313153541406034153 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "myname", "zone": "us-central1-a", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname", "operationType": "compute.instanceGroups.setNamedPorts", "progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us_central1_a_instanceGroups_myname_setNamedPorts", "user": "1264195755357-compute@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_firewalls.json0000664000175000017500000000654612701023453027470 0ustar kamikami00000000000000{ "id": "projects/project_name/global/firewalls", "items": [ { "allowed": [ { "IPProtocol": "udp" }, { "IPProtocol": "tcp" }, { "IPProtocol": "icmp" } ], "creationTimestamp": "2013-06-25T19:50:41.630-07:00", "description": "", "id": "5399576268464751692", "kind": "compute#firewall", "name": "default-allow-internal", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/default-allow-internal", "sourceRanges": [ "10.240.0.0/16" ] }, { "allowed": [ { "IPProtocol": "tcp", "ports": [ "22" ] } ], "creationTimestamp": "2013-06-25T19:48:25.111-07:00", "description": "", "id": 
"8063006729705804986", "kind": "compute#firewall", "name": "default-ssh", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/default-ssh", "sourceRanges": [ "0.0.0.0/0" ] }, { "allowed": [ { "IPProtocol": "tcp", "ports": [ "3141" ] } ], "creationTimestamp": "2013-11-01T14:46:25.155-07:00", "id": "13827675544891616808", "kind": "compute#firewall", "name": "libcloud-demo-europe-firewall", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/libcloud-demo-europe-network", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/libcloud-demo-europe-firewall", "sourceRanges": [ "0.0.0.0/0" ], "sourceTags": [ "libcloud" ] }, { "allowed": [ { "IPProtocol": "tcp", "ports": [ "3141" ] } ], "creationTimestamp": "2013-11-01T14:44:31.284-07:00", "id": "1648761630208029546", "kind": "compute#firewall", "name": "libcloud-demo-firewall", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/libcloud-demo-network", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/libcloud-demo-firewall", "sourceRanges": [ "0.0.0.0/0" ], "sourceTags": [ "libcloud" ] }, { "allowed": [ { "IPProtocol": "tcp", "ports": [ "80" ] } ], "creationTimestamp": "2013-08-19T14:40:22.562-07:00", "description": "", "id": "01326795494450101956", "kind": "compute#firewall", "name": "www-firewall", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/www-firewall", "sourceRanges": [ "0.0.0.0/0" ], "targetTags": [ "www-tag" ] } ], "kind": "compute#firewallList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls" 
}apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_diskTypes_pd_ssd.json0000664000175000017500000000064512701023453033337 0ustar kamikami00000000000000{ "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.529-07:00", "name": "pd-ssd", "description": "SSD Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-ssd", "defaultDiskSizeGb": "100" } ././@LongLink0000000000000000000000000000020300000000000011210 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000146412701023453034005 0ustar kamikami00000000000000{ "endTime": "2013-06-26T16:13:56.931-07:00", "id": "17469711273432628502", "insertTime": "2013-06-26T16:13:40.579-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_lcnode-001_delete", "operationType": "delete", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-001_delete", "startTime": "2013-06-26T16:13:40.620-07:00", "status": "DONE", "targetId": "16630486471904253898", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_targetHttpProxies_post.json0000664000175000017500000000107212701023453032232 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": 
"3333333333333333333", "name": "operation_global_targetHttpProxies_post", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies/web-proxy", "status": "PENDING", "user": "user@developer.gserviceaccount.com", "progress": 0, "insertTime": "2014-10-27T16:22:40.726-07:00", "startTime": "2014-10-27T16:22:41.027-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_targetHttpProxies_post" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups.json0000664000175000017500000000244113153541406033175 0ustar kamikami00000000000000{ "id": "projects/project_name/zones/us-central1-a/instanceGroups", "items": [ { "creationTimestamp": "2016-09-09T13:48:39.700-07:00", "description": "", "fingerprint": "42WmSpB8rSM=", "id": "5837905299775594184", "kind": "compute#instanceGroup", "name": "myname", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname", "size": 0, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }, { "creationTimestamp": "2016-09-09T13:54:30.857-07:00", "description": "", "fingerprint": "42WmSpB8rSM=", "id": "6825641674983513961", "kind": "compute#instanceGroup", "name": "myname2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname2", "size": 0, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" } ], "kind": "compute#instanceGroupList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups" } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_ad0000664000175000017500000000135512701023453034054 0ustar kamikami00000000000000{ "id": "16064059851942653139", "insertTime": "2013-06-26T12:21:40.299-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_addresses_post", "operationType": "insert", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_post", "startTime": "2013-06-26T12:21:40.358-07:00", "status": "DONE", "targetId": "01531551729918243104", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", "user": "897001307951@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_removeInstances.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_r0000664000175000017500000000117713153541406034121 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "myname", "zone": "us-central1-a", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname", "operationType": "compute.instanceGroups.removeInstances", "progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us_central1_a_instanceGroups_myname_removeInstances", "user": "1264195755357-compute@developer.gserviceaccount.com" 
}././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_post.js0000664000175000017500000000131612701023453033777 0ustar kamikami00000000000000{ "id": "0651769405845333112", "insertTime": "2013-09-03T00:17:25.381-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_forwardingRules_post", "operationType": "insert", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_forwardingRules_post", "startTime": "2013-09-03T00:17:25.434-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", "user": "user@gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_addresses.json0000664000175000017500000000107312701023453027443 0ustar kamikami00000000000000{ "id": "projects/project_name/global/addresses", "items": [ { "address": "173.99.99.99", "creationTimestamp": "2013-06-26T09:48:31.184-07:00", "description": "", "id": "17634862894218443422", "kind": "compute#address", "name": "lcaddressglobal", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/addresses/lcaddressglobal", "status": "RESERVED" } ], "kind": "compute#addressList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/addresses" } ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_forwardingRules_http_rule_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_forwardingRules0000664000175000017500000000124212701023453034214 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "4444444444444444444", "name": "operation_global_forwardingRules_http_rule_delete", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/forwardingRules/http-rule-test", "targetId": "16224243838919174114", "status": "DONE", "user": "user@developer.gserviceaccount.com", "progress": 100, "insertTime": "2014-10-28T10:53:13.433-07:00", "startTime": "2014-10-28T10:53:13.723-07:00", "endTime": "2014-10-28T10:53:16.304-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_forwardingRules_http_rule_delete" } ././@LongLink0000000000000000000000000000017700000000000011222 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_windows-cloud_global_licenses_windows_server_2008_r2_dc.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_windows-cloud_global_licenses_wind0000664000175000017500000000030612701023453034131 0ustar kamikami00000000000000{ "kind": "compute#license", "selfLink": "https://www.googleapis.com/compute/v1/projects/windows-cloud/global/licenses/windows-2008-r2-dc", "name": "windows-2008-r2-dc", "chargesUseFee": true } ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_di0000664000175000017500000000135612701023453033773 0ustar kamikami00000000000000{ "id": "06887337364510109333", "insertTime": 
"2013-06-26T10:06:11.835-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_disks_lcdisk_delete", "operationType": "delete", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_lcdisk_delete", "startTime": "2013-06-26T10:06:12.006-07:00", "status": "DONE", "targetId": "16109451798967042451", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_instanceTemplates_insert.json0000664000175000017500000000114113153541406032536 0ustar kamikami00000000000000{ "status": "PENDING", "kind": "compute#operation", "name": "my_instance_template1", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/instanceTemplates/my_instance_template1", "operationType": "compute.instanceTemplates.insert", "progress": 50, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_instanceTemplates_my_instance_template1_insert", "user": "1264195755357-compute@developer.gserviceaccount.com" } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000143112701023453033777 0ustar kamikami00000000000000{ "endTime": "2013-06-26T16:13:08.382-07:00", "id": "1858155812259649243", "insertTime": "2013-06-26T16:12:51.492-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_post", 
"operationType": "insert", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_post", "startTime": "2013-06-26T16:12:51.537-07:00", "status": "DONE", "targetId": "16630486471904253898", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_addresses.json0000664000175000017500000000630412701023453030277 0ustar kamikami00000000000000{ "id": "projects/project_name/aggregated/addresses", "items": { "regions/europe-west1": { "addresses": [ { "address": "192.158.29.247", "creationTimestamp": "2013-06-26T09:51:47.506-07:00", "description": "", "id": "10955781597205896134", "kind": "compute#address", "name": "libcloud-demo-europe-address", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1/addresses/libcloud-demo-europe-address", "status": "RESERVED" } ] }, "regions/us-central1": { "addresses": [ { "address": "173.255.113.20", "creationTimestamp": "2013-06-26T12:21:40.625-07:00", "description": "", "id": "01531551729918243104", "kind": "compute#address", "name": "lcaddress", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", "status": "RESERVED" }, { "address": "108.59.82.4", "creationTimestamp": "2013-06-26T09:48:31.184-07:00", "description": "", "id": "17634862894218443422", "kind": "compute#address", "name": "libcloud-demo-address", "region": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/libcloud-demo-address", "status": "RESERVED" }, { "address": "173.255.114.104", "creationTimestamp": "2013-06-04T16:28:43.764-07:00", "description": "", "id": "11879548153827627972", "kind": "compute#address", "name": "testaddress", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/testaddress", "status": "RESERVED" } ] }, "regions/us-central2": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "regions/us-central2" } ], "message": "There are no results for scope 'regions/us-central2' on this page." } }, "global": { "addresses": [ { "address": "173.99.99.99", "creationTimestamp": "2013-06-26T12:21:40.625-07:00", "description": "", "id": "01531551729918243104", "kind": "compute#address", "name": "lcaddressglobal", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/addresses/lcaddressglobal", "status": "RESERVED" } ] } }, "kind": "compute#addressAggregatedList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/addresses" } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.j0000664000175000017500000000120412701023453034206 0ustar kamikami00000000000000{ "endTime": "2013-06-26T10:05:07.630-07:00", "id": "3681664092089171723", "insertTime": "2013-06-26T10:05:03.271-07:00", "kind": "compute#operation", "name": "operation-global_networks_post", "operationType": "insert", "progress": 100, "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_networks_post", "startTime": "2013-06-26T10:05:03.315-07:00", "status": "DONE", "targetId": "16211908079305042870", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/lcnetwork", "user": "897001307951@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_networks_post.json0000664000175000017500000000106112701023453030404 0ustar kamikami00000000000000{ "id": "3681664092089171723", "insertTime": "2013-06-26T10:05:03.271-07:00", "kind": "compute#operation", "name": "operation-global_networks_post", "operationType": "insert", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_networks_post", "startTime": "2013-06-26T10:05:03.315-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/lcnetwork", "user": "897001307951@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000022400000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeInstance_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_ta0000664000175000017500000000152212701023453034070 0ustar kamikami00000000000000{ "endTime": "2013-09-03T01:28:59.247-07:00", "id": "1815686149437875016", "insertTime": "2013-09-03T01:28:53.049-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lctargetpool_removeInstance_post", "operationType": "removeInstance", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_removeInstance_post", "startTime": "2013-09-03T01:28:53.109-07:00", "status": "DONE", "targetId": "16862638289615591831", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-000.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-d0000664000175000017500000000332512701023453033533 0ustar kamikami00000000000000{ "canIpForward": false, "creationTimestamp": "2013-12-13T10:51:24.339-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-lb-demo-www-000", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/disks/libcloud-lb-demo-www-000", "type": "PERSISTENT" } ], "id": "08447900841145802741", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1", "metadata": { "fingerprint": "IZjMMp0A_8k=", "items": [ { "key": "startup-script", "value": "apt-get -y update && apt-get -y install apache2 && hostname > /var/www/index.html" } ], "kind": "compute#metadata" }, "name": "libcloud-lb-demo-www-000", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.236.58.15", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.104.11" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", "status": "RUNNING", "tags": { "fingerprint": "XI0he92M8l8=", "items": [ "libcloud-lb-demo-www" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json0000664000175000017500000003531012701023453033253 0ustar kamikami00000000000000{ "id": "projects/debian-cloud/global/images", "items": [ { "archiveSizeBytes": "365056004", "creationTimestamp": "2013-12-02T17:49:01.206-08:00", "description": "Debian GNU/Linux 7.2 (wheezy) with backports kernel built on 2013-11-27", "id": "11823693270029497919", "kind": "compute#image", "name": "backports-debian-7-wheezy-v20131127", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-v20131127", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "214107225", "creationTimestamp": "2013-05-07T17:09:22.111-07:00", "deprecated": { "deleted": "1970-01-03", "deprecated": "1970-01-01", "obsolete": "1970-01-02", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130507", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-07", "id": "647943287916432906", "kind": "compute#image", "name": "debian-6-squeeze-v20130507", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130507", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "255972840", "creationTimestamp": "2013-05-09T12:56:21.720-07:00", "deprecated": { "deprecated": "2064-03-11T20:18:36.194-07:00", "obsolete": "2074-03-11T20:18:36.194-07:00", "deleted": "2084-03-11T20:18:36.194-07:00", 
"replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-09", "id": "15745758816845911589", "kind": "compute#image", "name": "debian-6-squeeze-v20130509", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130509", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "219458106", "creationTimestamp": "2013-05-14T21:01:12.124-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-15", "id": "006866479348046290", "kind": "compute#image", "name": "debian-6-squeeze-v20130515", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130515", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "265118282", "creationTimestamp": "2013-05-30T09:48:37.837-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-22", "id": "1266148899538866390", "kind": "compute#image", "name": "debian-6-squeeze-v20130522", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130522", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "233984980", "creationTimestamp": "2013-06-19T13:45:44.111-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": 
"https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-06-17", "id": "04009358257173422091", "kind": "compute#image", "name": "debian-6-squeeze-v20130617", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130617", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "258168500", "creationTimestamp": "2013-07-24T12:31:06.054-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-07-23", "id": "3115342424904648000", "kind": "compute#image", "name": "debian-6-squeeze-v20130723", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130723", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "300710522", "creationTimestamp": "2013-09-04T13:21:53.292-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-08-16", "id": "06130699342353523133", "kind": "compute#image", "name": "debian-6-squeeze-v20130816", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130816", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "300710522", "creationTimestamp": "2013-10-11T09:26:47.736-07:00", "deprecated": { "deprecated": "2064-03-11T20:18:36.194-07:00", "obsolete": 
"2074-03-11T20:18:36.194-07:00", "deleted": "2084-03-11T20:18:36.194-07:00", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-09-26", "id": "0225119674082940764", "kind": "compute#image", "name": "debian-6-squeeze-v20130926", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130926", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "237290472", "creationTimestamp": "2013-05-07T17:01:30.071-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-07", "id": "15638477823580670459", "kind": "compute#image", "name": "debian-7-wheezy-v20130507", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130507", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "270107366", "creationTimestamp": "2013-05-09T12:56:47.910-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-09", "id": "020034532765408091", "kind": "compute#image", "name": "debian-7-wheezy-v20130509", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130509", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "265604335", "creationTimestamp": "2013-05-14T21:02:55.044-07:00", 
"deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-15", "id": "0587071888358410836", "kind": "compute#image", "name": "debian-7-wheezy-v20130515", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130515", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "284301993", "creationTimestamp": "2013-05-30T09:47:30.980-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-22", "id": "622079684385221180", "kind": "compute#image", "name": "debian-7-wheezy-v20130522", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130522", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "310882322", "creationTimestamp": "2013-06-19T13:47:20.563-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 7.1 (wheezy) built on 2013-06-17", "id": "1549141992333368759", "kind": "compute#image", "name": "debian-7-wheezy-v20130617", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "258869806", "creationTimestamp": "2013-07-24T12:31:36.790-07:00", "deprecated": { "deprecated": 
"2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 7.1 (wheezy) built on 2013-07-23", "id": "3119304810142650253", "kind": "compute#image", "name": "debian-7-wheezy-v20130723", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130723", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "279162251", "creationTimestamp": "2013-09-04T13:24:30.479-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 7.1 (wheezy) built on 2013-08-16", "id": "2595370902107306327", "kind": "compute#image", "name": "debian-7-wheezy-v20130816", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130816", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "279162251", "creationTimestamp": "2013-10-11T09:26:56.993-07:00", "deprecated": { "deprecated": "2013-11-14T00:00:00Z", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 7.1 (wheezy) built on 2013-09-26", "id": "06737951524754934395", "kind": "compute#image", "name": "debian-7-wheezy-v20130926", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130926", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "405683884", "creationTimestamp": "2013-10-28T13:52:08.233-07:00", "deprecated": { "deprecated": "2064-03-11T20:18:36.194-07:00", 
"obsolete": "2074-03-11T20:18:36.194-07:00", "deleted": "2084-03-11T20:18:36.194-07:00", "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "state": "DEPRECATED" }, "description": "Debian GNU/Linux 7.2 (wheezy) built on 2013-10-14", "id": "1405559880052641502", "kind": "compute#image", "name": "debian-7-wheezy-v20131014", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", "sourceType": "RAW", "status": "READY" }, { "archiveSizeBytes": "341857472", "creationTimestamp": "2013-11-25T15:17:00.436-08:00", "description": "Debian GNU/Linux 7.2 (wheezy) built on 2013-11-20", "id": "05708985343919147751", "kind": "compute#image", "name": "debian-7-wheezy-v20131120", "rawDisk": { "containerType": "TAR", "source": "" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", "sourceType": "RAW", "status": "READY" } ], "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images" } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_res0000664000175000017500000000141212701023453033630 0ustar kamikami00000000000000{ "id": "10507122129283663728", "insertTime": "2013-06-26T15:03:02.766-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node-name_reset_post", "operationType": "reset", "progress": 0, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_reset_post", "startTime": "2013-06-26T15:03:02.813-07:00", "status": 
"PENDING", "targetId": "1845312225624811608", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_suse-cloud_global_images.json0000664000175000017500000001236012701023453033010 0ustar kamikami00000000000000{ "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images", "id": "projects/suse-cloud/global/images", "items": [ { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-11-sp3-v20140306", "id": "3727805086509383287", "creationTimestamp": "2014-03-06T13:13:29.791-08:00", "name": "sles-11-sp3-v20140306", "description": "", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-11-sp3-v20140609", "deprecated": "2014-06-09T00:00:00Z" }, "status": "READY", "archiveSizeBytes": "354497936", "diskSizeGb": "8", "licenses": [ "https://content.googleapis.com/compute/v1/projects/suse-cloud/global/licenses/sles-11" ] }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-11-sp3-v20140609", "id": "10656986931280984622", "creationTimestamp": "2014-06-09T10:29:06.385-07:00", "name": "sles-11-sp3-v20140609", "description": "", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-11-sp3-v20140712", "deprecated": "2014-07-12T00:00:00Z" }, "status": "READY", "archiveSizeBytes": "1191603546", "diskSizeGb": "8", "licenses": [ 
"https://content.googleapis.com/compute/v1/projects/suse-cloud/global/licenses/sles-11" ] }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-11-sp3-v20140712", "id": "3415847542100990147", "creationTimestamp": "2014-07-12T03:39:17.695-07:00", "name": "sles-11-sp3-v20140712", "description": "SUSE Linux Enterprise 11 SP3 built on 2014-07-12", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-11-sp3-v20140826", "deprecated": "2014-06-26T00:00:00Z" }, "status": "READY", "archiveSizeBytes": "1071997074", "diskSizeGb": "8", "licenses": [ "https://content.googleapis.com/compute/v1/projects/suse-cloud/global/licenses/sles-11" ] }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-11-sp3-v20140826", "id": "588070221570840387", "creationTimestamp": "2014-08-26T14:46:38.449-07:00", "name": "sles-11-sp3-v20140826", "description": "SUSE Linux Enterprise 11 SP3 released on 2014-06-26, built on 2014-08-20", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-11-sp3-v20140930", "deprecated": "2014-10-30T00:00:00Z" }, "status": "READY", "archiveSizeBytes": "1072617138", "diskSizeGb": "8", "licenses": [ "https://content.googleapis.com/compute/v1/projects/suse-cloud/global/licenses/sles-11" ] }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-11-sp3-v20140930", "id": "3132872945991231828", "creationTimestamp": "2014-09-30T08:27:46.201-07:00", "name": "sles-11-sp3-v20140930", "description": "SUSE Linux Enterprise Server 11 SP3", "sourceType": "RAW", "rawDisk": { "source": 
"", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "1102825953", "diskSizeGb": "8", "licenses": [ "https://content.googleapis.com/compute/v1/projects/suse-cloud/global/licenses/sles-11" ] }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-11-sp3-v20141105", "id": "14793554030256860036", "creationTimestamp": "2014-11-05T16:11:49.996-08:00", "name": "sles-11-sp3-v20141105", "description": "SUSE Linux Enterprise Server 11 SP3 built on 2014-11-05", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "1075782309", "diskSizeGb": "8", "licenses": [ "https://content.googleapis.com/compute/v1/projects/suse-cloud/global/licenses/sles-11" ] }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-12-v20141023", "id": "15301906009182317384", "creationTimestamp": "2014-10-26T08:14:59.932-07:00", "name": "sles-12-v20141023", "description": "SUSE Linux Enterprise Server 12 built on 2014-10-23", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "1525260684", "diskSizeGb": "8", "licenses": [ "https://content.googleapis.com/compute/v1/projects/suse-cloud/global/licenses/sles-12" ] } ] } ././@LongLink0000000000000000000000000000017700000000000011222 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpoo0000664000175000017500000000144612701023453034054 0ustar kamikami00000000000000{ "id": "17341029456963557514", "insertTime": "2013-09-03T01:28:40.774-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lctargetpool_addHealthCheck_post", "operationType": "addHealthCheck", "progress": 0, 
"region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_addHealthCheck_post", "startTime": "2013-09-03T01:28:40.838-07:00", "status": "PENDING", "targetId": "16862638289615591831", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances_lctargetinstance_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances_lctarge0000664000175000017500000000140712701023453033710 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "16305469717066123402", "name": "operation-zones_us-central1-a_targetInstances_lctargetinstance_delete", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/targetInstances/lctargetinstance", "targetId": "8092539649535704539", "status": "PENDING", "user": "erjohnso@google.com", "progress": 0, "insertTime": "2014-11-14T13:05:18.564-08:00", "startTime": "2014-11-14T13:05:18.868-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_targetInstances_lctargetinstance_delete" } ././@LongLink0000000000000000000000000000021300000000000011211 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000150512701023453034001 0ustar kamikami00000000000000{ "endTime": "2013-06-26T16:48:31.831-07:00", "id": "7455886659787654716", "insertTime": "2013-06-26T16:48:27.691-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_node-name_attachDisk_post", "operationType": "attachDisk", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_attachDisk_post", "startTime": "2013-06-26T16:48:27.762-07:00", "status": "DONE", "targetId": "1845312225624811608", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_forwardingRules_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_forwardingRules0000664000175000017500000000120612701023453034214 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "44444444444444444444", "name": "operation_global_forwardingRules_post", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/forwardingRules/http-rule", "targetId": "16224243838919174114", "status": "DONE", "user": "user@developer.gserviceaccount.com", "progress": 100, "insertTime": "2014-10-27T17:10:54.102-07:00", "startTime": "2014-10-27T17:10:54.531-07:00", "endTime": 
"2014-10-27T17:10:59.466-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_forwardingRules_post" } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfir0000664000175000017500000000124112701023453034205 0ustar kamikami00000000000000{ "endTime": "2013-06-26T10:05:00.978-07:00", "id": "8983098895755095934", "insertTime": "2013-06-26T10:04:53.453-07:00", "kind": "compute#operation", "name": "operation-global_firewalls_lcfirewall_delete", "operationType": "delete", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_delete", "startTime": "2013-06-26T10:04:53.508-07:00", "status": "DONE", "targetId": "0565629596395414121", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", "user": "897001307951@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_backendServices_web_service_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_backendServices_web_service_delete.j0000664000175000017500000000116712701023453033744 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "3333333333333333333", "name": "operation_global_backendServices_web_service_delete", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/backendServices/web-service", "targetId": "15555555555223232737", "status": "PENDING", "user": "user@developer.gserviceaccount.com", "progress": 0, "insertTime": "2014-10-28T12:51:20.402-07:00", "startTime": 
"2014-10-28T12:51:20.623-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_backendServices_web_service_delete" } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_libcloud-lb-demo-lb.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_libclou0000664000175000017500000000112312701023453034024 0ustar kamikami00000000000000{ "IPAddress": "108.59.83.110", "IPProtocol": "TCP", "creationTimestamp": "2013-09-29T13:30:00.702-07:00", "id": "1077550228014866104", "kind": "compute#forwardingRule", "name": "libcloud-lb-demo-lb", "portRange": "80-80", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/libcloud-lb-demo-lb", "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" }././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20131120_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20131120_del0000664000175000017500000000122012701023453033063 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "10762099380229198553", "name": "operation-global_images_debian7_delete", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/debian-7-wheezy-v20131120", "targetId": "14881612020726561163", "status": "PENDING", "user": "user@developer.gserviceaccount.com", "progress": 0, "insertTime": "2014-03-11T14:37:48.075-07:00", "startTime": "2014-03-11T14:37:48.158-07:00", "endTime": 
"2014-03-11T14:37:48.634-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_images_debian7_delete" } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_stopnode.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_st0000664000175000017500000000122212701023453034161 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "18431811683007150988", "name": "operation-stopnode", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "stop", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "targetId": "12335588484913203363", "status": "DONE", "user": "erjohnso@google.com", "progress": 100, "insertTime": "2015-01-30T06:55:11.503-08:00", "startTime": "2015-01-30T06:55:11.847-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-stopnode" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disktypes_pd-ssd.json0000664000175000017500000000064512701023453033315 0ustar kamikami00000000000000{ "kind": "compute#diskType", "creationTimestamp": "2014-06-02T11:07:28.529-07:00", "name": "pd-ssd", "description": "SSD Persistent Disk", "validDiskSize": "10GB-10240GB", "zone": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "selfLink": "https://content.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-ssd", "defaultDiskSizeGb": "100" } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-002.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-d0000664000175000017500000000060112701023453033525 0ustar kamikami00000000000000{ "error": { "code": 404, "errors": [ { "domain": "global", "message": "The resource 'projects/project-name/zones/us-central1-b/instances/libcloud-lb-demo-www-002' was not found", "reason": "notFound" } ], "message": "The resource 'projects/project-name/zones/us-central1-b/instances/libcloud-lb-demo-www-002' was not found" } } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_forwardingRules_http_rule_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_forwardingRules_http_rule_delete.jso0000664000175000017500000000116612701023453034100 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "3333333333333333333", "name": "operation_global_forwardingRules_http_rule_delete", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/forwardingRules/http-rule-test", "targetId": "16224243838919174114", "status": "PENDING", "user": "user@developer.gserviceaccount.com", "progress": 0, "insertTime": "2014-10-28T10:53:13.433-07:00", "startTime": "2014-10-28T10:53:13.723-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation_global_forwardingRules_http_rule_delete" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json0000664000175000017500000000115712701023453032555 0ustar kamikami00000000000000{ "id": "6526551968265354277", "insertTime": "2013-06-26T20:52:00.355-07:00", "kind": "compute#operation", "name": "operation-global_firewalls_lcfirewall_put", "operationType": "update", "progress": 0, 
"selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_put", "startTime": "2013-06-26T20:52:00.410-07:00", "status": "PENDING", "targetId": "10942695305090163011", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", "user": "897001307951@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000020300000000000011210 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_in0000664000175000017500000000146212701023453034003 0ustar kamikami00000000000000{ "endTime": "2013-06-26T16:13:36.800-07:00", "id": "3319596145594427549", "insertTime": "2013-06-26T16:13:12.903-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_instances_lcnode-000_delete", "operationType": "delete", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-000_delete", "startTime": "2013-06-26T16:13:12.948-07:00", "status": "DONE", "targetId": "5390075309006132922", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-000", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-networ0000664000175000017500000000053012701023453033777 0ustar kamikami00000000000000{ "IPv4Range": "10.10.0.0/16", 
"creationTimestamp": "2013-06-26T09:51:34.018-07:00", "gatewayIPv4": "10.10.0.1", "id": "13254259054875092094", "kind": "compute#network", "name": "libcloud-demo-europe-network", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/libcloud-demo-europe-network" }././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_i0000664000175000017500000000205212701023453034023 0ustar kamikami00000000000000{ "error": { "errors": [ { "code": "RESOURCE_ALREADY_EXISTS", "message": "The resource 'projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node' already exists" } ] }, "httpErrorMessage": "CONFLICT", "httpErrorStatusCode": 409, "id": "1510575454210533141", "insertTime": "2013-06-26T20:57:34.366-07:00", "kind": "compute#operation", "name": "operation-zones_europe-west1-a_instances_post", "operationType": "insert", "progress": 100, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/operations/operation-zones_europe-west1-a_instances_post", "startTime": "2013-06-26T20:57:34.453-07:00", "status": "DONE", "targetId": "14308265828754333159", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", "user": "897001307951@developer.gserviceaccount.com", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" }././@LongLink0000000000000000000000000000016400000000000011216 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_routes_lcdemoroute_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_routes_lcdemoro0000664000175000017500000000103112701023453034240 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "17322940416642455149", "name": "operation-global_route_lcdemoroute", "operationType": "destroy", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/routes/lcdemoroute", "status": "DONE", "user": "erjohnso@google.com", "progress": 100, "insertTime": "2014-11-25T11:00:44.049-08:00", "startTime": "2014-11-25T11:00:44.385-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_route_lcdemoroute" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_post.json0000664000175000017500000000127612701023453033467 0ustar kamikami00000000000000{ "id": "7487852523793007955", "insertTime": "2013-09-03T00:51:05.064-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_post", "operationType": "insert", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_post", "startTime": "2013-09-03T00:51:05.115-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_images.json0000664000175000017500000000114312701023453026731 0ustar kamikami00000000000000{ "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images", "id": 
"projects/project_name/global/images", "items": [ { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/aws-ubuntu", "id": "15632509721401584263", "creationTimestamp": "2014-12-09T09:26:27.234-08:00", "name": "aws-ubuntu", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "243720274", "diskSizeGb": "10" } ] } ././@LongLink0000000000000000000000000000021200000000000011210 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_instanceGroups_myname_addInstances.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_in0000664000175000017500000000117113153541406034151 0ustar kamikami00000000000000{ "status": "DONE", "kind": "compute#operation", "name": "myname", "zone": "us-central1-a", "insertTime": "2016-09-02T09:31:52.285-07:00", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroups/myname", "operationType": "compute.instanceGroups.addInstances", "progress": 100, "id": 123456, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us_central1_a_instanceGroups_myname_addInstances", "user": "1264195755357-compute@developer.gserviceaccount.com" }././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances_post.js0000664000175000017500000000131712701023453033667 0ustar kamikami00000000000000{ "id": "7487852523793007955", "insertTime": "2013-09-03T00:51:05.064-07:00", "kind": "compute#operation", "name": "operation-zones_us-central1-a_targetInstances_post", "operationType": "insert", "progress": 0, "region": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_targetInstances_post", "startTime": "2013-09-03T00:51:05.115-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/targetInstances/lctargetinstance", "user": "user@gserviceaccount.com" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/global_targetHttpProxies.json0000664000175000017500000000170012701023453031163 0ustar kamikami00000000000000{ "kind": "compute#targetHttpProxyList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies", "id": "projects/project_name/global/targetHttpProxies", "items": [ { "kind": "compute#targetHttpProxy", "id": "2276970411930672658", "creationTimestamp": "2014-08-22T09:47:35.425-07:00", "name": "web-proxy", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies/web-proxy", "urlMap": "https://www.googleapis.com/compute/v1/projects/project_name/global/urlMaps/web-map" }, { "kind": "compute#targetHttpProxy", "id": "2276970411930672659", "creationTimestamp": "2014-08-22T09:47:35.425-07:00", "name": "web-proxy2", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/targetHttpProxies/web-proxy2", "urlMap": "https://www.googleapis.com/compute/v1/projects/project_name/global/urlMaps/web-map" } ] } ././@LongLink0000000000000000000000000000023500000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_operations_operation_zones_us-central1-a_instanceGroupManagers_insert_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_operations_operation_zo0000664000175000017500000000162613153541406034034 0ustar kamikami00000000000000{ "kind": "compute#operation", 
"id": "5029726187056811377", "name": "operation-zones_us-central1-a_instanceGroupManagers_insert_post", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "compute.instanceGroupManagers.insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instanceGroupManagers/managed-instance-group-1", "targetId": "1549031063416753526", "status": "DONE", "user": "999999-compute@developer.gserviceaccount.com", "progress": 100, "insertTime": "2016-08-12T09:04:46.114-07:00", "startTime": "2016-08-12T09:04:46.117-07:00", "endTime": "2016-08-12T09:04:46.117-07:00", "selfLink" : "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instanceGroupManagers_insert_post" } ././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_targetInstances_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_ta0000664000175000017500000000127412701023453034002 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "17976948162128740230", "name": "operation-zones_us-central1-a_targetInstances_post", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "operationType": "insert", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/targetInstances/lctargetinstance", "status": "DONE", "user": "erjohnso@google.com", "progress": 100, "insertTime": "2014-11-14T13:21:20.789-08:00", "startTime": "2014-11-14T13:21:21.118-08:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_targetInstances_post" } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json0000664000175000017500000000131012701023453033126 0ustar kamikami00000000000000{ "id": "16064059851942653139", "insertTime": "2013-06-26T12:21:40.299-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_addresses_post", "operationType": "insert", "progress": 0, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_post", "startTime": "2013-06-26T12:21:40.358-07:00", "status": "PENDING", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", "user": "897001307951@developer.gserviceaccount.com" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_targetPools.json0000664000175000017500000001177512701023453030635 0ustar kamikami00000000000000{ "id": "projects/project_name/aggregated/targetPools", "items": { "regions/europe-west1": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "regions/europe-west1" } ], "message": "There are no results for scope 'regions/europe-west1' on this page." 
} }, "regions/us-central1": { "targetPools": [ { "creationTimestamp": "2013-11-01T14:50:04.620-07:00", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" ], "id": "6918395933376220338", "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001" ], "kind": "compute#targetPool", "name": "libcloud-lb-demo-lb-tp", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" }, { "creationTimestamp": "2013-11-01T14:51:45.822-07:00", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" ], "id": "2277093827336176997", "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001" ], "kind": "compute#targetPool", "name": "lctargetpool", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool" }, { "creationTimestamp": "2013-11-01T12:09:45.831-07:00", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check" ], "id": "03531496913089065061", "kind": "compute#targetPool", "name": "www-pool", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/www-pool", "sessionAffinity": "NONE" }, { "kind": "compute#targetPool", "id": "17914960036329768493", "creationTimestamp": "2014-11-26T08:37:28.831-08:00", "name": "backup-pool", "description": "", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/default-health-check" ], "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-002" ], "sessionAffinity": "CLIENT_IP", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/backup-pool" }, { "kind": "compute#targetPool", "id": "11474672125700394323", "creationTimestamp": "2014-11-24T12:52:13.366-08:00", "name": "lb-pool", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "healthChecks": [ "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" ], "instances": [ "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000" ], "sessionAffinity": "NONE", "failoverRatio": 0.1, "backupPool": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/backup-pool", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lb-pool" } ] }, "regions/us-central2": { "warning": { "code": "NO_RESULTS_ON_PAGE", "data": [ { "key": "scope", "value": "regions/us-central2" } ], "message": "There are no results for scope 'regions/us-central2' on this page." 
} } }, "kind": "compute#targetPoolAggregatedList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/targetPools" } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_images_debian7_delete.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_global_images_debian7_0000664000175000017500000000121712701023453034036 0ustar kamikami00000000000000{ "kind": "compute#operation", "id": "10762099380229198553", "name": "operation-global_images_debian7_delete", "operationType": "delete", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/debian-7-wheezy-v20131120", "targetId": "14881612020726561163", "status": "DONE", "user": "user@developer.gserviceaccount.com", "progress": 100, "insertTime": "2014-03-11T14:37:48.075-07:00", "startTime": "2014-03-11T14:37:48.158-07:00", "endTime": "2014-03-11T14:37:48.634-07:00", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_images_debian7_delete" } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_sticky.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpoo0000664000175000017500000000064312701023453034052 0ustar kamikami00000000000000{ "creationTimestamp": "2014-07-11T15:52:43.720-07:00", "id": "13598380121688918358", "kind": "compute#targetPool", "name": "lctargetpool-sticky", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool-sticky", "sessionAffinity": "CLIENT_IP_PROTO" 
}././@LongLink0000000000000000000000000000022700000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_ta0000664000175000017500000000153412701023453034073 0ustar kamikami00000000000000{ "endTime": "2013-09-03T01:28:37.095-07:00", "id": "14738174613993796821", "insertTime": "2013-09-03T01:28:32.889-07:00", "kind": "compute#operation", "name": "operation-regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post", "operationType": "removeHealthCheck", "progress": 100, "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post", "startTime": "2013-09-03T01:28:32.942-07:00", "status": "DONE", "targetId": "16862638289615591831", "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", "user": "user@gserviceaccount.com" } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwa0000664000175000017500000000116412701023453034035 0ustar kamikami00000000000000{ "IPAddress": "173.255.119.224", "IPProtocol": "TCP", "creationTimestamp": "2013-09-03T00:17:25.544-07:00", "id": "10901665092293158938", "kind": "compute#forwardingRule", "name": "lcforwardingrule", "description": "test forwarding rule", "portRange": "8000-8500", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/aggregated_targetInstances.json0000664000175000017500000000664712701023453031472 0ustar kamikami00000000000000{ "kind": "compute#targetInstanceAggregatedList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/targetInstances", "id": "projects/project_name/aggregated/targetInstances", "items": { "zones/asia-east1-a": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/asia-east1-a' on this page.", "data": [ { "key": "scope", "value": "zones/asia-east1-a" } ] } }, "zones/asia-east1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/asia-east1-b' on this page.", "data": [ { "key": "scope", "value": "zones/asia-east1-b" } ] } }, "zones/asia-east1-c": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/asia-east1-c' on this page.", "data": [ { "key": "scope", "value": "zones/asia-east1-c" } ] } }, "zones/europe-west1-a": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/europe-west1-a' on this page.", "data": [ { "key": "scope", "value": "zones/europe-west1-a" } ] } }, "zones/europe-west1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/europe-west1-b' on this page.", "data": [ { "key": "scope", "value": "zones/europe-west1-b" } ] } }, "zones/europe-west1-c": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/europe-west1-c' on this page.", "data": [ { "key": "scope", "value": "zones/europe-west1-c" } ] } }, "zones/us-central1-a": { "targetInstances": [ { "kind": 
"compute#targetInstance", "id": "8092539649535704539", "creationTimestamp": "2014-08-07T12:46:10.372-07:00", "name": "hello", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "natPolicy": "NO_NAT", "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/targetInstances/hello" }, { "kind": "compute#targetInstance", "id": "9539205115599811578", "creationTimestamp": "2014-08-07T13:09:19.634-07:00", "name": "lctargetinstance", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", "natPolicy": "NO_NAT", "instance": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/targetInstances/lctargetinstance" } ] }, "zones/us-central1-b": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/us-central1-b' on this page.", "data": [ { "key": "scope", "value": "zones/us-central1-b" } ] } }, "zones/us-central1-f": { "warning": { "code": "NO_RESULTS_ON_PAGE", "message": "There are no results for scope 'zones/us-central1-f' on this page.", "data": [ { "key": "scope", "value": "zones/us-central1-f" } ] } } } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/projects_centos-cloud_global_images.json0000664000175000017500000003711612701023453033332 0ustar kamikami00000000000000{ "kind": "compute#imageList", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images", "id": "projects/centos-cloud/global/images", "items": [ { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20131120", "id": "11748647391859510935", "creationTimestamp": "2013-11-25T15:13:50.611-08:00", "name": 
"centos-6-v20131120", "description": "SCSI-enabled CentOS 6 built on 2013-11-20", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140318" }, "status": "READY", "archiveSizeBytes": "269993565", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140318", "id": "11743140967858608122", "creationTimestamp": "2014-03-19T15:01:13.388-07:00", "name": "centos-6-v20140318", "description": "CentOS 6.5 x86_64 built on 2014-03-18", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140408" }, "status": "READY", "archiveSizeBytes": "341230444", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140408", "id": "18033188469723077298", "creationTimestamp": "2014-04-09T10:31:57.518-07:00", "name": "centos-6-v20140408", "description": "CentOS 6.5 x86_64 built on 2014-04-08", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140415" }, "status": "READY", "archiveSizeBytes": "342252847", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140415", "id": "10463166969914166288", "creationTimestamp": "2014-04-22T12:05:16.927-07:00", "name": "centos-6-v20140415", "description": "CentOS 6.5 x86_64 built on 2014-04-15", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { 
"state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140522" }, "status": "READY", "archiveSizeBytes": "1026663807", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140522", "id": "14390164727436022001", "creationTimestamp": "2014-06-03T10:21:42.109-07:00", "name": "centos-6-v20140522", "description": "CentOS 6.5 x86_64 built on 2014-05-22", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140606" }, "status": "READY", "archiveSizeBytes": "1028292810", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140605", "id": "16310166269920012092", "creationTimestamp": "2014-06-05T11:04:45.767-07:00", "name": "centos-6-v20140605", "description": "CentOS 6.5 x86_64 built on 2014-06-05", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140606" }, "status": "READY", "archiveSizeBytes": "1028745777", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140606", "id": "6290630306542078308", "creationTimestamp": "2014-06-06T13:16:42.265-07:00", "name": "centos-6-v20140606", "description": "CentOS 6.5 x86_64 built on 2014-06-06", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140619" }, "status": "READY", 
"archiveSizeBytes": "1028757792", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140619", "id": "3614861379648377676", "creationTimestamp": "2014-06-24T13:28:11.552-07:00", "name": "centos-6-v20140619", "description": "CentOS 6.5 x86_64 built on 2014-06-19", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140718" }, "status": "READY", "archiveSizeBytes": "1029860991", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140718", "id": "16259951858818091437", "creationTimestamp": "2014-07-24T09:02:18.298-07:00", "name": "centos-6-v20140718", "description": "CentOS 6.5 x86_64 built on 2014-07-18", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140924" }, "status": "READY", "archiveSizeBytes": "1031630715", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140924", "id": "13087714199807465700", "creationTimestamp": "2014-09-24T19:21:53.421-07:00", "name": "centos-6-v20140924", "description": "CentOS 6.5 x86_64 built on 2014-09-24", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140926" }, "status": "READY", "archiveSizeBytes": "1040237724", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": 
"https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20140926", "id": "2580521871229876339", "creationTimestamp": "2014-09-29T09:26:44.364-07:00", "name": "centos-6-v20140926", "description": "CentOS 6.5 x86_64 built on 2014-09-26", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141007" }, "status": "READY", "archiveSizeBytes": "1040082792", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141007", "id": "3381938258505751441", "creationTimestamp": "2014-10-16T14:52:10.720-07:00", "name": "centos-6-v20141007", "description": "CentOS 6.5 x86_64 built on 2014-10-07", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141016" }, "status": "READY", "archiveSizeBytes": "1040311077", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141016", "id": "2365868823508405185", "creationTimestamp": "2014-10-17T16:46:57.144-07:00", "name": "centos-6-v20141016", "description": "CentOS 6.5 x86_64 built on 2014-10-16", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141021" }, "status": "READY", "archiveSizeBytes": "1040361036", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141021", "id": "10836725743769588052", "creationTimestamp": 
"2014-10-22T18:24:03.632-07:00", "name": "centos-6-v20141021", "description": "CentOS 6.5 x86_64 built on 2014-10-21", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141108" }, "status": "READY", "archiveSizeBytes": "1040416587", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141108", "id": "4053040619898291132", "creationTimestamp": "2014-11-10T14:25:17.670-08:00", "name": "centos-6-v20141108", "description": "CentOS 6.6 x86_64 built on 2014-11-08", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141205" }, "status": "READY", "archiveSizeBytes": "1049466963", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141205", "id": "17207937043950962086", "creationTimestamp": "2014-12-08T16:14:54.943-08:00", "name": "centos-6-v20141205", "description": "CentOS 6.6 x86_64 built on 2014-12-05", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "1056393081", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20140903", "id": "4568702763004249623", "creationTimestamp": "2014-09-04T09:50:19.966-07:00", "name": "centos-7-v20140903", "description": "CentOS 7.0 x86_64 built on 2014-09-03", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": 
"https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20140924" }, "status": "READY", "archiveSizeBytes": "1168167201", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20140924", "id": "4822681162745636585", "creationTimestamp": "2014-09-24T19:57:13.650-07:00", "name": "centos-7-v20140924", "description": "CentOS 7 x86_64 built on 2014-09-24", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20140926" }, "status": "READY", "archiveSizeBytes": "1181699781", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20140926", "id": "11630347837395986864", "creationTimestamp": "2014-09-29T09:29:54.626-07:00", "name": "centos-7-v20140926", "description": "CentOS 7 x86_64 built on 2014-09-26", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20141007" }, "status": "READY", "archiveSizeBytes": "1182441174", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20141007", "id": "13420104487111729570", "creationTimestamp": "2014-10-16T14:18:33.905-07:00", "name": "centos-7-v20141007", "description": "CentOS 7.0 x86_64 built on 2014-10-07", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20141016" }, "status": "READY", "archiveSizeBytes": "1182982164", "diskSizeGb": "10" 
}, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20141016", "id": "4506010319257803087", "creationTimestamp": "2014-10-17T16:43:06.539-07:00", "name": "centos-7-v20141016", "description": "CentOS 7.0 x86_64 built on 2014-10-16", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20141021" }, "status": "READY", "archiveSizeBytes": "1184558412", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20141021", "id": "4536638025069785573", "creationTimestamp": "2014-10-22T18:27:40.851-07:00", "name": "centos-7-v20141021", "description": "CentOS 7 x86_64 built on 2014-10-21", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20141108" }, "status": "READY", "archiveSizeBytes": "1183591245", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20141108", "id": "853041310537923411", "creationTimestamp": "2014-11-10T14:22:16.416-08:00", "name": "centos-7-v20141108", "description": "CentOS 7 x86_64 built on 2014-11-08", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "deprecated": { "state": "DEPRECATED", "replacement": "https://content.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20141205" }, "status": "READY", "archiveSizeBytes": "1192600188", "diskSizeGb": "10" }, { "kind": "compute#image", "selfLink": "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20141205", "id": "9955643093605856709", 
"creationTimestamp": "2014-12-08T16:35:02.271-08:00", "name": "centos-7-v20141205", "description": "CentOS 7 x86_64 built on 2014-12-05", "sourceType": "RAW", "rawDisk": { "source": "", "containerType": "TAR" }, "status": "READY", "archiveSizeBytes": "1196735889", "diskSizeGb": "10" } ] } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-001.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-d0000664000175000017500000000332612701023453033534 0ustar kamikami00000000000000{ "canIpForward": false, "creationTimestamp": "2013-12-13T10:51:25.165-08:00", "disks": [ { "boot": true, "deviceName": "libcloud-lb-demo-www-001", "index": 0, "kind": "compute#attachedDisk", "mode": "READ_WRITE", "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/disks/libcloud-lb-demo-www-001", "type": "PERSISTENT" } ], "id": "11523404878553997348", "kind": "compute#instance", "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1", "metadata": { "fingerprint": "09vSzO6KXcw=", "items": [ { "key": "startup-script", "value": "apt-get -y update && apt-get -y install apache2 && hostname > /var/www/index.html" } ], "kind": "compute#metadata" }, "name": "libcloud-lb-demo-www-001", "networkInterfaces": [ { "accessConfigs": [ { "kind": "compute#accessConfig", "name": "External NAT", "natIP": "23.236.58.59", "type": "ONE_TO_ONE_NAT" } ], "name": "nic0", "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", "networkIP": "10.240.94.107" } ], "scheduling": { "automaticRestart": true, "onHostMaintenance": "MIGRATE" }, "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001", "status": "RUNNING", 
"tags": { "fingerprint": "XI0he92M8l8=", "items": [ "libcloud-lb-demo-www" ] }, "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/vultr/0000775000175000017500000000000013160535110023666 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/vultr/list_locations.json0000664000175000017500000000230312701023453027607 0ustar kamikami00000000000000{"6":{"DCID":"6","name":"Atlanta","country":"US","continent":"North America","state":"GA"},"2":{"DCID":"2","name":"Chicago","country":"US","continent":"North America","state":"IL"},"3":{"DCID":"3","name":"Dallas","country":"US","continent":"North America","state":"TX"},"5":{"DCID":"5","name":"Los Angeles","country":"US","continent":"North America","state":"CA"},"39":{"DCID":"39","name":"Miami","country":"US","continent":"","state":"FL"},"1":{"DCID":"1","name":"New Jersey","country":"US","continent":"North America","state":"NJ"},"4":{"DCID":"4","name":"Seattle","country":"US","continent":"North America","state":"WA"},"12":{"DCID":"12","name":"Silicon Valley","country":"US","continent":"North America","state":"CA"},"7":{"DCID":"7","name":"Amsterdam","country":"NL","continent":"Europe","state":""},"25":{"DCID":"25","name":"Tokyo","country":"JP","continent":"Asia","state":""},"8":{"DCID":"8","name":"London","country":"GB","continent":"Europe","state":""},"24":{"DCID":"24","name":"France","country":"FR","continent":"Europe","state":""},"9":{"DCID":"9","name":"Frankfurt","country":"DE","continent":"Europe","state":""},"19":{"DCID":"19","name":"Australia","country":"AU","continent":"Australia","state":""}} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/vultr/list_key_pairs.json0000664000175000017500000000106413153541406027612 0ustar kamikami00000000000000{ "5806a8ef2a0c6": { "SSHKEYID": "5806a8ef2a0c6", "date_created": "2016-10-18 18:57:51", "name": "test-key-pair", "ssh_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDZYMivN4KqJZ3dNEWeH20PUeB2ZnZRkk91K5SgxWrEotgpX4pMVM/9oxkh4bKw5CBzT6KAOghzLcBViFpNVjDyyet9wwVcy6cjuUynx63UtbTLB+r4D+bD/+/9rQTeckvGYg9Y8xIKL/oaVeCcdBM8JhSQZbZ/aARi2K79FWGH61azAqc/JCHT63f3FhspjdVpcVoVOjsZG3WG6Vymys2cXH1PM5qMgBbmp+5LkSv0LvUULyxcrtKkUyntPr1BvIFSNbo2lhXLwnM4DXONP6U/yMFte+ZwiajF7pUCdB9HqvXVU+IfswYSDuhHzL9j8+ZLQ2enF/lkkYxpMHE2t215 tester@test" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/vultr/create_node.json0000664000175000017500000000002313153541406027033 0ustar kamikami00000000000000{ "SUBID": "1" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/vultr/error_rate_limit.txt0000664000175000017500000000013513153541406027777 0ustar kamikami00000000000000Rate limit reached - please try your request again later. Current rate limit: 2 requests/secapache-libcloud-2.2.1/libcloud/test/compute/fixtures/vultr/list_images.json0000664000175000017500000000314012701023453027061 0ustar kamikami00000000000000{"127":{"OSID":127,"name":"CentOS 6 x64","arch":"x64","family":"centos","windows":false},"147":{"OSID":147,"name":"CentOS 6 i386","arch":"i386","family":"centos","windows":false},"162":{"OSID":162,"name":"CentOS 5 x64","arch":"x64","family":"centos","windows":false},"163":{"OSID":163,"name":"CentOS 5 i386","arch":"i386","family":"centos","windows":false},"167":{"OSID":167,"name":"CentOS 7 x64","arch":"x64","family":"centos","windows":false},"160":{"OSID":160,"name":"Ubuntu 14.04 x64","arch":"x64","family":"ubuntu","windows":false},"161":{"OSID":161,"name":"Ubuntu 14.04 i386","arch":"i386","family":"ubuntu","windows":false},"128":{"OSID":128,"name":"Ubuntu 12.04 x64","arch":"x64","family":"ubuntu","windows":false},"148":{"OSID":148,"name":"Ubuntu 12.04 i386","arch":"i386","family":"ubuntu","windows":false},"181":{"OSID":181,"name":"Ubuntu 14.10 x64","arch":"x64","family":"ubuntu","windows":false},"182":{"OSID":182,"name":"Ubuntu 14.10 
i386","arch":"i386","family":"ubuntu","windows":false},"139":{"OSID":139,"name":"Debian 7 x64 (wheezy)","arch":"x64","family":"debian","windows":false},"152":{"OSID":152,"name":"Debian 7 i386 (wheezy)","arch":"i386","family":"debian","windows":false},"140":{"OSID":140,"name":"FreeBSD 10 x64","arch":"x64","family":"freebsd","windows":false},"124":{"OSID":124,"name":"Windows 2012 R2 x64","arch":"x64","family":"windows","windows":true},"159":{"OSID":159,"name":"Custom","arch":"x64","family":"iso","windows":false},"164":{"OSID":164,"name":"Snapshot","arch":"x64","family":"snapshot","windows":false},"180":{"OSID":180,"name":"Backup","arch":"x64","family":"backup","windows":false}}apache-libcloud-2.2.1/libcloud/test/compute/fixtures/vultr/create_key_pair.json0000664000175000017500000000004413153541406027714 0ustar kamikami00000000000000{ "SSHKEYID": "5806ab4970aba" } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/vultr/list_nodes.json0000664000175000017500000000343612701023453026734 0ustar kamikami00000000000000{"1":{"SUBID":"1","os":"Ubuntu 12.04 x64","ram":"1024 MB","disk":"Virtual 20 GB","main_ip":"108.61.206.153","vcpu_count":"1","location":"Los Angeles","DCID":"5","default_password":"twizewnatpom!7","date_created":"2014-03-21 12:46:35","pending_charges":"1.92","status":"active","cost_per_month":"7.00","current_bandwidth_gb":0.929,"allowed_bandwidth_gb":"2000","netmask_v4":"255.255.254.0","gateway_v4":"108.61.206.1","power_status":"running","VPSPLANID":"30","v6_network":"::","v6_main_ip":"","v6_network_size":"0","label":"","internal_ip":"","kvm_url":"https:\/\/my.vultr.com\/subs\/vps\/novnc\/api.php?data=IF3C6VCEN5NFOZ3VMM3FOV3JJVXUQV3OHBWDG6TUKI3VST3JMFDXOOCMIE3HCTBWKVJXOZZYF5BVMZ3IM5XXGWRZIVZW4S2WKAVTSMTQHFCG4QTCNFUEKOCXKY3CW3LGNF4HIVTVJ5GXM4CJORTU6SKYOBDE6WJVMZ3E4ZSVOB2FQ4KYF5DXC5CTJI4FETDIGBITQQZPGFLXKTSRKRJS6ODOMFKDSNLLNVETONSNKA2XQ6CWLJMW6L2EGI2U6SDNN5FGUTJYNB3UC5DXN46Q","auto_backups":"no"},"2":{"SUBID":"2","os":"Ubuntu 14.04 x64","ram":"768 
MB","disk":"Virtual 15 GB","main_ip":"104.207.153.143","vcpu_count":"1","location":"Los Angeles","DCID":"5","default_password":"cewxoaezap!0","date_created":"2014-11-08 14:12:13","pending_charges":"0.01","status":"active","cost_per_month":"5.00","current_bandwidth_gb":0,"allowed_bandwidth_gb":"1000","netmask_v4":"255.255.254.0","gateway_v4":"104.207.152.1","power_status":"running","VPSPLANID":"29","v6_network":"::","v6_main_ip":"","v6_network_size":"0","label":"vultr-test1","internal_ip":"","kvm_url":"https:\/\/my.vultr.com\/subs\/vps\/novnc\/api.php?data=NBUUYMTDI4VXGVZXOFBE6UBWKFLWE43MI5EFOR3MJNZW4NRXLFBHA33BHF3C63LSOJRXAU2PO5GHM5LPOFAW2MSDMFZWUMCNNJRG6TRWJREWYNBLG5VTG2DEGIYWITKIGV2FA3JTNJVEETLOGBHFG42XLEZHG22VFNWHE5RUKFIWU3DSOJCS6WDQGJRDIZRPIU2HILZTKB4E4MZSNZIFEQ3SOFSDANCBHBBEWRLVGZEUEVDSJVQVKOKZNQ4GKSRSIJEG62TWMREG6USNIE6Q","auto_backups":"no"}} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/vultr/list_sizes.json0000664000175000017500000001041513153541406026761 0ustar kamikami00000000000000{"201":{"VPSPLANID":"201","name":"1024 MB RAM,25 GB SSD,1.00 TB BW","vcpu_count":"1","ram":"1024","disk":"25","bandwidth":"1.00","bandwidth_gb":"1024","price_per_month":"5.00","plan_type":"SSD","windows":false,"available_locations":[1,2,3,4,5,6,7,8,9,12,19,24,25,39,40]},"202":{"VPSPLANID":"202","name":"2048 MB RAM,40 GB SSD,2.00 TB BW","vcpu_count":"1","ram":"2048","disk":"40","bandwidth":"2.00","bandwidth_gb":"2048","price_per_month":"10.00","plan_type":"SSD","windows":false,"available_locations":[1,2,3,4,5,6,7,8,9,12,19,24,25,39,40]},"203":{"VPSPLANID":"203","name":"4096 MB RAM,60 GB SSD,3.00 TB BW","vcpu_count":"2","ram":"4096","disk":"60","bandwidth":"3.00","bandwidth_gb":"3072","price_per_month":"20.00","plan_type":"SSD","windows":false,"available_locations":[1,2,3,4,5,6,7,8,9,12,19,24,25,39,40]},"204":{"VPSPLANID":"204","name":"8192 MB RAM,100 GB SSD,4.00 TB 
BW","vcpu_count":"4","ram":"8192","disk":"100","bandwidth":"4.00","bandwidth_gb":"4096","price_per_month":"40.00","plan_type":"SSD","windows":false,"available_locations":[1,2,3,4,5,6,7,8,9,12,19,24,25,39,40]},"205":{"VPSPLANID":"205","name":"16384 MB RAM,200 GB SSD,5.00 TB BW","vcpu_count":"6","ram":"16384","disk":"200","bandwidth":"5.00","bandwidth_gb":"5120","price_per_month":"80.00","plan_type":"SSD","windows":false,"available_locations":[1,2,3,4,5,6,8,9,12,19,24,25,39,40]},"206":{"VPSPLANID":"206","name":"32768 MB RAM,300 GB SSD,6.00 TB BW","vcpu_count":"8","ram":"32768","disk":"300","bandwidth":"6.00","bandwidth_gb":"6144","price_per_month":"160.00","plan_type":"SSD","windows":false,"available_locations":[1,2,3,4,5,6,8,9,12,19,24,25,39,40]},"207":{"VPSPLANID":"207","name":"65536 MB RAM,400 GB SSD,10.00 TB BW","vcpu_count":"16","ram":"65536","disk":"400","bandwidth":"10.00","bandwidth_gb":"10240","price_per_month":"320.00","plan_type":"SSD","windows":false,"available_locations":[2,3,4,6,8,9,12,19,24,39,40]},"208":{"VPSPLANID":"208","name":"98304 MB RAM,800 GB SSD,15.00 TB BW","vcpu_count":"24","ram":"98304","disk":"800","bandwidth":"15.00","bandwidth_gb":"15360","price_per_month":"640.00","plan_type":"SSD","windows":false,"available_locations":[2,3,4,6,9,12,19,39]},"87":{"VPSPLANID":"87","name":"512 MB RAM,125 GB SATA,1.00 TB BW","vcpu_count":"1","ram":"512","disk":"125","bandwidth":"1.00","bandwidth_gb":"1024","price_per_month":"5.00","plan_type":"SATA","windows":false,"available_locations":[5,7]},"88":{"VPSPLANID":"88","name":"1024 MB RAM,250 GB SATA,2.00 TB BW","vcpu_count":"1","ram":"1024","disk":"250","bandwidth":"2.00","bandwidth_gb":"2048","price_per_month":"10.00","plan_type":"SATA","windows":false,"available_locations":[]},"89":{"VPSPLANID":"89","name":"2048 MB RAM,500 GB SATA,3.00 TB 
BW","vcpu_count":"1","ram":"2048","disk":"500","bandwidth":"3.00","bandwidth_gb":"3072","price_per_month":"20.00","plan_type":"SATA","windows":false,"available_locations":[]},"90":{"VPSPLANID":"90","name":"3072 MB RAM,750 GB SATA,4.00 TB BW","vcpu_count":"2","ram":"3072","disk":"750","bandwidth":"4.00","bandwidth_gb":"4096","price_per_month":"30.00","plan_type":"SATA","windows":false,"available_locations":[]},"91":{"VPSPLANID":"91","name":"4096 MB RAM,1000 GB SATA,5.00 TB BW","vcpu_count":"2","ram":"4096","disk":"1000","bandwidth":"5.00","bandwidth_gb":"5120","price_per_month":"40.00","plan_type":"SATA","windows":false,"available_locations":[]},"115":{"VPSPLANID":"115","name":"8192 MB RAM,110 GB SSD,10.00 TB BW","vcpu_count":"2","ram":"8192","disk":"110","bandwidth":"10.00","bandwidth_gb":"10240","price_per_month":"60.00","plan_type":"DEDICATED","windows":false,"available_locations":[1,2,25]},"116":{"VPSPLANID":"116","name":"16384 MB RAM,110 GB SSD,20.00 TB BW","vcpu_count":"4","ram":"16384","disk":"110","bandwidth":"20.00","bandwidth_gb":"20480","price_per_month":"120.00","plan_type":"DEDICATED","windows":false,"available_locations":[2]},"117":{"VPSPLANID":"117","name":"24576 MB RAM,110 GB SSD,30.00 TB BW","vcpu_count":"6","ram":"24576","disk":"110","bandwidth":"30.00","bandwidth_gb":"30720","price_per_month":"180.00","plan_type":"DEDICATED","windows":false,"available_locations":[2]},"118":{"VPSPLANID":"118","name":"32768 MB RAM,110 GB SSD,40.00 TB BW","vcpu_count":"8","ram":"32768","disk":"110","bandwidth":"40.00","bandwidth_gb":"40960","price_per_month":"240.00","plan_type":"DEDICATED","windows":false,"available_locations":[2]}}apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/0000775000175000017500000000000013160535107024352 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17166.json0000664000175000017500000000172212701023453032004 0ustar kamikami00000000000000{ 
"queryasyncjobresultresponse" : {"jobid":17166,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Destroyed","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.13%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_notemplates.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_notemplates.0000664000175000017500000000011712701023453034123 0ustar kamikami00000000000000{ "listavailableproducttypesresponse" : { "count" : 0, "producttypes" : [] } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17165.json0000664000175000017500000000172012701023453032001 0ustar kamikami00000000000000{ "queryasyncjobresultresponse" : 
{"jobid":17165,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.14%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_default.json0000664000175000017500000007257312701023453034125 0ustar kamikami00000000000000{"listavailableproducttypesresponse":{"count":112,"producttypes":[{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-829-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-829-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-829-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Centos 5.4 
32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-829-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-829-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-829-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-829-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-829-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-830-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-830-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Centos 5.4 
64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-830-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-830-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-830-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-830-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-830-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-830-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-867-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Debian 6 Squeeze 
32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-867-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-867-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-867-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-867-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-867-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-867-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-867-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Debian 6 Squeeze 
32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-880-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-880-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-880-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-880-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-880-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-880-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-880-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Debian 6 Squeeze 
64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-880-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-881-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-881-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-881-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-881-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-881-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-881-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Fedora release 13 64 
bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-881-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-881-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-877-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-877-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-877-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-877-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-877-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Ubuntu 11.04 
32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-877-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-877-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-877-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-878-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-878-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-878-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-878-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Ubuntu 11.04 
64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-878-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-878-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-878-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-878-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-988-12-25","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-988-13-25","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-988-14-26","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"WIN 2003 R2 
32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-988-15-26","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-988-16-27","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-988-17-27","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-988-18-28","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-988-36-28","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-986-12-25","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-986-13-25","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"WIN 2003 ENT 
32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-986-14-26","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-986-15-26","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-986-16-27","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-986-17-27","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-986-18-28","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-986-36-28","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-989-12-25","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"WIN 2008 ENT 
32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-989-13-25","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-989-14-26","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-989-15-26","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-989-16-27","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-989-17-27","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-989-18-28","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-989-36-28","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"WIN 2008 ENT 
32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-990-12-25","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-990-13-25","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-990-14-26","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-990-15-26","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-990-16-27","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-990-17-27","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-990-18-28","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"WIN 2008 R2 
64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-990-36-28","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-991-12-25","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-991-13-25","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-991-14-26","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-991-15-26","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-991-16-27","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-991-17-27","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"WIN 2008 R2 
Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-991-18-28","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-991-36-28","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-1111-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-1111-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-1111-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-1111-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-1111-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Ubuntu 10.04 
32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-1111-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-1111-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-1111-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-1109-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-1109-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-1109-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-1109-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Ubuntu 10.04 
64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-1109-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-1109-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-1109-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-1109-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"}]}} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail.json0000664000175000017500000000016012701023453033576 0ustar kamikami00000000000000{ "deployvirtualmachineresponse" : {"errorcode" : 431, "errortext" : "Unable to find service offering: 104"} } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/listZones_default.json0000664000175000017500000000017412701023453030741 0ustar kamikami00000000000000{ "listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/listVirtualMachines_default.json0000664000175000017500000000321512701023453032740 0ustar kamikami00000000000000{ "listvirtualmachinesresponse" : { "virtualmachine" : [ 
{"id":2600,"name":"test","displayname":"test","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:06:42+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"1.78%","networkkbsread":2,"networkkbswrite":2,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3891,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.2.1","ipaddress":"1.1.1.116","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}, {"id":2601,"name":"test","displayname":"test","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:09:44+0000","state":"Starting","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"jobid":17147,"jobstatus":0,"nic":[{"id":3892,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.2.1","ipaddress":"1.1.1.203","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"} ] } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_nodisk.json0000664000175000017500000000217012701023453033752 0ustar kamikami00000000000000{"listavailableproducttypesresponse": {"count": 2, "producttypes": [{"diskofferingdesc": "100GB", "templatedesc": "Centos 5.4 32bit", "serviceofferingdesc": "premium 2vCore 2GB RAM", "productstate": 
"available", "product": "Standard", "zoneid": "eceb5d65-6571-4696-875f-5a17949f3317", "zonedesc": "KOR-Central A", "templateid": "d2241244-0b6c-444d-b075-72cfcd562826", "diskofferingid": "cc85e4dd-bfd9-4cec-aa22-cf226c1da92f", "serviceofferingid": "94341d94-ccd4-4dc4-9ccb-05c0c632d0b4", "productid": "eceb5d65-6571-4696-875f-5a17949f3317-d2241244-0b6c-444d-b075-72cfcd562826-94341d94-ccd4-4dc4-9ccb-05c0c632d0b4-cc85e4dd-bfd9-4cec-aa22-cf226c1da92f"}, {"diskofferingdesc": "20GB", "templatedesc": "Centos 5.4 32bit", "serviceofferingdesc": "premium 2vCore 2GB RAM", "productstate": "available", "product": "Standard", "zoneid": "eceb5d65-6571-4696-875f-5a17949f3317", "zonedesc": "KOR-Central A", "templateid": "d2241244-0b6c-444d-b075-72cfcd562826", "serviceofferingid": "94341d94-ccd4-4dc4-9ccb-05c0c632d0b4", "productid": "eceb5d65-6571-4696-875f-5a17949f3317-d2241244-0b6c-444d-b075-72cfcd562826-94341d94-ccd4-4dc4-9ccb-05c0c632d0b4-0"}]}} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17177.json0000664000175000017500000000010212701023453031775 0ustar kamikami00000000000000{ "queryasyncjobresultresponse" : {"jobid":17177,"jobstatus":2} } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_default.json0000664000175000017500000000007712701023453033101 0ustar kamikami00000000000000{ "deployvirtualmachineresponse" : {"jobid":17164,"id":2602} } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/destroyVirtualMachine_default.json0000664000175000017500000000006612701023453033274 0ustar kamikami00000000000000{ "destroyvirtualmachineresponse" : {"jobid":17166} } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/rebootVirtualMachine_default.json0000664000175000017500000000006512701023453033074 0ustar kamikami00000000000000{ "rebootvirtualmachineresponse" : {"jobid":17165} } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_createvolumejob.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_createvolumejob.js0000664000175000017500000000163412701023453034153 0ustar kamikami00000000000000{ "queryasyncjobresultresponse" : {"accountid":"be7d76b3-8823-49c0-86e1-29efd9ea1eb0","userid":"a8bd3087-edc1-4e94-8470-6830404b7292","cmd":"com.cloud.api.commands.CreateVolumeCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"volume":{"id":"60338035-92fb-4d27-98d4-b60ad4b38b87","name":"vol-0","zoneid":"58624957-a150-46a3-acbf-4088776161e5","zonename":"EQ-AMS2-Z01","type":"DATADISK","size":10737418240,"created":"2012-06-05T08:47:54+0200","state":"Allocated","account":"admin","domainid":"bfc35f83-8589-4e93-9150-d57e8479f772","domain":"ROOT","storagetype":"shared","hypervisor":"None","diskofferingid":"6345e3b7-227e-4209-8f8c-1f94219696e6","diskofferingname":"Disk offering","diskofferingdisplaytext":"Disk offering display name","storage":"none","destroyed":false,"isextractable":false}},"created":"2012-06-05T08:47:54+0200","jobid":"35416f6d-1b5b-4ceb-a7d4-aab0deede71b"} } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_attachvolumejob.jsonapache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_attachvolumejob.js0000664000175000017500000000217312701023453034153 0ustar kamikami00000000000000{ "queryasyncjobresultresponse" : 
{"accountid":"be7d76b3-8823-49c0-86e1-29efd9ea1eb0","userid":"a8bd3087-edc1-4e94-8470-6830404b7292","cmd":"com.cloud.api.commands.AttachVolumeCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"volume":{"id":"60338035-92fb-4d27-98d4-b60ad4b38b87","name":"vol-0","zoneid":"58624957-a150-46a3-acbf-4088776161e5","zonename":"EQ-AMS2-Z01","type":"DATADISK","deviceid":5,"virtualmachineid":"ab2c18f6-00a6-43f8-9fe0-efecb3165dd7","vmname":"ab2c18f6-00a6-43f8-9fe0-efecb3165dd7","vmdisplayname":"gre-kickstart","vmstate":"Running","size":10737418240,"created":"2012-06-05T08:47:54+0200","state":"Ready","account":"admin","domainid":"bfc35f83-8589-4e93-9150-d57e8479f772","domain":"ROOT","storagetype":"shared","hypervisor":"KVM","diskofferingid":"6345e3b7-227e-4209-8f8c-1f94219696e6","diskofferingname":"Disk offering 1","diskofferingdisplaytext":"Disk offering 1 display name","storage":"Shared Storage CL01","attached":"2012-06-05T09:17:38+0200","destroyed":false,"isextractable":false}},"created":"2012-06-05T09:17:38+0200","jobid":"e07d6b9b-2b6c-45bd-840b-3c4c3d890168"} } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17164.json0000664000175000017500000000162712701023453032006 0ustar kamikami00000000000000{ "queryasyncjobresultresponse" : {"jobid":17164,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro 
PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/listIpForwardingRules_default.json0000664000175000017500000000050012701023453033242 0ustar kamikami00000000000000{ "listipforwardingrulesresponse" : { "count":1 ,"ipforwardingrule" : [ {"id":"772fd410-6649-43ed-befa-77be986b8906","protocol":"tcp","virtualmachineid":"2600","virtualmachinename":"test","virtualmachinedisplayname":"test","ipaddressid":34000,"ipaddress":"1.1.1.116","startport":33,"endport":34,"state":"Active"} ] } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/listPublicIpAddresses_default.json0000664000175000017500000000241112701023453033204 0ustar kamikami00000000000000{ "listpublicipaddressesresponse" : { "publicipaddress" : [ {"id":34000,"ipaddress":"1.1.1.49","allocated":"2011-06-23T05:20:39+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33999,"ipaddress":"1.1.1.48","allocated":"2011-06-23T05:20:34+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33998,"ipaddress":"1.1.1.47","allocated":"2011-06-23T05:20:30+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, 
{"id":33970,"ipaddress":"1.1.1.19","allocated":"2011-06-20T04:08:34+0000","zoneid":1,"zonename":"Sydney","issourcenat":true,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"} ] } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/listPortForwardingRules_default.json0000664000175000017500000000075312701023453033630 0ustar kamikami00000000000000{ "listportforwardingrulesresponse" : {"count": 1, "portforwardingrule": [{"protocol": "tcp", "virtualmachineid": "7d8de712-aa7a-4901-a8b1-fd223f0ca459", "ipaddress": "178.170.71.253", "cidrlist": "", "tags": [], "ipaddressid": "50cd9456-d4db-4a48-8cf5-950dba8d2fdb", "virtualmachinedisplayname": "yoyo", "privateendport": "22", "state": "Active", "publicendport": "22", "privateport": "22", "virtualmachinename": "yoyo", "publicport": "22", "id": "4644652a-7573-4e50-aafb-48a171c9bcb2"}]}} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail2.json0000664000175000017500000000007712701023453033667 0ustar kamikami00000000000000{ "deployvirtualmachineresponse" : {"jobid":17177,"id":2602} } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/0000775000175000017500000000000013160535107023272 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/describe_images.xml0000664000175000017500000000244512701023453027122 0ustar kamikami00000000000000 1 1 10 cn-qingdao FAD4D9B9-D75F-4A9E-BC13-991C0F06F50F freebsd1001_64_20G_aliaegis_20150527.vhd freebsd1001_64_20G_aliaegis_20150527.vhd linux x86_64 FreeBSD 10.1 64位 /dev/xvda 20 system 100% instance 2015-06-19T07:25:42Z 1.0.0 Available freebsd1001_64_20G_aliaegis_20150527.vhd false false Freebsd 20 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/create_security_group.xml0000664000175000017500000000026313153541406030424 0ustar kamikami00000000000000 
CEF72CEB-54B6-4AE8-B225-F876FF7BA984 sg-F876FF7BA apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/reboot_instance.xml0000664000175000017500000000022712701023453027167 0ustar kamikami00000000000000 DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/destroy_volume_describe_disks.xml0000664000175000017500000000265012701023453032130 0ustar kamikami00000000000000 PostPaid true true true cloud Description ubuntu1404sys 5 system i-28whl2nj2 2014-07-23T02:44:06Z ubuntu1404_64_20G_aliaegis_20150325.vhd cn-qingdao-b 2016-01-04T15:02:17Z /dev/xvda false cn-qingdao d-28zfrmo13 Available 2999-09-08T16:00Z 1 10 1 ED5CF6DD-71CA-462C-9C94-A61A78A01479 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/describe_instance_types.xml0000664000175000017500000000130412701023453030676 0ustar kamikami00000000000000 1651FBB6-4FBF-49FF-A9F5-DF5D696C7EC6 ecs.t1.xsmall 1 0.5 ecs.t1 ecs.s2.small 2 1.0 ecs.s2 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/copy_image.xml0000664000175000017500000000025512701023453026126 0ustar kamikami00000000000000 i-28n7dkvov DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/describe_snapshots.xml0000664000175000017500000000133112701023453027670 0ustar kamikami00000000000000 1 1 10 09CAE6FF-B864-4104-BBF4-FE1EF08066A6 2016-01-05T11:12:42Z progressing sys-snapshot-20160108 system d-28x069z28 s-28n9lltbf 20 0% none apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/delete_snapshot.xml0000664000175000017500000000022712701023453027172 0ustar kamikami00000000000000 DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/describe_disks.xml0000664000175000017500000000471512701023453026774 0ustar kamikami00000000000000 PostPaid true false false cloud 5 data 2014-07-23T02:44:07Z cn-qingdao-b 2014-07-23T07:47:35Z 2014-07-23T08:28:48Z true cn-qingdao d-28m5zbua0 Available PostPaid true true true cloud Description 
ubuntu1404sys 5 system i-28whl2nj2 2014-07-23T02:44:06Z ubuntu1404_64_20G_aliaegis_20150325.vhd cn-qingdao-b 2016-01-04T15:02:17Z /dev/xvda false cn-qingdao d-28zfrmo13 In_use 2999-09-08T16:00Z 1 10 2 ED5CF6DD-71CA-462C-9C94-A61A78A01479 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/create_image.xml0000664000175000017500000000026112701023453026414 0ustar kamikami00000000000000 i-28n7dkvov DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/join_security_group_by_id.xml0000664000175000017500000000023513153541406031265 0ustar kamikami00000000000000 473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/pages_describe_images.xml0000664000175000017500000000244313153541406030304 0ustar kamikami00000000000000 1 2 1 cn-qingdao FAD4D9B9-D75F-4A9E-BC13-991C0F06F50F freebsd1001_64_20G_aliaegis_20150527.vhd freebsd1001_64_20G_aliaegis_20150527.vhd linux x86_64 FreeBSD 10.1 64位 /dev/xvda 20 system 100% instance 2015-06-19T07:25:42Z 1.0.0 Available freebsd1001_64_20G_aliaegis_20150527.vhd false false Freebsd 20 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/reboot_node_describe_instances.xml0000664000175000017500000000353212701023453032221 0ustar kamikami00000000000000 1 1 10 CA75EE06-D5F7-433C-870B-5042EED6C1DC ubuntu1404_64_20G_aliaegis_20150325.vhd 10.163.197.74 ecs.t1 i-28n7dkvov -1 cn-qingdao-b PayByTraffic ca0122d9-374d-4fce-9fc0-71f7c3eaf1c3 false 1024 1 1 true sg-28ou0f3xa iZ28n7dkvovZ classic 114.215.124.73 iZ28n7dkvovZ ecs.t1.small 2015-12-27T07:35Z Running cn-qingdao PostPaid 2999-09-08T16:00Z apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/detach_volume_describe_disks.xml0000664000175000017500000000264512701023453031673 0ustar kamikami00000000000000 PostPaid true true true cloud Description ubuntu1404sys 5 system i-28whl2nj2 2014-07-23T02:44:06Z ubuntu1404_64_20G_aliaegis_20150325.vhd cn-qingdao-b 2016-01-04T15:02:17Z /dev/xvda false cn-qingdao d-28zfrmo13 
In_use 2999-09-08T16:00Z 1 10 1 ED5CF6DD-71CA-462C-9C94-A61A78A01479 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/delete_security_group_by_id.xml0000664000175000017500000000024013153541406031564 0ustar kamikami00000000000000 CEF72CEB-54B6-4AE8-B225-F876FF7BA984 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/describe_security_group_attributes.xml0000664000175000017500000000152513153541406033211 0ustar kamikami00000000000000 94D38899-626D-434A-891F-7E1F77A81525 4 1 10 cn-hangzhou sg-F876FF7BA Test sg-086FFC27A test00212 sg-BA4B7975B cn-hangzhou test group sg-35F20777C cn-hangzhou test group apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/create_instance.xml0000664000175000017500000000027512701023453027143 0ustar kamikami00000000000000 i-28n7dkvov DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/delete_image.xml0000664000175000017500000000022112701023453026407 0ustar kamikami00000000000000 DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/describe_instances.xml0000664000175000017500000000353312701023453027643 0ustar kamikami00000000000000 1 1 10 CA75EE06-D5F7-433C-870B-5042EED6C1DC ubuntu1404_64_20G_aliaegis_20150325.vhd 10.163.197.74 ecs.t1 i-28n7dkvov -1 cn-qingdao-b PayByTraffic ca0122d9-374d-4fce-9fc0-71f7c3eaf1c3 false 1024 1 1 true sg-28ou0f3xa iZ28n7dkvovZ classic 114.215.124.73 iZ28n7dkvovZ ecs.t1.small 2015-12-27T07:35Z Starting cn-qingdao PostPaid 2999-09-08T16:00Z apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/create_node_describe_instances.xml0000664000175000017500000000353212701023453032172 0ustar kamikami00000000000000 1 1 10 CA75EE06-D5F7-433C-870B-5042EED6C1DC ubuntu1404_64_20G_aliaegis_20150325.vhd 10.163.197.74 ecs.t1 i-28n7dkvov -1 cn-qingdao-b PayByTraffic ca0122d9-374d-4fce-9fc0-71f7c3eaf1c3 false 1024 1 1 true sg-28ou0f3xa iZ28n7dkvovZ classic 114.215.124.73 iZ28n7dkvovZ ecs.t1.small 2015-12-27T07:35Z Running 
cn-qingdao PostPaid 2999-09-08T16:00Z apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/get_image_describe_images.xml0000664000175000017500000000043612701023453031121 0ustar kamikami00000000000000 0 0 10 cn-qingdao FAD4D9B9-D75F-4A9E-BC13-991C0F06F50F apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/leave_security_group_by_id.xml0000664000175000017500000000023713153541406031424 0ustar kamikami00000000000000 473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/describe_zones.xml0000664000175000017500000000322113153541406027011 0ustar kamikami00000000000000 CE75D5FB-C343-47DB-882D-E0709D95D61E IoOptimized Instance Disk ecs.m2.medium ecs.m1.medium ecs.s2.xlarge ecs.t1.xsmall ecs.s2.large ecs.s2.2xlarge ecs.s3.medium ecs.m1.xlarge ecs.s1.small ecs.s1.large ecs.c2.xlarge ecs.c2.large ecs.s3.large ecs.c1.small ecs.m2.xlarge ecs.c2.medium ecs.t1.small ecs.c1.large ecs.s2.small ecs.s1.medium cn-qingdao-b 青岛可用区B cloud_ssd ephemeral cloud apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/detach_disk.xml0000664000175000017500000000021712701023453026252 0ustar kamikami00000000000000 DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/pages_describe_images_page2.xml0000664000175000017500000000244412701023453031356 0ustar kamikami00000000000000 2 2 1 cn-qingdao FAD4D9B9-D75F-4A9E-BC13-991C0F06F50F freebsd1001_64_20G_aliaegis_20150527.vhd freebsd1001_64_20G_aliaegis_20150527.vhd linux x86_64 FreeBSD 10.1 64位 /dev/xvda 20 system 100% instance 2015-06-19T07:25:42Z 1.0.0 Available freebsd1001_64_20G_aliaegis_20150527.vhd false false Freebsd 20 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/create_volume_describe_disks.xml0000664000175000017500000000264512701023453031706 0ustar kamikami00000000000000 PostPaid true true true cloud Description ubuntu1404sys 5 system i-28whl2nj2 2014-07-23T02:44:06Z ubuntu1404_64_20G_aliaegis_20150325.vhd cn-qingdao-b 2016-01-04T15:02:17Z 
/dev/xvda false cn-qingdao d-28zfrmo13 In_use 2999-09-08T16:00Z 1 10 1 ED5CF6DD-71CA-462C-9C94-A61A78A01479 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/delete_disk.xml0000664000175000017500000000021712701023453026264 0ustar kamikami00000000000000 DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/stop_instance.xml0000664000175000017500000000022312701023453026656 0ustar kamikami00000000000000 DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/describe_regions.xml0000664000175000017500000000201113153541406027315 0ustar kamikami00000000000000 FD28A957-20B2-447E-8A5A-952F50C4EDF0 ap-southeast-1 亚太(新加坡) cn-shenzhen 深圳 cn-qingdao 青岛 cn-beijing 北京 cn-shanghai 上海 us-east-1 美东弗吉尼亚 cn-hongkong 香港 cn-hangzhou 杭州 us-west-1 美国硅谷 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/delete_instance.xml0000664000175000017500000000022712701023453027137 0ustar kamikami00000000000000 DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/start_instance.xml0000664000175000017500000000022512701023453027030 0ustar kamikami00000000000000 DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/stop_node_describe_instances.xml0000664000175000017500000000353212701023453031714 0ustar kamikami00000000000000 1 1 10 CA75EE06-D5F7-433C-870B-5042EED6C1DC ubuntu1404_64_20G_aliaegis_20150325.vhd 10.163.197.74 ecs.t1 i-28n7dkvov -1 cn-qingdao-b PayByTraffic ca0122d9-374d-4fce-9fc0-71f7c3eaf1c3 false 1024 1 1 true sg-28ou0f3xa iZ28n7dkvovZ classic 114.215.124.73 iZ28n7dkvovZ ecs.t1.small 2015-12-27T07:35Z Stopped cn-qingdao PostPaid 2999-09-08T16:00Z apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/destroy_node_describe_instances.xml0000664000175000017500000000353212701023453032420 0ustar kamikami00000000000000 1 1 10 CA75EE06-D5F7-433C-870B-5042EED6C1DC ubuntu1404_64_20G_aliaegis_20150325.vhd 10.163.197.74 ecs.t1 
i-28n7dkvov -1 cn-qingdao-b PayByTraffic ca0122d9-374d-4fce-9fc0-71f7c3eaf1c3 false 1024 1 1 true sg-28ou0f3xa iZ28n7dkvovZ classic 114.215.124.73 iZ28n7dkvovZ ecs.t1.small 2015-12-27T07:35Z Stopped cn-qingdao PostPaid 2999-09-08T16:00Z apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/create_snapshot.xml0000664000175000017500000000027512701023453027176 0ustar kamikami00000000000000 i-28n7dkvov DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/describe_security_groups.xml0000664000175000017500000000113612701023453031117 0ustar kamikami00000000000000 1 1 10 cn-qingdao 7F154B0C-2594-416D-B546-75021185A6DA 2015-06-26T08:35:30Z sg-28ou0f3xa sg-28ou0f3xa System created security group. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/create_public_ip.xml0000664000175000017500000000032513153541406027306 0ustar kamikami00000000000000 F2EF6A3B-E345-46B9-931E-0EA094818567 10.1.149.159 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/attach_disk.xml0000664000175000017500000000021712701023453026266 0ustar kamikami00000000000000 DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/ecs/create_disk.xml0000664000175000017500000000025512701023453026267 0ustar kamikami00000000000000 i-28n7dkvov DA38B11A-9D6D-420B-942F-95D68606C4FC apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/0000775000175000017500000000000013160535107023602 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/ssh_list.xml0000664000175000017500000000070612701023453026153 0ustar kamikami00000000000000 fingerprint a6:1f:b8:b4:19:91:99:d8:af:ab:d6:17:72:8b:d1:6c id 10 name testkey apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/disk_info.xml0000664000175000017500000000265012701023453026270 0ustar kamikami00000000000000 datacenter_id 1 name libcloud snapshot_profile kernel_version can_snapshot 1 kernel_cmdline visibility private label vms_id source state 
created is_boot_disk 0 date_updated 20120629T11:49:00 date_created 20120629T11:48:20 type data id 1263 size 1024 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/vm_info.xml0000664000175000017500000001545512701023453025767 0ustar kamikami00000000000000 memory 256 hostname test2 console 0 description triggers date_updated 20110120T15:25:07 disks datacenter_id 1 name test2 kernel_version 2.6.32 can_snapshot kernel_cmdline root /dev/xvda1 ro 1 console xvc0 nosep 1 visibility private label Debian 5 vms_id 250133 source 23351 state running is_boot_disk 1 date_updated 20110120T15:02:01 date_created 20110120T14:57:55 type data id 34951 size 3072 disks_id 34951 datacenter_id 1 state running flex_shares 0 ai_active 0 vm_max_memory 2048 ifaces date_updated 20110120T14:58:44 vm_id 250133 bandwidth 5120.0 datacenter_id 1 ips reverse xvm-6-186.ghst.net iface_id 7857 date_updated 20110120T14:57:55 ip 10.5.6.186 datacenter_id 1 state created num 0 version 4 date_created 20101028T12:49:11 id 9256 reverse xvm6-fe37-9f7b.ghst.net iface_id 7857 date_updated 20110120T14:58:44 ip 2001:4b98:dc0:543:216:3eff:fe37:9f7b datacenter_id 1 state created num 1 version 6 date_created 20110120T14:58:44 id 9294 state used num 0 ips_id 9256 9294 date_created 20110120T14:57:55 type public id 7857 cores 1 ifaces_id 7857 graph_urls vcpu http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vcpu&device_number=0 vdi http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vdi&device_number=0 vif http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vif&device_number=0 date_created 20110120T14:57:55 id 250133 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/disk_create.xml0000664000175000017500000000172112701023453026576 0ustar kamikami00000000000000 iface_id date_updated 20120629T11:48:20 vm_id 
date_start disk_id 1263 source AB3917-GANDI step DONE ip_id date_created 20120629T11:48:20 type disk_create id 10895 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/disk_attach.xml0000664000175000017500000000202412701023453026574 0ustar kamikami00000000000000 iface_id date_updated 20110921T12:57:05 vm_id 250133 date_start disk_id 34918 source AB3917-GANDI step WAIT ip_id date_created 20110921T12:57:05 type disk_attach id 657982 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/vm_list.xml0000664000175000017500000000527512701023453026006 0ustar kamikami00000000000000 memory 256 console 0 description date_updated 20110120T15:25:07 hostname test1 disks_id 34951 datacenter_id 1 state running flex_shares 0 ai_active 0 vm_max_memory 2048 cores 1 ifaces_id 7857 date_created 20110120T14:57:55 id 250133 memory 256 console 0 description date_updated 20110225T12:09:31 hostname test2 disks_id 34954 datacenter_id 1 state halted flex_shares 0 ai_active 0 vm_max_memory 2048 cores 1 ifaces_id 7861 date_created 20110124T15:53:44 id 250136 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/iface_list.xml0000664000175000017500000000370412701023453026426 0ustar kamikami00000000000000 date_updated 20110120T14:58:44 vm_id 250133 bandwidth 5120.0 datacenter_id 1 state used num 0 ips_id 9256 9294 date_created 20110120T14:57:55 type public id 7857 date_updated 20110324T17:14:16 vm_id 250288 bandwidth 5192.0 datacenter_id 1 state used num 0 ips_id 9298 9508 date_created 20110324T17:14:06 type public id 8019 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/disk_create_from.xml0000664000175000017500000000202412701023453027616 0ustar kamikami00000000000000 iface_id date_updated 20110921T14:20:56 vm_id date_start disk_id 35288 source AB3917-GANDI step WAIT ip_id date_created 20110921T14:20:56 type disk_create id 657985 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/vm_reboot.xml0000664000175000017500000000202312701023453026311 0ustar 
kamikami00000000000000 iface_id date_updated 20110325T13:18:27 vm_id 250133 date_start disk_id source AB3917-GANDI step WAIT ip_id date_created 20110325T13:18:27 type vm_reboot id 637398 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/vm_delete.xml0000664000175000017500000000202312701023453026261 0ustar kamikami00000000000000 iface_id date_updated 20110324T15:49:32 vm_id 250136 date_start disk_id source AB3917-GANDI step WAIT ip_id date_created 20110324T15:49:32 type vm_delete id 637366 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/disk_delete.xml0000664000175000017500000000172112701023453026575 0ustar kamikami00000000000000 iface_id date_updated 20120629T11:47:06 vm_id date_start disk_id 1262 source AB3917-GANDI step WAIT ip_id date_created 20120629T11:47:06 type disk_delete id 10894 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/image_list_dc0.xml0000664000175000017500000006214712701023453027175 0ustar kamikami00000000000000 date_updated 20100928T10:41:38 disk_id 34198 label GandiOS datacenter_id 1 visibility all os_arch x86-32 date_created 20070101T00:00:00 author_id 248842 id 2 date_updated 20100811T16:30:06 disk_id 11233 label Mandriva 2008.0 datacenter_id 1 visibility all os_arch x86-32 date_created 20070101T00:00:00 author_id 248842 id 3 date_updated 20100811T16:30:06 disk_id 11235 label Centos 5 datacenter_id 1 visibility all os_arch x86-32 date_created 20070101T00:00:00 author_id 248842 id 4 date_updated 20100811T16:30:06 disk_id 11236 label Fedora Core 7 datacenter_id 1 visibility all os_arch x86-32 date_created 20070101T00:00:00 author_id 248842 id 5 date_updated 20100811T16:30:06 disk_id 11237 label Open SUSE 10.3 datacenter_id 1 visibility all os_arch x86-32 date_created 20070101T00:00:00 author_id 248842 id 6 date_updated 20100811T16:30:06 disk_id 11238 label Debian 4 datacenter_id 1 visibility all os_arch x86-32 date_created 20070101T00:00:00 author_id 248842 id 7 date_updated 20100811T16:30:06 disk_id 11239 label 
Fedora Core 8 datacenter_id 1 visibility all os_arch x86-32 date_created 20080101T00:00:00 author_id 248842 id 8 date_updated 20100811T16:30:06 disk_id 11240 label Open SUSE 11.0 datacenter_id 1 visibility all os_arch x86-32 date_created 20080101T00:00:00 author_id 248842 id 9 date_updated 20100811T16:30:06 disk_id 11241 label Mandriva 2008.1 datacenter_id 1 visibility all os_arch x86-32 date_created 20080101T00:00:00 author_id 248842 id 10 date_updated 20100811T16:30:06 disk_id 11242 label Ubuntu 8.04 datacenter_id 1 visibility all os_arch x86-32 date_created 20080101T00:00:00 author_id 248842 id 11 date_updated 20100922T11:56:05 disk_id 23351 label Debian 5 datacenter_id 1 visibility all os_arch x86-32 date_created 20090101T00:00:00 author_id 248842 id 12 date_updated 20100811T16:30:06 disk_id 23352 label Ubuntu 9.04 datacenter_id 1 visibility all os_arch x86-32 date_created 20090101T00:00:00 author_id 248842 id 13 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/disk_list.xml0000664000175000017500000000767112701023453026320 0ustar kamikami00000000000000 datacenter_id 1 name disk_libcloud2 kernel_version 2.6.32 can_snapshot 0 visibility private label Debian 5 vms_id source 23351 state created is_boot_disk 0 date_updated 20101116T10:51:59 date_created 20101028T13:52:38 type data id 34918 size 3072 datacenter_id 1 name test1 kernel_version 2.6.32 can_snapshot visibility private label Debian 5 vms_id 250133 source 23351 state created is_boot_disk 1 date_updated 20110120T15:02:01 date_created 20110120T14:57:55 type data id 34951 size 3072 datacenter_id 1 name test_disk kernel_version 2.6.32 can_snapshot 1 visibility private label Debian 5 vms_id 250288 source 23351 state created is_boot_disk 1 date_updated 20110325T16:31:11 date_created 20110324T17:14:06 type data id 35170 size 3072 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/ssh_info.xml0000664000175000017500000000155312701023453026134 0ustar kamikami00000000000000 fingerprint 
a6:1f:b8:b4:19:91:99:d8:af:ab:d6:17:72:8b:d1:6c name testkey value ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaCXFxl0cPZa+PkXSaux/9Sfn4J81eNJ4f/ZkjdIlmLJVYFUKbpC16eEwXYEfw/QBAZFPODCDQOFAZdgajO572y9scp09F7L7Rhwrw7DYu8STMIBz0XBIO8eOUyu5hVRpxaZGDih9B99e1hITTGFg+BveAmrdB8CPtygKo/fUmaamrocZBrD1betaLTC0i6/DVz7YAbR0CleZLlaBogqVhqmS0TB4J67aG2vvq1MjyOixQY5Ab4aXo4Dz1jd7oqCGCKCO9oKAG0ok94foxkfnCmfRrnfWzOA7SFWjUs65SOrGYZghspDcbJ9vA4ZkUuWJXPPvLVgsI8aHwkezJPD8Th root@testhost id 10 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/vm_stop.xml0000664000175000017500000000202112701023453026002 0ustar kamikami00000000000000 iface_id date_updated 20110325T13:19:52 vm_id 250133 date_start disk_id source AB3917-GANDI step WAIT ip_id date_created 20110325T13:19:52 type vm_stop id 637399 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/vm_create_from.xml0000664000175000017500000000561412701023453027316 0ustar kamikami00000000000000 iface_id date_updated 20110324T17:14:06 type disk_create date_start disk_id 35170 source AB3917-GANDI step WAIT ip_id date_created 20110324T17:14:06 vm_id id 637370 iface_id 8019 date_updated 20110324T17:14:06 vm_id date_start disk_id source AB3917-GANDI step WAIT ip_id 9298 date_created 20110324T17:14:06 type iface_create id 637371 iface_id date_updated 20110324T17:14:07 type vm_create date_start disk_id source AB3917-GANDI step WAIT ip_id date_created 20110324T17:14:07 vm_id 250288 id 637372 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/account_info.xml0000664000175000017500000001417112701023453026773 0ustar kamikami00000000000000 handle AB3917-GANDI products errors_for_updating product_name_does_not_match no_action_on_free_product can_release 1 date_end product_name shares_fixed autorenew errors_for_removing errors_for_releasing no_action_on_free_product not_available_resource is_in_redemption errors_for_autorenewing no_action_on_free_product duration 1y date_created 20101028T12:38:17 quantity 12 errors_for_renewing 
no_action_on_free_product id 11153 redemption 7 errors_for_updating no_action_on_free_product can_release 0 date_end product_name ips autorenew errors_for_removing errors_for_releasing no_action_on_free_product db_can_not_release is_in_redemption errors_for_autorenewing no_action_on_free_product duration 1m date_created 20110124T11:42:35 quantity 4 errors_for_renewing no_action_on_free_product id 11196 redemption 7 share_definition servers 1 bandwidth 5120.0 memory 256 cores 0.25 slots 0.66666666666666663 disk 8192 fullname Aymeric Barantal id 58757 resources available shares 12 servers 8 ips 4 bandwidth 51200.0 memory 2560 cores 3.0 slots 4.0 disk 89088 granted shares 12 servers 12 ips 8 bandwidth 61440 memory 3072 cores 5.0 slots 8.0 disk 98304 used servers 4 ips 4 bandwidth 10240.0 memory 512 cores 2.0 slots 4 disk 9216 expired apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/disk_detach.xml0000664000175000017500000000202412701023453026560 0ustar kamikami00000000000000 iface_id date_updated 20110921T12:57:35 vm_id 250133 date_start disk_id 34918 source AB3917-GANDI step WAIT ip_id date_created 20110921T12:57:35 type disk_detach id 657983 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/operation_info.xml0000664000175000017500000000157712701023453027345 0ustar kamikami00000000000000 date_updated 20110324T15:49:50 last_error date_start source AB3917-GANDI step DONE eta 39 date_created 20110324T15:49:32 type vm_delete id 637366 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/ssh_delete.xml0000664000175000017500000000022212701023453026433 0ustar kamikami00000000000000 1 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/datacenter_list.xml0000664000175000017500000000520213153541406027471 0ustar kamikami00000000000000 country France iso FR id 1 dc_code FR-SD2 name Equinix Paris country United States of America iso US dc_code US-BA1 id 2 name Level3 Baltimore 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/iface_attach.xml0000664000175000017500000000202412701023453026711 0ustar kamikami00000000000000 iface_id 7857 date_updated 20110921T12:49:35 vm_id 250133 date_start disk_id source AB3917-GANDI step WAIT ip_id date_created 20110921T12:49:35 type iface_attach id 657980 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/ip_list.xml0000664000175000017500000001232312701023453025764 0ustar kamikami00000000000000 reverse xvm-6-186.ghst.net iface_id 7857 date_updated 20110120T14:57:55 ip 10.5.6.186 datacenter_id 1 state created num 0 version 4 date_created 20101028T12:49:11 id 9256 reverse xvm6-fe37-9f7b.ghst.net iface_id 7857 date_updated 20110120T14:58:44 ip 2001:4b98:dc0:543:216:3eff:fe37:9f7b datacenter_id 1 state created num 1 version 6 date_created 20110120T14:58:44 id 9294 reverse xvm-6-179.ghst.net iface_id 7861 date_updated 20110124T15:53:44 ip 10.5.6.179 datacenter_id 1 state created num 0 version 4 date_created 20110124T11:43:17 id 9298 reverse xvm6-fea8-3724.ghst.net iface_id 7861 date_updated 20110124T15:54:44 ip 2001:4b98:dc0:543:216:3eff:fea8:3724 datacenter_id 1 state created num 1 version 6 date_created 20110124T15:54:44 id 9301 reverse iface_id date_updated 20110217T17:39:39 ip datacenter_id 1 state being_created num version 4 date_created 20110217T17:39:39 id 9323 reverse xvm-6-26.ghst.net iface_id date_updated 20110225T11:59:55 ip 10.5.6.26 datacenter_id 1 state created num 0 version 4 date_created 20110224T16:46:33 id 9332 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/iface_detach.xml0000664000175000017500000000202412701023453026675 0ustar kamikami00000000000000 iface_id 7857 date_updated 20110921T12:53:29 vm_id 250133 date_start disk_id source AB3917-GANDI step WAIT ip_id date_created 20110921T12:53:29 type iface_detach id 657981 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/disk_update.xml0000664000175000017500000000202412701023453026612 0ustar 
kamikami00000000000000 iface_id date_updated 20110921T14:23:10 vm_id date_start disk_id 34951 source AB3917-GANDI step WAIT ip_id date_created 20110921T14:23:10 type disk_update id 657987 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/gandi/account_info_rating.xml0000664000175000017500000000213512701023453030334 0ustar kamikami00000000000000 handle AB9090-GANDI rating_enabled 1 date_credits_expiration credits 0 products average_credit_cost share_definition fullname Aymeric BARANTAL id 24 resources available granted used expired apache-libcloud-2.2.1/libcloud/test/compute/fixtures/joyent/0000775000175000017500000000000013160535107024030 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/joyent/my_packages.json0000664000175000017500000000160412701023453027203 0ustar kamikami00000000000000[{"name":"Large 16GB","memory":16384,"disk":491520,"vcpus":3,"swap":32768,"default":false},{"name":"XL 8GB High CPU","memory":8192,"disk":245760,"vcpus":4,"swap":16384,"default":false},{"name":"XL 32GB","memory":32768,"disk":778240,"vcpus":4,"swap":65536,"default":false},{"name":"XXL 48GB","memory":49152,"disk":1048576,"vcpus":8,"swap":98304,"default":false},{"name":"XXXL 64GB ","memory":65536,"disk":1572864,"vcpus":12,"swap":131072,"default":false},{"name":"Medium 1GB High-CPU","memory":1024,"disk":61440,"vcpus":2,"swap":2048,"default":false},{"name":"Small 1GB","memory":1024,"disk":30720,"vcpus":1,"swap":2048,"default":true},{"name":"Medium 2GB","memory":2048,"disk":61440,"vcpus":1,"swap":4096,"default":false},{"name":"Medium 4GB","memory":4096,"disk":122880,"vcpus":1,"swap":8192,"default":false},{"name":"Large 8GB","memory":8192,"disk":245760,"vcpus":2,"swap":16384,"default":false}] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/joyent/my_datasets.json0000664000175000017500000002246112701023453027241 0ustar 
kamikami00000000000000[{"id":"f953e97e-4991-11e1-9ea4-27c6e7e8afda","urn":"sdc:sdc:nodejs:1.3.3","name":"nodejs","os":"smartos","type":"smartmachine","description":"Node.js git-deploy PaaS dataset","default":false,"requirements":{},"version":"1.3.3","created":"2012-02-14T03:54:01+00:00"},{"id":"71101322-43a5-11e1-8f01-cf2a3031a7f4","urn":"sdc:sdc:ubuntu-10.04:1.0.1","name":"ubuntu-10.04","os":"linux","type":"virtualmachine","description":"Ubuntu 10.04 VM 1.0.1","default":false,"requirements":{},"version":"1.0.1","created":"2012-02-22T18:27:32+00:00"},{"id":"7a4f84be-df6d-11e0-a504-3f6609d83831","urn":"sdc:admin:windows2008r2:1.5.0","name":"windows2008r2","os":"windows","type":"virtualmachine","description":"Windows 2008r2 Enterprise Edition","default":false,"requirements":{"max_memory":32768,"min_memory":4096},"version":"1.5.0","created":"2012-02-16T19:31:01+00:00"},{"id":"e4cd7b9e-4330-11e1-81cf-3bb50a972bda","urn":"sdc:sdc:centos-6:1.0.1","name":"centos-6","os":"linux","type":"virtualmachine","description":"Centos 6 VM 1.0.1","default":false,"requirements":{},"version":"1.0.1","created":"2012-02-15T20:04:18+00:00"},{"id":"988c2f4e-4314-11e1-8dc3-2bc6d58f4be2","urn":"sdc:sdc:centos-5.7:1.2.1","name":"centos-5.7","os":"linux","type":"virtualmachine","description":"Centos 5.7 VM 1.2.1","default":false,"requirements":{},"version":"1.2.1","created":"2012-02-14T05:53:49+00:00"},{"id":"e6ac6784-44b3-11e1-8555-87c3dd87aafe","urn":"sdc:sdc:debian-6.03:1.0.0","name":"debian-6.03","os":"linux","type":"virtualmachine","description":"Debian 6.03 VM 1.0.0","default":false,"requirements":{},"version":"1.0.0","created":"2012-02-14T05:21:53+00:00"},{"id":"3f8a3d02-43e4-11e1-9565-7f82a075e289","urn":"sdc:sdc:fedora-14:1.0.1","name":"fedora-14","os":"linux","type":"virtualmachine","description":"Fedora 14 VM 
1.0.1","default":false,"requirements":{},"version":"1.0.1","created":"2012-02-14T05:20:52+00:00"},{"id":"d239389c-7535-11e1-b60a-6f75edc139df","urn":"sdc:sdc:mongodb:1.2.4","name":"mongodb","os":"smartos","type":"smartmachine","description":"MongoDB SmartMachine","default":false,"requirements":{},"version":"1.2.4","created":"2012-03-23T22:33:31+00:00"},{"id":"98f38e14-6f83-11e1-bc32-2b9d0a8b6759","urn":"sdc:sdc:mongodb:1.1.1","name":"mongodb","os":"smartos","type":"smartmachine","description":"MongoDB SmartMachine","default":false,"requirements":{},"version":"1.1.1","created":"2012-03-16T16:54:14+00:00"},{"id":"64d81cee-689e-11e1-a130-232647306089","urn":"local:admin:stingray-standard-1gbps:1.2.0","name":"stingray-standard-1gbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Traffic Manager Hi-Throughput SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:40+00:00"},{"id":"29a2fb18-689d-11e1-a2a5-47b01f708bb0","urn":"local:admin:stingray-enterprise-1gbps:1.2.0","name":"stingray-enterprise-1gbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Traffic Manager Enterprise Hi-Throughput SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:12+00:00"},{"id":"27750b5c-689c-11e1-a67e-6331aba2c777","urn":"local:admin:stingray-enterprise-200mbps:1.2.0","name":"stingray-enterprise-200mbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Traffic Manager Enterprise SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:19+00:00"},{"id":"56ffd3bc-689b-11e1-837f-735e255247ac","urn":"local:admin:stingray-standard-200mbps:1.2.0","name":"stingray-standard-200mbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Traffic Manager 
SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:49+00:00"},{"id":"c79e581a-689a-11e1-91f3-932bbde56874","urn":"local:admin:stingray-lb-200mbps:1.2.0","name":"stingray-lb-200mbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Load Balancer SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:26+00:00"},{"id":"4ca85e3a-689a-11e1-a5df-1b5ffe7065e9","urn":"local:admin:stingray-simple-lb-200mbps:1.2.0","name":"stingray-simple-lb-200mbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Simple Load Balancer SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:33+00:00"},{"id":"8c4c0f30-66df-11e1-a3f4-0f8e0a382b60","urn":"sdc:sdc:percona:1.3.9","name":"percona","os":"smartos","type":"smartmachine","description":"Percona SmartMachine","default":false,"requirements":{},"version":"1.3.9","created":"2012-03-05T16:41:01+00:00"},{"id":"618d80f8-66d5-11e1-998e-e384c47940f0","urn":"sdc:sdc:mongodb:1.0.9","name":"mongodb","os":"smartos","type":"smartmachine","description":"MongoDB SmartMachine","default":false,"requirements":{},"version":"1.0.9","created":"2012-03-05T15:29:21+00:00"},{"id":"a54da3a0-6319-11e1-a3d9-9fdedd2f9e17","urn":"sdc:sdc:riak:1.5.6","name":"riak","os":"smartos","type":"smartmachine","description":"Riak SmartMachine template","default":false,"requirements":{},"version":"1.5.6","created":"2012-03-01T16:18:13+00:00"},{"id":"81641caa-6321-11e1-a79a-731161c6d519","urn":"local:admin:riakeds:1.5.6","name":"riakeds","os":"smartos","type":"smartmachine","description":"Riak EDS SmartMachine template","default":false,"requirements":{},"version":"1.5.6","created":"2012-03-01T19:52:37+00:00"},{"id":"f4bc70ca-5e2c-11e1-8380-fb28785857cb","urn":"sdc:sdc:smartosplus64:3.1.0","name":"smartosplus64","os":"smartos","type":"smartmachine","description":"Generic 
multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.1.0","created":"2012-03-02T15:30:58+00:00"},{"id":"a963d5d0-5e29-11e1-a4d7-a31977b1e6dd","urn":"sdc:sdc:smartosplus:3.1.0","name":"smartosplus","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.1.0","created":"2012-03-02T15:24:10+00:00"},{"id":"31bc4dbe-5e06-11e1-907c-5bed6b255fd1","urn":"sdc:sdc:smartos64:1.5.4","name":"smartos64","os":"smartos","type":"smartmachine","description":"Base template to build other templates on","default":false,"requirements":{},"version":"1.5.4","created":"2012-03-02T15:20:17+00:00"},{"id":"489754f2-5e01-11e1-8ff8-f770c2116b0d","urn":"sdc:sdc:smartos:1.5.4","name":"smartos","os":"smartos","type":"smartmachine","description":"Base template to build other templates on","default":false,"requirements":{},"version":"1.5.4","created":"2012-03-02T15:16:12+00:00"},{"id":"e05dbcac-1d44-11e1-b8ab-bf1bc04c2d65","urn":"sdc:sdc:smartosplus64:3.0.7","name":"smartosplus64","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.0.7","created":"2012-02-13T19:18:56+00:00"},{"id":"fcc5996a-1d34-11e1-899e-7bd98b87947a","urn":"sdc:sdc:smartosplus:3.0.7","name":"smartosplus","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.0.7","created":"2012-02-13T19:22:05+00:00"},{"id":"5fef6eda-05f2-11e1-90fc-13dac5e4a347","urn":"sdc:sdc:percona:1.2.2","name":"percona","os":"smartos","type":"smartmachine","description":"Percona 
SmartMachine","default":false,"requirements":{},"version":"1.2.2","created":"2012-02-13T19:23:12+00:00"},{"id":"34359ccc-21d2-2e4e-87e8-69fb36412008","urn":"sdc:sdc:windows2008r2standard:1.5.1","name":"windows2008r2standard","os":"windows","type":"virtualmachine","description":"windows2008r2standard VM image","default":false,"requirements":{"max_memory":32768,"min_memory":4096},"version":"1.5.1","created":"2012-03-13T18:25:53+00:00"},{"id":"a9380908-ea0e-11e0-aeee-4ba794c83c33","urn":"sdc:sdc:percona:1.0.7","name":"percona","os":"smartos","type":"smartmachine","description":"Percona SmartMachine","default":false,"requirements":{},"version":"1.0.7","created":"2012-02-13T19:24:17+00:00"},{"id":"df3589dc-df9a-11e0-a3a3-07ceee3e7d54","urn":"sdc:sdc:smartosplus64:3.0.4","name":"smartosplus64","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.0.4","created":"2012-02-13T19:27:27+00:00"},{"id":"aded640a-df98-11e0-b050-1f55ff3ddfa7","urn":"sdc:sdc:smartosplus:3.0.4","name":"smartosplus","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.0.4","created":"2012-02-13T19:30:28+00:00"},{"id":"3fcf35d2-dd79-11e0-bdcd-b3c7ac8aeea6","urn":"sdc:sdc:mysql:1.4.1","name":"mysql","os":"smartos","type":"smartmachine","description":"MySQL SmartMachine","default":false,"requirements":{},"version":"1.4.1","created":"2012-02-13T19:32:51+00:00"},{"id":"141194fa-dd77-11e0-8539-27dd8d8264b8","urn":"sdc:sdc:smartos64:1.4.7","name":"smartos64","os":"smartos","type":"smartmachine","description":"Base template to build other templates on","default":false,"requirements":{},"version":"1.4.7","created":"2012-02-13T19:33:21+00:00"},{"id":"f8ea0bb8-dd75-11e0-87c3-af5352ad3bd6","urn":"sdc:sdc:smartos:1.4.7","name":"smartos","os":"smartos","type":"smartmachine","description":"Base template to build other templates 
on","default":false,"requirements":{},"version":"1.4.7","created":"2012-02-13T19:33:50+00:00"}] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/joyent/my_machines.json0000664000175000017500000000134412701023453027215 0ustar kamikami00000000000000[{"id":"2fb67f5f-53f2-40ab-9d99-b9ff68cfb2ab","name":"testlc1","type":"virtualmachine","state":"running","dataset":"sdc:sdc:ubuntu-10.04:1.0.1","ips":["165.225.129.129","10.112.1.130"],"memory":1024,"disk":30720,"metadata":{"root_authorized_keys":"ssh-rsa abc== JoyentSSH\n"},"created":"2012-04-11T04:08:32+00:00","updated":"2012-04-11T04:08:42+00:00"}, {"id":"2fb67f5f-53f2-40ab-9d99-b9ff68cfb2ab","name":"testlc2","type":"virtualmachine","state":"running","dataset":"sdc:sdc:ubuntu-10.04:1.0.1","ips":["165.225.129.128","10.112.1.131"],"memory":1024,"disk":30720,"metadata":{"root_authorized_keys":"ssh-rsa abc== Joyent SSH\n", "credentials": {"root": "abc"}},"created":"2012-04-11T04:08:32+00:00","updated":"2012-04-11T04:08:42+00:00"}] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/joyent/my_machines_create.json0000664000175000017500000000054612701023453030543 0ustar kamikami00000000000000{"id":"2fb67f5f-53f2-40ab-9d99-b9ff68cfb2ab","name":"testlc","type":"virtualmachine","state":"provisioning","dataset":"sdc:sdc:ubuntu-10.04:1.0.1","ips":["165.225.129.129","10.112.1.130"],"memory":1024,"disk":30720,"metadata":{"root_authorized_keys":"ssh-rsa abcd== Joyent SSH\n"},"created":"2012-04-11T04:08:27+00:00","updated":"2012-04-11T04:08:27+00:00"} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/0000775000175000017500000000000013160535107024466 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/server_ip.json0000664000175000017500000000024313153541406027357 0ustar kamikami00000000000000{ "id": "01D4A802798AB77AA72DA2D05E1379E1", "ip": "10.5.135.140", "type": "IPV4", "reverse_dns": null, "firewall_policy": null, "load_balancers": [] } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/monitoring_policy_servers.json0000664000175000017500000000024413153541406032677 0ustar kamikami00000000000000[ { "id": "C72CF0A681B0CCE7EC624DD194D585C6", "name": "My server 1" }, { "id": "4ECD9D188EB457317B2CF8F07885E7B4", "name": "My server 2" } ]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/list_shared_storages.json0000664000175000017500000000324313153541406031574 0ustar kamikami00000000000000[ { "id": "6AD2F180B7B666539EF75A02FE227084", "size": 200, "state": "ACTIVE", "description": "My shared storage test description", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "cloudpanel_id": "vid35780", "size_used": "0.00", "cifs_path": "vid50995.nas1.lanvid50995", "nfs_path": "vid50995.nas1.lan/:vid50995", "name": "My shared storage test", "creation_date": "2015-05-06T08:33:25+00:00", "servers": [ { "id": "638ED28205B1AFD7ADEF569C725DD85F", "name": "My server 1", "rights": "RW" } ] }, { "id": "4406CE4723BB441C7956E25C51CE8C1B", "size": 50, "state": "ACTIVE", "description": "My shared storage description", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "cloudpanel_id": "vid30534", "size_used": "0.00", "cifs_path": "vid50995.nas1.lanvid50995", "nfs_path": "vid50995.nas1.lan/:vid50995", "name": "My shared storage", "creation_date": "2015-03-17T11:57:48+00:00", "servers": [ ] }, { "id": "1A5418172DD3BD39F8010A6633F1018A", "size": 250, "state": "ACTIVE", "description": null, "cloudpanel_id": "vid19857", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "size_used": "0.00", "cifs_path": "vid50995.nas1.lanvid50995", "nfs_path": "vid50995.nas1.lan/:vid50995", "name": "My shared storage 2", "creation_date": "2015-05-05T09:36:31+00:00", "servers": [ ] } 
]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/shared_storage.json0000664000175000017500000000114313153541406030353 0ustar kamikami00000000000000{ "id": "6AD2F180B7B666539EF75A02FE227084", "size": 200, "state": "ACTIVE", "description": "My shared storage test description", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "cloudpanel_id": "vid35780", "size_used": "0.00", "cifs_path": "vid50995.nas1.lanvid50995", "nfs_path": "vid50995.nas1.lan/:vid50995", "name": "My shared storage test", "creation_date": "2015-05-06T08:33:25+00:00", "servers": [ { "id": "638ED28205B1AFD7ADEF569C725DD85F", "name": "My server 1", "rights": "RW" } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/load_balancer_server_ips.json0000664000175000017500000000015013153541406032365 0ustar kamikami00000000000000{ "server_ips": [ "7C88E50FBC500A3D9D7F94E414255D6B", "7288E50FBC500A3D9D7F94E414255D6B" ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/fixed_instance_sizes.json0000664000175000017500000000226113153541406031563 0ustar kamikami00000000000000[ { "name": "M", "id": "8C626C1A7005D0D1F527143C413D461E", "hardware": { "vcore": 1, "cores_per_processor": 1, "ram": 1, "unit": "GB", "hdds": [ { "size": 40, "unit": "GB", "is_main": true } ] } }, { "name": "L", "id": "8C626C1A7005D0D1F527143C413D461F", "hardware": { "vcore": 2, "cores_per_processor": 1, "ram": 2, "unit": "GB", "hdds": [ { "size": 80, "unit": "GiB", "is_main": true } ] } }, { "name": "XL", "id": "8C626C1A7005D0D1F527143C413D4620", "hardware": { "vcore": 2, "cores_per_processor": 1, "ram": 4, "unit": "GB", "hdds": [ { "size": 120, "unit": "GB", "is_main": true } ] } }, { "name": "XXL", "id": "8C626C1A7005D0D1F527143C413D4621", "hardware": { "vcore": 4, "cores_per_processor": 1, "ram": 8, "unit": "GB", "hdds": [ { "size": 160, "unit": "GiB", "is_main": true } ] } } 
]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/server_hardware.json0000664000175000017500000000030213153541406030540 0ustar kamikami00000000000000{ "fixed_instance_size_id": 0, "vcore": 1, "cores_per_processor": 1, "ram": 2, "hdds": [ { "id": "8C626C1A7005D0D1F527143C413D461E", "size": 40, "is_main": true } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/describe_server.json0000664000175000017500000000174713153541406030541 0ustar kamikami00000000000000{ "id": "srv_1", "cloudpanel_id": "958FA92", "name": "My Server 1", "description": "", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "creation_date": "2015-05-04T06:32:15+00:00", "first_password": "Fg52K21nz8", "status": { "state": "POWERED_OFF", "percent": null }, "hardware": { "fixed_instance_size_id": 0, "vcore": 1, "cores_per_processor": 1, "ram": 2, "hdds": [ { "id": "8C626C1A7005D0D1F527143C413D461E", "size": 40, "is_main": true } ] }, "image": { "id": "3C3B80327CBBD7F0023F793F666C24D0", "name": "w2008r2datacenter64std" }, "dvd": null, "snapshot": null, "ips": [ { "id": "8D135204687B9CF9E79E7A93C096E336", "ip": "10.4.140.213", "type": "IPV4", "reverse_dns": null, "firewall_policy": null, "load_balancers": [] } ], "alerts": [], "monitoring_policy": null, "private_networks": null }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/monitoring_policy_port.json0000664000175000017500000000021413153541406032167 0ustar kamikami00000000000000{ "id": "663D21E232530D79E4E584104C400EE4", "protocol": "TCP", "port": 22, "alert_if": "RESPONDING", "email_notifications": true }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/ttt.json0000664000175000017500000000240313153541406026174 0ustar kamikami00000000000000{ "name": "MonitoringPolicy_674", "description": "MonitoringPolicy_674 description", "email": "test-go-sdk@oneandone.com", "agent": true, "thresholds": { "cpu": { "warning": { "value": 90, "alert": false }, 
"critical": { "value": 95, "alert": false } }, "ram": { "warning": { "value": 90, "alert": false }, "critical": { "value": 95, "alert": false } }, "disk": { "warning": { "value": 80, "alert": false }, "critical": { "value": 90, "alert": false } }, "transfer": { "warning": { "value": 1000, "alert": false }, "critical": { "value": 2000, "alert": false } }, "internal_ping": { "warning": { "value": 50, "alert": false }, "critical": { "value": 100, "alert": true } } }, "ports": [ { "protocol": "TCP", "port": 443, "alert_if": "NOT_RESPONDING", "email_notification": true } ], "processes": [ { "process": "httpdeamon", "alert_if": "NOT_RUNNING", "email_notification": false } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/list_firewall_policies.json0000664000175000017500000000245613153541406032120 0ustar kamikami00000000000000[ { "id": "83522FC7DA9172F229E5352C587075BA", "name": "My firewall policy test", "description": "My firewall policy description", "state": "CONFIGURING", "creation_date": "2015-04-29T10:43:11+00:00", "default": 0, "rules": [ { "id": "DA5CC179ED00079AE7DE595F0073D86E", "protocol": "TCP", "port_from": 80, "port_to": 80, "source": "0.0.0.0" }, { "id": "0766EC674A0CD9D4EC0FA0B07978A649", "protocol": "TCP", "port_from": 443, "port_to": 443, "source": "0.0.0.0" } ], "server_ips": [], "cloudpanel_id": "FW99AA4_7" }, { "id": "83522FC7DA9172F229E5352C587075B9", "name": "My firewall policy test 2", "description": "My firewall policy description", "state": "CONFIGURING", "creation_date": "2015-04-29T10:43:11+00:00", "default": 0, "rules": [ { "id": "DA5CC179ED00079AE7DE595F0073D86F", "protocol": "TCP", "port_from": 80, "port_to": 80, "source": "0.0.0.0" }, { "id": "0766EC674A0CD9D4EC0FA0B07978A64A", "protocol": "TCP", "port_from": 443, "port_to": 443, "source": "0.0.0.0" } ], "server_ips": [], "cloudpanel_id": "FW99AA4_7" } 
]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/load_balancer.json0000664000175000017500000000164713153541406030140 0ustar kamikami00000000000000{ "id": "BD8318616581A9C3C53F94402503230F", "name": "My load balancer", "state": "CONFIGURING", "creation_date": "2015-05-04T07:26:24+00:00", "description": "My load balancer description", "ip": null, "health_check_test": "TCP", "health_check_interval": 40, "health_check_path": null, "health_check_path_parser": null, "persistence": true, "persistence_time": 1200, "method": "ROUND_ROBIN", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "rules": [ { "id": "BCFAF421227674B2B324F779C1163ECB", "protocol": "TCP", "port_balancer": 80, "port_server": 80, "source": "0.0.0.0" }, { "id": "7390C04142800E006FF1B0132FFD8F9A", "protocol": "TCP", "port_balancer": 9999, "port_server": 8888, "source": "0.0.0.0" } ], "server_ips": [], "cloudpanel_id": "LB99AA4_1" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/list_servers.json0000664000175000017500000001173313153541406030113 0ustar kamikami00000000000000[ { "id":"8A7D5122BDC173B6E52223878CEF2748", "name":"Docs Content Ubuntu 16.04-1", "description":"", "status":{ "state":"POWERED_ON", "percent":null }, "datacenter":{ "id":"908DC2072407C94C8054610AD5A53B8C", "country_code":"US", "location":"United States of America" }, "creation_date":"2016-10-03T16:44:34+00:00", "image":{ "id":"4DBFA2D31B1A303A9CE0E4ECF8838FDE", "name":"ubuntu1604-64std" }, "hardware":{ "fixed_instance_size_id":"65929629F35BBFBA63022008F773F3EB", "vcore":1, "cores_per_processor":1, "ram":1, "hdds":[ { "id":"F92B0519D2F9A7EFA961F956C4B381B4", "size":40, "is_main":true } ] }, "ips":[ { "id":"9774FB2A6DCC45733B115A98FCD59CC6", "ip":"50.21.182.126" } ], "alerts":null }, { "id":"E7D36EC025C73796035BF4F171379025", "name":"Docs Content Test Server: CentOS 7-1", "description":"", "status":{ "state":"POWERED_ON", "percent":null }, "datacenter":{ 
"id":"908DC2072407C94C8054610AD5A53B8C", "country_code":"US", "location":"United States of America" }, "creation_date":"2016-05-05T17:39:50+00:00", "image":{ "id":"B5F778B85C041347BCDCFC3172AB3F3C", "name":"centos7-64std" }, "hardware":{ "fixed_instance_size_id":"65929629F35BBFBA63022008F773F3EB", "vcore":1, "cores_per_processor":1, "ram":1, "hdds":[ { "id":"CDB278D95A92CB4C379A9CAAD6759F02", "size":40, "is_main":true } ] }, "ips":[ { "id":"FDBE99EDD57F8596CBF71B6B64BD0A92", "ip":"62.151.179.99" } ], "alerts":null }, { "id":"DDDC4CCA34AAB08132FA1E40F9FEAC25", "name":"App Dev Server 5", "description":"", "status":{ "state":"POWERED_ON", "percent":null }, "datacenter":{ "id":"908DC2072407C94C8054610AD5A53B8C", "country_code":"US", "location":"United States of America" }, "creation_date":"2016-03-04T21:29:00+00:00", "image":{ "id":"96D5CEB497043FD54E834DEC4B8FF70A", "name":"centos7-64cpanel" }, "hardware":{ "fixed_instance_size_id":"65929629F35BBFBA63022008F773F3EB", "vcore":1, "cores_per_processor":1, "ram":1, "hdds":[ { "id":"5E23F849DD3D6A47615D8EE441FE74CC", "size":40, "is_main":true } ] }, "ips":[ { "id":"E193E9D2213088B3CCE8AD69646CEF18", "ip":"70.35.206.196" } ], "alerts":null }, { "id":"D5C5C1D01249DE9B88BE3DAE973AA090", "name":"Docs Test Server: CentOS 7-2", "description":"", "status":{ "state":"POWERED_ON", "percent":null }, "datacenter":{ "id":"908DC2072407C94C8054610AD5A53B8C", "country_code":"US", "location":"United States of America" }, "creation_date":"2016-10-24T18:13:41+00:00", "image":{ "id":"B5F778B85C041347BCDCFC3172AB3F3C", "name":"centos7-64std" }, "hardware":{ "fixed_instance_size_id":"3D4C49EAEDD42FBC23DB58FE3DEF464F", "vcore":1, "cores_per_processor":1, "ram":0.5, "hdds":[ { "id":"35F875003212E7F083728E4072A188B0", "size":30, "is_main":true } ] }, "ips":[ { "id":"1C79CF2066A5AEEACCB97A68FF41DD00", "ip":"74.208.88.88" } ], "alerts":null }, { "id":"FB1765588A90364835782061CE48BA8E", "name":"Docs Content Test Server Ubuntu 16.04-2", 
"description":"", "status":{ "state":"POWERED_ON", "percent":null }, "datacenter":{ "id":"908DC2072407C94C8054610AD5A53B8C", "country_code":"US", "location":"United States of America" }, "creation_date":"2016-11-28T21:45:50+00:00", "image":{ "id":"4DBFA2D31B1A303A9CE0E4ECF8838FDE", "name":"ubuntu1604-64std" }, "hardware":{ "fixed_instance_size_id":"65929629F35BBFBA63022008F773F3EB", "vcore":1, "cores_per_processor":1, "ram":1, "hdds":[ { "id":"B45F054C54B5AAE9C45253C6D0A47E74", "size":40, "is_main":true } ] }, "ips":[ { "id":"7D8EA6722183DA82AEDFA09AECE243C4", "ip":"70.35.206.233" } ], "alerts":null } ]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/list_monitoring_policies.json0000664000175000017500000000575313153541406032503 0ustar kamikami00000000000000[ { "id": "0F9A1604FC80EB625FC6AEE7394893BE", "name": "Default Policy", "description": "Default Policy Description", "default": 1, "state": "ACTIVE", "creation_date": "2014-10-29T07:52:46+00:00", "email": "", "agent": false, "servers": [ { "id": "E83777750130E1111AA89623B9557CAF", "name": "My Server 1" }, { "id": "CB22C6E0428239348A6B70BAE0D67E66", "name": "My Server 2" }, { "id": "6AAA2BD60813F419DEC8D69C6E4F6477", "name": "My Server 3" } ], "thresholds": { "cpu": { "warning": { "value": 90, "alert": false }, "critical": { "value": 95, "alert": false } }, "ram": { "warning": { "value": 90, "alert": false }, "critical": { "value": 95, "alert": false } }, "transfer": { "warning": { "value": 1000, "alert": false }, "critical": { "value": 2000, "alert": false } }, "internal_ping": { "warning": { "value": 50, "alert": false }, "critical": { "value": 100, "alert": false } } }, "ports": [], "processes": [], "cloudpanel_id": "mo99AA4_1" }, { "id": "C81F17AF6F6F6717372B79A882B25E92", "name": "My monitoring policy", "description": null, "default": 0, "state": "ACTIVE", "creation_date": "2015-02-26T07:23:42+00:00", "email": "", "agent": true, "servers": [], "thresholds": { "cpu": { "warning": { "value": 90, 
"alert": false }, "critical": { "value": 95, "alert": false } }, "ram": { "warning": { "value": 90, "alert": false }, "critical": { "value": 95, "alert": false } }, "disk": { "warning": { "value": 80, "alert": false }, "critical": { "value": 90, "alert": false } }, "transfer": { "warning": { "value": 1000, "alert": false }, "critical": { "value": 2000, "alert": false } }, "internal_ping": { "warning": { "value": 50, "alert": false }, "critical": { "value": 100, "alert": false } } }, "ports": [ { "id": "5F67F2706335AF34FFCDF71E5AF08B2B", "protocol": "TCP", "port": "22", "alert_when": false, "alert": true } ], "processes": [ { "id": "13CF2BEADA60230C6241C81C248F3C4B", "process": "test", "alert_when": false, "alert": true } ], "cloudpanel_id": "mo99AA4_2" } ]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/monitoring_policy.json0000664000175000017500000000236313153541406031132 0ustar kamikami00000000000000{ "name": "Monitoring Policy", "description": "Monitoring Policy Description", "email": "test@test.com", "agent": false, "thresholds": { "disk": { "warning": { "value": 80, "alert": false }, "critical": { "value": 90, "alert": false } }, "transfer": { "warning": { "value": 1000, "alert": false }, "critical": { "value": 2000, "alert": false } }, "ram": { "warning": { "value": 90, "alert": false }, "critical": { "value": 95, "alert": false } }, "internal_ping": { "warning": { "value": 50, "alert": false }, "critical": { "value": 100, "alert": true } }, "cpu": { "warning": { "value": 90, "alert": false }, "critical": { "value": 95, "alert": false } } }, "ports": [ { "alert_if": "NOT_RESPONDING", "email_notification": true, "protocol": "TCP", "port": 443 } ], "processes": [ { "process": "httpdeamon", "alert_if": "NOT_RUNNING", "email_notification": false } ] } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/load_balancer_server_ip.json0000664000175000017500000000014713153541406032210 0ustar kamikami00000000000000{ "id": 
"7C88E50FBC500A3D9D7F94E414255D6B", "ip": "123.46.79.100", "server_name": "My server 1" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/load_balancer_rule.json0000664000175000017500000000020613153541406031155 0ustar kamikami00000000000000{ "id": "BCFAF421227674B2B324F779C1163ECB", "protocol": "TCP", "port_balancer": 80, "port_server": 80, "source": "0.0.0.0" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/describe_id_firewall_policy.json0000664000175000017500000000010313153541406033054 0ustar kamikami00000000000000{ "id": "3C4F21EDFEEDD6ABB728EA5CE684E1AF", "name": "Windows" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/create_node.json0000664000175000017500000000171313153541406027634 0ustar kamikami00000000000000{ "id": "E7D36EC025C73796035BF4F171379025", "name": "Docs Content Test Server: CentOS 7-1", "description": "My server description", "status": { "state": "POWERED_ON", "percent": null }, "datacenter": { "id": "908DC2072407C94C8054610AD5A53B8C", "country_code": "US", "location": "United States of America" }, "creation_date": "2016-05-05T17:39:50+00:00", "image": { "id": "B5F778B85C041347BCDCFC3172AB3F3C", "name": "centos7-64std" }, "hardware": { "fixed_instance_size_id": "65929629F35BBFBA63022008F773F3EB", "vcore": 1, "cores_per_processor": 1, "ram": 1, "hdds": [ { "id": "CDB278D95A92CB4C379A9CAAD6759F02", "size": 40, "is_main": true } ] }, "ips": [ { "id": "FDBE99EDD57F8596CBF71B6B64BD0A92", "ip": "62.151.179.99" } ], "dvd": null, "snapshot": null, "alerts": [], "monitoring_policy": null, "private_networks": null }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/list_images.json0000664000175000017500000157330013153541406027673 0ustar kamikami00000000000000[ { "id": "6E1F2C70CCD3EE44ED194F4FFC47C4C9", "name": "w2012r2datacenter64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "B77E19E062D5818532EFF11C747BD104", "name": "w2012r2datacenter64std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "7C5FA1D21B98DE39D7516333AAB7DA54", "name": "w2012r2datacenter64std+SQL2012express", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "D2127B1C773877A693D718C78181D430", "name": "w2012r2datacenter64std+SQL2012web", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2012 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "EB2C91E2DC1BA405C5C648ED9B9A7118", "name": 
"w2012r2datacenter64std+SQL2012std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2012 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "71653B14F25069950DD34AF6FE1AFEDD", "name": "Ubuntu 14.04", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu14.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "C14988A9ABC34EA64CD5AAC0D33ABCAF", "name": "ubuntu1404-64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu14.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "72A90ECC29F718404AC3093A3D78327C", "name": "ubuntu1404-64std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu14.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "8E3BAA98E3DFD37857810E0288DD8FBA", "name": "centos6-32min", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 32, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "E949B8557A0D95E3425088C496FD93F8", "name": "centos6-32std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 32, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "650D003D3FC8A8FE554330E869B39FC0", "name": "centos6-64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "C598EAD5691CDADD1501A2AF29A2E91C", "name": "centos6-64std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "DCA5686F1FC5E8A6E0B32E5A68BBB499", "name": "w2008r2datacenter64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", 
"os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "E156EA0B6CE014FEAB8BEB8CC9E3BB77", "name": "w2008r2datacenter64std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "3A3752FFAEA4C4F53EE701D29172E332", "name": "w2008r2datacenter64std+SQL2012express", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "295D0663A4868F920332DCDC15F395A7", "name": "w2008r2datacenter64std+SQL2012web", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2012 Web Edition" }, { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "00A1CE098B4C6121A77B23262EAD4D76", "name": "w2008r2datacenter64std+SQL2012std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2012 Standard Edition" }, { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "E4B7589997B1147CCC5764A204F49E94", "name": "Red Hat 6", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "RedHat6", "os_version": "RedHat", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "F89443811E6952AF6B15E87E647D781A", "name": "w2012r2datacenter64min+SQL2012express", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "65F9A55A55A558F977CF55BDC70AF60E", "name": "w2012r2datacenter64min+SQL2012web", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2012 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": 
"20C66A769CFA8B98D9EDD509A6458641", "name": "w2012r2datacenter64min+SQL2012std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2012 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "06E189B1C60CB0F46501E49049C43B05", "name": "w2008r2datacenter64min+SQL2012express", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "9717A92E43E2350072A77B00F2DF8CF5", "name": "w2008r2datacenter64min+SQL2012web", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2012 Web Edition" }, { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "79AFC562A2541DD8F904F8CF918B6899", "name": "w2008r2datacenter64min+SQL2012std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": 
"WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2012 Standard Edition" }, { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "087BB7F082A7DE2817100EAD53A3845D", "name": "centos6-32std+Plesk12unlimited", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 32, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "EC5DCA17E7CF86D968F60C222948C83F", "name": "centos6-64std+Plesk12unlimited", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "B5F778B85C041347BCDCFC3172AB3F3C", "name": "centos7-64std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "7B9067380CB74BBDFE7F473DEEA2AF5C", "name": "centos7-64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], 
"os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "9B9A214D50A7EEE8D9E37295DF834F70", "name": "debian7-32min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian7", "os_version": "Debian", "os_architecture": 32, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "0D5A4BCDC5B47A33593239C2553F5ADC", "name": "debian7-32std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian7", "os_version": "Debian", "os_architecture": 32, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "CF8318BDB7EFE797C9C769272AAA5F5C", "name": "debian7-64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian7", "os_version": "Debian", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "EBA74A8ED3760AE376F1E26AC6744B3F", "name": "debian7-64std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian7", "os_version": "Debian", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": 
"07324CD1A116A7D66B0F56B240F58CD2", "name": "ubuntu1204-32min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu12.04", "os_version": "Ubuntu", "os_architecture": 32, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "D86A31798B9D8EB7DFC28FF377EF7E42", "name": "ubuntu1204-32std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu12.04", "os_version": "Ubuntu", "os_architecture": 32, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "806447A8DC74E7FD56C743877B4A61CD", "name": "ubuntu1204-64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu12.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "84E3B902821F911BE6B43FA36ADA8199", "name": "ubuntu1204-64std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu12.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "DBC37D7DF1726331AFC8A2153FC80EF0", "name": "CentOS 7", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "61FA286137BD4E113DFEC6EE0BA5BEBD", "name": "FreeBSD 10", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Others", "os": "FreeBSD10", "os_version": "FreeBSD", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "31CEEE904708A5354F0D119CA3E1C3D7", "name": "FreeBSD 10", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Others", "os": "FreeBSD10", "os_version": "FreeBSD", "os_architecture": 32, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "18AE0A62911AD1CC145DF6A4D92BD354", "name": "Red Hat 7", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "RedHat7", "os_version": "RedHat", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "9FB167C5495AC815C3A3CBF273123879", "name": "w2012r2datacenter64std+SQL2014express", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": 
"STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "31433BA1341A3EEED81ACE7DDDFA37BD", "name": "w2012r2datacenter64std+SQL2014web", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2014 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "1921A315152482071F4B36E4D141EC8A", "name": "w2012r2datacenter64std+SQL2014std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2014 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "1B53FF641F19052859DA26CF799CC9D2", "name": "w2008r2datacenter64std+SQL2014express", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "D5FA67DFD00DB9F4FBFC363374B93306", "name": "w2008r2datacenter64std+SQL2014web", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2014 Web Edition" }, { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "100E5AC8FA0690EBD9AE6937E5587991", "name": "w2008r2datacenter64std+SQL2014std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2014 Standard Edition" }, { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "8B7DE48FF94B2407B7455B76F608F8CD", "name": "w2012r2datacenter64min+SQL2014express", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "BCA00B58DD301EBC649FEAC96377E7DC", "name": "w2012r2datacenter64min+SQL2014web", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia 
SQL Server 2014 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "0398F06DE0FF331539842ADFB96279AB", "name": "w2012r2datacenter64min+SQL2014std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2014 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "F61E08E34CA0B44904A331A83EED6AE2", "name": "w2008r2datacenter64min+SQL2014express", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "729F1DB5BBB2EEC6EBC6651ABB19AE85", "name": "w2008r2datacenter64min+SQL2014web", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2014 Web Edition" }, { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "1EAFCF5E37C3331590AC7E3266ECF12E", "name": "w2008r2datacenter64min+SQL2014std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2008R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2014 Standard Edition" }, { "name": "Windows 2012 Datacenter" } ], "version": null, "categories": [] }, { "id": "0735AA79194B274179A8FADE62208FED", "name": "Solaris 10", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Others", "os": "Solaris10", "os_version": "Solaris", "os_architecture": 32, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "E8F733BBB99D1313652F5690D87B0E77", "name": "Solaris 10", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Others", "os": "Solaris10", "os_version": "Solaris", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "F7E14983198CDC318267A5D0DEA330D8", "name": "centos7-64std+cpanel", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "cPanel 11" } ], "version": null, "categories": [] }, { "id": "6C902E5899CC6F7ED18595EBEB542EE1", "name": "Docker", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], 
"os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 30, "licenses": [], "version": null, "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "8ED1E267A9E1367F731B4AE624FDF1C0", "name": "W2012R2DATACENTER64STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "35A31E95A968545236B2584771D3B8CA", "name": "W2012R2DATACENTER64STD+SQL2012EXPRESS+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "698D35B35E01506574B4DA83A095C444", "name": "W2012R2DATACENTER64STD+SQL2012WEB+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2012 Web 
Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "2E79303E264846125F7AD6877AE155B5", "name": "W2012R2DATACENTER64STD+SQL2012STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2012 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "F38F08898A742CF3046F8F825C9B199A", "name": "UBUNTU1404-64STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu14.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "AAC4EF1852B735A52D8D531381B81500", "name": "CENTOS7-64STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "051BC8159D2014F2B2D333E5D9BB671B", "name": "DEBIAN7-32STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian7", "os_version": "Debian", "os_architecture": 32, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "D04943D702F82A1FC29B42FD635ACB51", "name": "DEBIAN7-64STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian7", "os_version": "Debian", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "E5712C6F4ACFC3BCADFBBA2C0681E629", "name": "UBUNTU1204-32STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu12.04", "os_version": "Ubuntu", "os_architecture": 32, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "E935C4BAA226D7179FF3EB753B4F9323", "name": "UBUNTU1204-64STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu12.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "A94D4970684581C3CFC135AE80E1FC39", "name": 
"W2012R2DATACENTER64STD+SQL2014EXPRESS+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "BD727924A1E53F7D3B9D7B811F339D6B", "name": "W2012R2DATACENTER64STD+SQL2014WEB+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2014 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "95736E72DD3FFF9A825FF17ABBB96671", "name": "W2012R2DATACENTER64STD+SQL2014STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2014 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "C572494C271ED64142E5FD93E084BBFC", "name": "CentOS 6", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "2D593C41B3EB2BA345AFE806A5BA41A3", "name": "CentOS 6", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 32, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "5EDF485861FB263DDBB7C04867346A6E", "name": "Plesk", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": "12.5", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE", "POPULAR" ] }, { "id": "96D5CEB497043FD54E834DEC4B8FF70A", "name": "cPanel", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [ { "name": "cPanel 11" } ], "version": null, "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "EAB1D06D1B78654A6E391FAC48BAA4B1", "name": "Sharelock", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.1.0pre-4-r04", "categories": [ "APPLICATION SECURITY", "PASSWORD MANAGEMENT" ] }, { "id": "2D012C80C31413503F1E31355CAA0240", "name": "WildFly", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "10.0.0-0", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "CACF27D3256071F13E471FCF5C25FCF7", "name": "SimpleInvoices", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2013.1.beta.8-3", "categories": [ "ACCOUNTING" ] }, { "id": "A2302EA99E2A7E894277C4CF124F8951", "name": "PROST", "available_datacenters": [ "4EFAD5836CE43ACA502FD5B99BEE44EF" ], "os_family": "Linux", "os": "Ubuntu14.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": null, "type": "MY_IMAGE", "min_hdd_size": 40, "licenses": [], "version": null, "categories": [] }, { "id": "CA020E48664C63D5DBA879774B80632C", "name": "KNOPPIX_7.6.1_32BITS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Knoppix 7.6.1", "os_version": "Knoppix", "os_architecture": 32, "os_image_type": 
"ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "E678D53FF49EA13026C8CAE565373858", "name": "KNOPPIX_7.6.1_64BITS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Knoppix 7.6.1", "os_version": "Knoppix", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "7BCAA13B5B5A9E11B496B3E2A7E49D7B", "name": "Debian 8", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian8", "os_version": "Debian", "os_architecture": 32, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "590C67467416363FFA6DAEBA939CE9AA", "name": "Debian 8", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian8", "os_version": "Debian", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "2193D3FE6136ECC25A2BCF5F506478A6", "name": "debian8-32min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian8", "os_version": "Debian", "os_architecture": 32, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "C4DA59C0E4D5C63E89536ABF309DD942", "name": "debian8-32std", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian8", "os_version": "Debian", "os_architecture": 32, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "E9783B668B3E3D3C46A47B68ECA49858", "name": "DEBIAN8-32STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian8", "os_version": "Debian", "os_architecture": 32, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "FF696FFE6FB96FC54638DB47E9321E25", "name": "debian8-64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian8", "os_version": "Debian", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "868B25F5DDC3E110ACDEF034B1B81863", "name": "debian8-64std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian8", "os_version": "Debian", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "74B24B20E8DC35548B3A11387C1B3BCD", "name": "DEBIAN8-64STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian8", "os_version": "Debian", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "6D9435667B9BFC6583C9D052F0F1D48B", "name": "ubuntu1604-32min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu16.04", "os_version": "Ubuntu", "os_architecture": 32, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "4871EEE48030E0E5FAE813B780760BCD", "name": "ubuntu1604-32std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu16.04", "os_version": "Ubuntu", "os_architecture": 32, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "753E3C1F859874AA74EB63B3302601F5", "name": "ubuntu1604-64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu16.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "4DBFA2D31B1A303A9CE0E4ECF8838FDE", "name": "ubuntu1604-64std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu16.04", "os_version": "Ubuntu", "os_architecture": 64, 
"os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "5FEDF374F19FE3BFA536DBC0660DDB1E", "name": "Publify", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.2.0-3", "categories": [ "BLOG" ] }, { "id": "A66EC633504CE600B9D659EA28082732", "name": "Squash", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20151209-4", "categories": [ "CODE REVIEW" ] }, { "id": "93DF6BD0351A6E1BF698896F95E8CFE4", "name": "REFINERY CMS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.5-8", "categories": [ "CMS" ] }, { "id": "E08D613211F4CFC7367052614BA0E1E7", "name": "Spree", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.7-2", "categories": [ "E-COMMERCE" ] }, { "id": "3DEF9E1E82F8F14B7AC57595990E2E2E", 
"name": "OPEN EDX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160414-2", "categories": [ "COLLABORATION", "ELEARNING" ] }, { "id": "E8564D74944AC4F005E8CE951340D849", "name": "WordPress", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.5.2.php56-0", "categories": [ "BLOG", "CMS" ] }, { "id": "49130A36E7961805F0F3494D47724625", "name": "CANVAS LMS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2016.04.23.03-1", "categories": [ "ELEARNING" ] }, { "id": "040FBEF43407931C74EE074CCEC759AF", "name": "CENTOS6-64STD+PLESK12.5UNLIMITED", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "FDB280E565E2CC9D210BBE0F7B688E45", "name": "CENTOS6-32STD+PLESK12.5UNLIMITED", 
"available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 32, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": null, "categories": [] }, { "id": "6044F1B4DCAAB9F877ED7BBE2B6F26BE", "name": "TYPO3", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.1.2-0", "categories": [ "CMS" ] }, { "id": "7C48549F3DED9540B7CE75F761C96F62", "name": "PostgreSQL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.5.3-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "D15BA431C4EBE5AAA454892309E69A44", "name": "Redis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.1-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "274A1F47D79E07587939BF90A7845176", "name": "Memcached", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.4.28-0", "categories": [ "CACHE", "INFRASTRUCTURE" ] }, { "id": "D1C6FCCBA91EDEA95B7F3EA3C4B0F91E", "name": "RabbitMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.3-0", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "332AEAC8F58655EF5C6CFC18BDEAB5BB", "name": "Redmine", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.3.0-1-r05", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS", "POPULAR", "PROJECT MANAGEMENT" ] }, { "id": "1DDB874AC504E01DF1357D961274BEAE", "name": "OpenProject", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.0.19-1-r05", "categories": [ "PROJECT MANAGEMENT" ] }, { "id": "F213BAD2BC86F2383656606D1800070E", "name": "CHT1-GB-IMAGE", "available_datacenters": [ "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian7", 
"os_version": "Debian", "os_architecture": 64, "os_image_type": null, "type": "MY_IMAGE", "min_hdd_size": 30, "licenses": [], "version": null, "categories": [] }, { "id": "1DC90B8D32CC6DB41FE2ABA027381AEA", "name": "CHT1-DE-IMAGE_20160720", "available_datacenters": [ "4EFAD5836CE43ACA502FD5B99BEE44EF" ], "os_family": "Linux", "os": "Ubuntu16.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": null, "type": "MY_IMAGE", "min_hdd_size": 30, "licenses": [], "version": null, "categories": [] }, { "id": "DA40BC2BC45E15117E723E07648AB74C", "name": "CHT1-ES-IMAGE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "MY_IMAGE", "min_hdd_size": 30, "licenses": [], "version": null, "categories": [] }, { "id": "FD7D017C0BDD327F3DF206A85699FFF5", "name": "ocPortal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.28-0", "categories": [ "CMS" ] }, { "id": "74FECE8CC262CB31A11F051499B1B30E", "name": "NGINX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.1-1", "categories": [ "INFRASTRUCTURE" ] }, { "id": "F84262237686F207C9E961EEBF7F9A05", "name": "MyBB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], 
"os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.8.7-2", "categories": [ "FORUM" ] }, { "id": "FB593AF3F89A8B26B0B6E78F2E232B39", "name": "PrestaShop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1.6-2", "categories": [ "E-COMMERCE", "POPULAR" ] }, { "id": "DE91B3C9C152C3A9189F170F9F0A8CD0", "name": "MediaWiki", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.26.3-1", "categories": [ "WIKI" ] }, { "id": "2C0D250864B6EA5D0898A20A950D35ED", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.50-2", "categories": [ "CMS" ] }, { "id": "BAA7787F8931D571A572192E3F663384", "name": "Joomla!", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, 
"licenses": [], "version": "3.6.0-1", "categories": [ "CMS", "POPULAR" ] }, { "id": "CB7DE59A819E818711A9E339E7C9E618", "name": "ownCloud", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.0-0", "categories": [ "MEDIA SHARING", "POPULAR" ] }, { "id": "DA24D87F4772BFE5FAB3EB112CA66A90", "name": "SugarCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.5.24-0", "categories": [ "CRM" ] }, { "id": "C86A0850D804AEB0222E486BE3208983", "name": "WORDPRESS MULTISITE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.5.3-1", "categories": [ "BLOG", "CMS" ] }, { "id": "64D9900D1EA66D4FE4479B8C34EBB88B", "name": "WordPress", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.5.3-1", "categories": [ "BLOG", "CMS", "POPULAR" ] }, { "id": "6AF74675A2203B02E0E2D717CA9EF9B1", 
"name": "phpBB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.9-2", "categories": [ "FORUM" ] }, { "id": "F6363C60A8D4C25107419C6DC9071FEE", "name": "XOOPS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.7.2-3", "categories": [ "CMS" ] }, { "id": "D41BE971AC2EDC441F81972AA56BE301", "name": "OrangeHRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.3.2-2", "categories": [ "HUMAN RESOURCE MANAGEMENT" ] }, { "id": "91AF83F9EA73478EEC2DA5D11480FD9C", "name": "Mean", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.8-1", "categories": [ "INFRASTRUCTURE" ] }, { "id": "94118CB46246C958A542D5E21AE4098E", "name": "ResourceSpace", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.8-3", "categories": [ "MEDIA SHARING", "PHOTO SHARING", "VIDEO SHARING" ] }, { "id": "D9A859F560AA227DD07488945459DDFF", "name": "Odoo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.20160620-2", "categories": [ "CRM", "ERP", "POPULAR" ] }, { "id": "AC223698D543228BD97BB355112DC887", "name": "LIVE HELPER CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.46v-1", "categories": [ "CHAT" ] }, { "id": "41BA5BD5A162B87C5275854CBF3BC4D3", "name": "Moodle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.1-1", "categories": [ "ELEARNING", "POPULAR" ] }, { "id": "2C191B48E1B1E678951CA83C7F14BA01", "name": "Ruby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", 
"os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.1-4", "categories": [ "INFRASTRUCTURE" ] }, { "id": "A20125D799D01E829AB996B73499501F", "name": "Hadoop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.2-1", "categories": [ "DEVELOPER TOOLS" ] }, { "id": "C9C4E5912736CF2928BC412E8FD1DC1E", "name": "EXO PLATFORM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.3.0-4", "categories": [ "SOCIAL NETWORKING" ] }, { "id": "F823CA021043AE2D08913ED689994F6A", "name": "LET'S CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.4.7-1", "categories": [ "CHAT", "COLLABORATION", "SOCIAL NETWORKING" ] }, { "id": "53133AE1EB34BBD576A17F345CD95467", "name": "JRuby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": 
"APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.2.0-1", "categories": [ "INFRASTRUCTURE" ] }, { "id": "206A0FB72271162B051E7E6B61E0E3B9", "name": "Liferay", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.0.1-2", "categories": [ "PORTAL SERVER" ] }, { "id": "3C54989CFAAC3866BA0D09595433B1DF", "name": "Openfire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.0.2-2", "categories": [ "CHAT" ] }, { "id": "704E8A5FAF76C2AFF2D6B747F2833E51", "name": "Neos", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.6-3", "categories": [ "CMS" ] }, { "id": "169077A5C3977104306F38F8E5799D21", "name": "Noalyss", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.9.0.0-4", "categories": [ "ACCOUNTING" ] }, { "id": "137727284539B72CB5CC594BDA11ACCA", "name": 
"PROCESSMAKER OPEN SOURCE EDITION", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.1.8-3", "categories": [ "BPM", "COLLABORATION" ] }, { "id": "54627DC0DBFBC1CAEAD9CB65B6335E14", "name": "OpenCart", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.0.0-2", "categories": [ "E-COMMERCE" ] }, { "id": "7E09821C352A16C055216BF337261172", "name": "OPEN ATRIUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.54-3", "categories": [ "CMS", "PROJECT MANAGEMENT" ] }, { "id": "75E9BF70942168A9F2A184830AD52C1E", "name": "Pootle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.3-1", "categories": [ "TRANSLATION TOOLS" ] }, { "id": "EC1971DE44DDB26B228AAF7E3E09B0F9", "name": "Pimcore", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.1-5", "categories": [ "CMS" ] }, { "id": "9583B74D82356958FA0ECC6919D90336", "name": "phpList", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.5-1", "categories": [ "NEWSLETTER MANAGER" ] }, { "id": "FEB02845D85AA3CC2A044C34EF82990F", "name": "OSQA", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [ "FORUM" ] }, { "id": "B1E51217B6EACE01AAD94DAD749DA7D7", "name": "OXID ESHOP", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.9.8-1", "categories": [ "E-COMMERCE" ] }, { "id": "57DA7F9377430796E2B460A1AB06CA47", "name": "Osclass", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": 
"Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.1-3", "categories": [ "ONLINE CLASSIFIEDS" ] }, { "id": "E0A1463DB2128078D70A1349AE588334", "name": "REPORTSERVER COMMUNITY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.2-2", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "0D8E437D6AC523E1FC861573A02A7003", "name": "RE:DASH", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.10.1.1834-2", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "C38F0D74C3229E239CCD5ADBA8219D69", "name": "ProcessWire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.2-3", "categories": [ "CMS" ] }, { "id": "007CB21D4E96A2CB9B793055480CCBCC", "name": "SEO PANEL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", 
"min_hdd_size": 20, "licenses": [], "version": "3.9.0-2", "categories": [ "ANALYTICS" ] }, { "id": "29CFB2921C4AABECE924C2BCBFF7529E", "name": "Roundcube", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.1.4-4", "categories": [ "EMAIL" ] }, { "id": "F60C3C16A160413FF80BD01141C3996A", "name": "REVIEW BOARD", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.6.1-1", "categories": [ "CODE REVIEW", "DEVELOPER TOOLS" ] }, { "id": "41EFA3047599548686F49E8A703F9272", "name": "SIMPLE MACHINES FORUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.0.12-1", "categories": [ "FORUM" ] }, { "id": "8CB1E54FE149D174B1F74A4B997E5685", "name": "Railo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.2.1.008-5", "categories": [ "APPLICATION SERVER" ] }, { "id": 
"A9CBCAD23493DCEABE71ED09AB31174D", "name": "Roller", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.1.2-8", "categories": [ "BLOG" ] }, { "id": "5EEAB74822AFF231207DDBDB11B92934", "name": "SilverStripe", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.4.0-1", "categories": [ "CMS", "FRAMEWORK" ] }, { "id": "0AE06ED19D4FA7BD7B41B799D4B96704", "name": "Sitecake", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.9-4", "categories": [ "CMS" ] }, { "id": "B77B7AEA1DAE36A579A8D81F24E126D2", "name": "Weblate", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5-1", "categories": [ "TRANSLATION TOOLS" ] }, { "id": "2ECD76527ABD16DC6F2BED507912FFFD", "name": "TestLink", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.9.14-4", "categories": [ "DEVELOPER TOOLS", "TESTING" ] }, { "id": "A29E35300D2D6B8DA10498DB8CB08074", "name": "Tracks", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0-9", "categories": [ "PLANNING" ] }, { "id": "9F4320B2D44D0FDDC883C4AA7B8F9EA0", "name": "Zurmo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.5-2", "categories": [ "CRM" ] }, { "id": "0D8E3D1102587B91FD1596B9868021D6", "name": "Tomcat", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.0.36-2", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "49AB84DBAD6764D59B5111B7DDCF8EE4", "name": "NODE.JS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": 
"Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.3.1-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "9DC423C7DA8A0A4FBD1DE6FBA85C17E2", "name": "Trac", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [ "BUG TRACKING", "DEVELOPER TOOLS", "WIKI" ] }, { "id": "A336C2CFE78C5156ADF556C2F666D1B1", "name": "Mantis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.2.19-5", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS" ] }, { "id": "A9AAB71F0A765059E0D6D9274781724D", "name": "Plone", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.0.2-1", "categories": [ "CMS" ] }, { "id": "C9D63C05556580FE1722776B8E5F48DB", "name": "Magento", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], 
"version": "2.1.0-2", "categories": [ "E-COMMERCE", "POPULAR" ] }, { "id": "294D6E256D749277BEF89D1AFEDDB193", "name": "PARSE SERVER", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.17-0", "categories": [ "DEVELOPER TOOLS", "FRAMEWORK", "MOBILE DEVELOPMENT", "POPULAR" ] }, { "id": "FDAF02F6061C87FD39102336F7625831", "name": "LimeSurvey", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160728-0", "categories": [ "POLL MANAGEMENT" ] }, { "id": "232AF8315BCC0E8D9D626192D2BA64FE", "name": "MODX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.1pl-0", "categories": [ "CMS" ] }, { "id": "36A45A12E9214B4DB6B5098A4BD1A451", "name": "Phabricator", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160725-0", "categories": [ "BUG TRACKING", "CODE REVIEW", "PROJECT 
MANAGEMENT", "WIKI" ] }, { "id": "4639704650A3780F352738601C6989F7", "name": "Jenkins", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.16-0", "categories": [ "CONTINUOUS INTEGRATION", "DEVELOPER TOOLS" ] }, { "id": "04B99791EC464628DD6290698BC0F17C", "name": "SuiteCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.7-0", "categories": [ "CRM" ] }, { "id": "26A7389A2DBDDED651EFAA4E3835F377", "name": "Piwik", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.16.2-0", "categories": [ "ANALYTICS" ] }, { "id": "EC8EA218EC5089910C52D91F1E053F3F", "name": "X2ENGINE SALES CRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.0.3-0", "categories": [ "CRM" ] }, { "id": "7F3616E0B7610AF28BB4D1E617E6D19F", "name": "NODE IMAGE_20160806", "available_datacenters": [ 
"908DC2072407C94C8054610AD5A53B8C" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": null, "type": "MY_IMAGE", "min_hdd_size": 60, "licenses": [], "version": null, "categories": [] }, { "id": "003CE838ACDF71E5B2D43659A096291D", "name": "IMAGE UPDATED NODEJS", "available_datacenters": [ "908DC2072407C94C8054610AD5A53B8C" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": null, "type": "MY_IMAGE", "min_hdd_size": 60, "licenses": [], "version": null, "categories": [] }, { "id": "724F9387EB2A18A75E67C48FB046A173", "name": "IMAGE UPDATED NODEJS", "available_datacenters": [ "908DC2072407C94C8054610AD5A53B8C" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": null, "type": "MY_IMAGE", "min_hdd_size": 60, "licenses": [], "version": null, "categories": [] }, { "id": "CC714964F6A27F58A7E47485DE02E8AA", "name": "ActiveMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.14.0-0", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "11B0EC5C951798C72F8DB7B625D196EA", "name": "Akeneo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.4.20-3-r17", "categories": [ "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "1485D00FBCD691A400E6C43E88807FC7", 
"name": "ALFRESCO COMMUNITY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "201605-1-r17", "categories": [ "CMS", "DOCUMENT MANAGEMENT", "ECM" ] }, { "id": "AA88C22D5C13568A240A04A3EE6D13C9", "name": "Artifactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.9.0-1-r17", "categories": [ "BINARY REPOSITORY", "DEVELOPER TOOLS" ] }, { "id": "DFB6F45DE36E4E996D1C682811EF655D", "name": "Ametys", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.7.3-2-r17", "categories": [ "CMS" ] }, { "id": "7B92E60DEF5FF17E7CE4E73F1870ADD7", "name": "Cassandra", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.7-0-r17", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "705518F8C86BCA65295AB7935801A5E8", "name": "Chyrp", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.2-5-r17", "categories": [ "BLOG" ] }, { "id": "2C4C84938FA20C26942E5A7989423307", "name": "CiviCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.10-0-r17", "categories": [ "CRM" ] }, { "id": "7C631E3D8043517A2BC2680415B1CC71", "name": "Codiad", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.4-2-r17", "categories": [ "DEVELOPER TOOLS" ] }, { "id": "8ACF1E7FC5A48ABFBC72863A1A45CB8C", "name": "CouchDB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1-5-r17", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "07B621A1D899DA27A7C18AAEACE74137", "name": "Coppermine", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.5.42-1-r17", "categories": [ "PHOTO SHARING" ] }, { "id": "A81C74469B600C7628029547FFFAD4EA", "name": "concrete5", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.7.5.9-0-r17", "categories": [ "CMS" ] }, { "id": "7DB0DCB9B0769F2C1747F78362C4CA7A", "name": "Discourse", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.5.3-0-r17", "categories": [ "FORUM" ] }, { "id": "B5047C5C0BCBC106B1EBE1A66FE180F0", "name": "DokuWiki", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160626a-1-r17", "categories": [ "WIKI" ] }, { "id": "430CAC94ED76DF72315F3F2F8F27F57D", "name": "Dolibarr", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": 
null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.9.3-0-r17", "categories": [ "CRM", "ERP" ] }, { "id": "CD13429CD80B961B1ED99F51487E4B2B", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.1.8-0-r17", "categories": [ "CMS", "POPULAR" ] }, { "id": "C2A1BCC54B015AFDDBF4CAE308CA8B8B", "name": "Elasticsearch", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.5-1-r17", "categories": [ "SEARCH" ] }, { "id": "ABE80BF96BB73D8C50D2C2E59F4E9EC7", "name": "ERPNext", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.27.9-3-r17", "categories": [ "CRM", "ERP" ] }, { "id": "63069668ED8B221957F77E0F55EB0BBE", "name": "EZ PUBLISH", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2014.11-4-r17", "categories": [ "CMS" ] }, { "id": 
"84228C364024B3606C50A65DBE772C62", "name": "Ghost", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.9.0-0-r17", "categories": [ "BLOG" ] }, { "id": "9625D6507B627024369B71B3A53FC6A8", "name": "FAT FREE CRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.13.6-9-r17", "categories": [ "CRM" ] }, { "id": "AA7033295AC9D017B372ABE83A893326", "name": "GitLab", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.9.6-0-r17", "categories": [ "DEVELOPER TOOLS", "VERSION CONTROL" ] }, { "id": "207189765354E2C7DDFDD2D3C38040DF", "name": "Hadoop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.2-1-r17", "categories": [ "DEVELOPER TOOLS" ] }, { "id": "A21A106D20008D432878351CAE872A1D", "name": "HORDE GROUPWARE WEBMAIL", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.14-2-r17", "categories": [ "COLLABORATION", "EMAIL" ] }, { "id": "40A54EFAA7E4D2CA3A19D01AC90FCD28", "name": "JasperReports", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.3.0-1-r17", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "4B9E22B093F62EF8FC51A0DC953F3EEE", "name": "JBOSS AS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.2.0-4-r17", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "C38A935EED3E64FDA5E3139BC9DA19D3", "name": "Kafka", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.10.0.1-0-r17", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "729DA11C172C2A6294E8C322486CF15A", "name": "Joomla!", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.2-0-r17", "categories": [ "CMS" ] }, { "id": "2F3714CC0E84CB99EB54CC2538F58172", "name": "LET'S CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.4.7-1-r17", "categories": [ "CHAT", "COLLABORATION", "SOCIAL NETWORKING" ] }, { "id": "1BDD83C2B2F0C31B17A04A01575E36BB", "name": "LIVE HELPER CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.46v-1-r17", "categories": [ "CHAT" ] }, { "id": "B0AD4786940840236B08C35EA90885AF", "name": "MODX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.1pl-0-r17", "categories": [ "CMS" ] }, { "id": "F7C18DC4E1B45C71FEC9C47E4A095982", "name": "MyBB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": 
"Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.8.7-2-r17", "categories": [ "FORUM" ] }, { "id": "6C291BF1667016BE4A45E972F1760808", "name": "Neos", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.6-3-r17", "categories": [ "CMS" ] }, { "id": "096B56EBB3AF16E23644E97A9083DED6", "name": "NGINX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.1-1-r17", "categories": [ "INFRASTRUCTURE" ] }, { "id": "06D8DDC170731E1E4D53A91D5EA2EA0D", "name": "Noalyss", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.9.0.0-4-r17", "categories": [ "ACCOUNTING" ] }, { "id": "40AAA103CA41D486E20117C5DD66568A", "name": "Odoo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], 
"version": "9.0.20160620-2-r17", "categories": [ "CRM", "ERP" ] }, { "id": "854FD08E82BB00EB0097948AFFADBF13", "name": "OrangeHRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.3.2-2-r17", "categories": [ "HUMAN RESOURCE MANAGEMENT" ] }, { "id": "D231E0C17032F67F2D8691CE1E726A24", "name": "OpenProject", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.0.19-1-r17", "categories": [ "PROJECT MANAGEMENT" ] }, { "id": "992E0B00B702ADEE9E3AE3B0E2C99868", "name": "Osclass", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.1-3-r17", "categories": [ "ONLINE CLASSIFIEDS" ] }, { "id": "B5807A326C68B38FAA9362ACE08AFC40", "name": "OSQA", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [ "FORUM" ] }, { "id": "3F5FCB4DD7923159B0FD3608E4306C42", "name": 
"ownCloud", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.0-0-r17", "categories": [ "MEDIA SHARING" ] }, { "id": "D9A49296D136AA64C35001A9698B0052", "name": "OXID ESHOP", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.9.8-1-r17", "categories": [ "E-COMMERCE" ] }, { "id": "E142BF5D5408569E9640A61C330B116C", "name": "Phabricator", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160725-0-r17", "categories": [ "BUG TRACKING", "CODE REVIEW", "PROJECT MANAGEMENT", "WIKI" ] }, { "id": "DB1DC9F88E498BFCFC953A5151442756", "name": "phpBB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.9-2-r17", "categories": [ "FORUM" ] }, { "id": "03440FD53C0E1FEB3D8540AA0BBED374", "name": "Pimcore", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.1-5-r17", "categories": [ "CMS", "E-COMMERCE", "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "84ECBB02E2C800C16E2DC3D95FF6C34E", "name": "Piwik", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.16.2-0-r17", "categories": [ "ANALYTICS" ] }, { "id": "E575B3B1D03A59548B46167757BFED5A", "name": "Plone", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.0.2-1-r17", "categories": [ "CMS" ] }, { "id": "B4BC4F8E0911B59BF5EE28493A1F0818", "name": "PrestaShop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1.6-2-r17", "categories": [ "E-COMMERCE" ] }, { "id": "5A2C1381437270EBFEB5DA10C1F62E44", "name": "PROCESSMAKER OPEN SOURCE EDITION", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.1.8-3-r17", "categories": [ "BPM", "COLLABORATION" ] }, { "id": "75B3534710A5B853A6408CA4875DC06D", "name": "ProcessWire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.2-3-r17", "categories": [ "CMS" ] }, { "id": "BDA10DBDB712CCF46832DC1FB4D0A927", "name": "Pootle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.3-1-r17", "categories": [ "TRANSLATION TOOLS" ] }, { "id": "6FEECAD19E36735195BC43EC302A40F6", "name": "phpList", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.5-1-r17", "categories": [ "NEWSLETTER MANAGER" ] }, { "id": "D82E69BE96A658CE9F20307621988AE9", "name": "RabbitMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 
64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.5-0-r17", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "EC6CB19716E4C3815D6E381BD9DD48CC", "name": "Publify", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.2.0-3-r17", "categories": [ "BLOG" ] }, { "id": "8C7AC5833364516F6F658472CAA8B382", "name": "Redis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.3-0-r17", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "DFB169364B2F5AC2D25D14E87E6CE216", "name": "Railo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.2.1.008-5-r17", "categories": [ "APPLICATION SERVER" ] }, { "id": "CDD88DFFEFC7EE1D8B424EB65EAA0D8B", "name": "Redmine", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": 
"3.3.0-1-r17", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS", "PROJECT MANAGEMENT" ] }, { "id": "0F01E141122CB410941492A47CF0B12D", "name": "REPORTSERVER COMMUNITY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.2-2-r17", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "0157F81454776BFBB8D4F5F5839F6E69", "name": "REFINERY CMS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.5-9-r17", "categories": [ "CMS" ] }, { "id": "AB2EE4DB918B602164E3B359F5008346", "name": "ResourceSpace", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.8-3-r17", "categories": [ "MEDIA SHARING", "PHOTO SHARING", "VIDEO SHARING" ] }, { "id": "6F55B7A1CB8165416E72FB4EC4B31E45", "name": "Roller", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.1.2-8-r17", 
"categories": [ "BLOG" ] }, { "id": "F2196ABDFD64B55573B9F0F926467547", "name": "Roundcube", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.1.4-4-r17", "categories": [ "EMAIL" ] }, { "id": "5E122541ADD7F8C46B90423E61F0CA37", "name": "Ruby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.1-4-r17", "categories": [ "INFRASTRUCTURE" ] }, { "id": "FB7C9FDC9DE754EA14072278BA7B8034", "name": "SEO PANEL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.9.0-2-r17", "categories": [ "ANALYTICS" ] }, { "id": "C1F702D004852D911D2FC8DD31777A24", "name": "Sitecake", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.9-4-r17", "categories": [ "CMS" ] }, { "id": "808DCE112CB5608E2BB81586C0C2228F", "name": "Spree", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.7-2-r17", "categories": [ "E-COMMERCE" ] }, { "id": "110FED5DC89D9BFBCE7CA6D8375601D8", "name": "Squash", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20151209-4-r17", "categories": [ "CODE REVIEW" ] }, { "id": "24518F1E3047C04B491B5333A712C9E6", "name": "SugarCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.5.24-0-r17", "categories": [ "CRM" ] }, { "id": "A518ECEC924E6F099BC4EB724C09277C", "name": "TestLink", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.9.14-4-r17", "categories": [ "DEVELOPER TOOLS", "TESTING" ] }, { "id": "8380A0AAE041BB458FE78BF91018B7A4", "name": "Trac", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [ "BUG TRACKING", "DEVELOPER TOOLS", "WIKI" ] }, { "id": "5899941FA66B700CC78EA06CEBFD7B28", "name": "Tracks", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0-9-r17", "categories": [ "PLANNING" ] }, { "id": "6C24674971A1FD051D0C6D2F6E5579B2", "name": "Weblate", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5-1-r17", "categories": [ "TRANSLATION TOOLS" ] }, { "id": "55D16D4D84902505BBCDDDEEFF9BAE17", "name": "Zurmo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.5-2-r17", "categories": [ "CRM" ] }, { "id": "1E66A9D9EDEC0D9324DBF033280B6206", "name": "X2ENGINE SALES CRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 
64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.0.4-0", "categories": [ "CRM" ] }, { "id": "D0BF11D16DE12DBB2F03B769F997868E", "name": "PostgreSQL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.5.4-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "05B9AE7CE2714F70A8854DADB923E697", "name": "IMAGE UPDATED NODEJS", "available_datacenters": [ "908DC2072407C94C8054610AD5A53B8C" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 32, "os_image_type": null, "type": "MY_IMAGE", "min_hdd_size": 60, "licenses": [], "version": null, "categories": [] }, { "id": "C85BD198EA9364778599592FF39FA049", "name": "HHVM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.9.9-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "C09108DE96BEAA6184FF366A02E8DE46", "name": "Mean", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.9-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "620FC561D9F94BB0B0146F168EB85136", "name": "Liferay", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.0.2-0", "categories": [ "PORTAL SERVER" ] }, { "id": "379CAAB0ED5C4FAFACF9F034A69735CA", "name": "EspoCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.2.4-0", "categories": [ "CRM" ] }, { "id": "17A060B98D6E7F25707BF6C71F6096A7", "name": "SilverStripe", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.4.1-0", "categories": [ "CMS", "FRAMEWORK" ] }, { "id": "837C3D8111893C0C9964E0DDE7971915", "name": "ocPortal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.29-0", "categories": [ "CMS" ] }, { "id": "1373011804EF4A657035889D895226F6", "name": "CMS MADE SIMPLE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], 
"os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.5-0", "categories": [ "CMS" ] }, { "id": "18378C6A39E088EE7932D07006A7D20C", "name": "Diaspora", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.5.10.2-0", "categories": [ "SOCIAL NETWORKING" ] }, { "id": "12EC2D018628FDAB2ACCDA4D3FA55ADC", "name": "OPEN ATRIUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.68-0", "categories": [ "CMS", "PROJECT MANAGEMENT" ] }, { "id": "69E015EC1808F640F1F31F3D3526F0A1", "name": "MediaWiki", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.27.1-0", "categories": [ "WIKI" ] }, { "id": "606DBF39FA1D3E9B6FF7E3B7A39752F3", "name": "AbanteCart", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", 
"min_hdd_size": 20, "licenses": [], "version": "1.2.8-0", "categories": [ "E-COMMERCE" ] }, { "id": "DF4F6AB724D6FB458716D651F7394B63", "name": "Openfire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.0.3-0", "categories": [ "CHAT" ] }, { "id": "409B5E9CCFCA214FC352C5808ABF96C1", "name": "LimeSurvey", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160817-0", "categories": [ "POLL MANAGEMENT" ] }, { "id": "03DC6FF3F27796E757B203CFDF6BD5FE", "name": "Memcached", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.4.31-0", "categories": [ "CACHE", "INFRASTRUCTURE" ] }, { "id": "5EE33F33187A783EFC9051B4DD9E074A", "name": "XOOPS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.8-0", "categories": [ "CMS" ] }, { "id": "4C91108B31F5F77D709B33241148672D", "name": 
"APACHE SOLR", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.2.0-0", "categories": [ "INFRASTRUCTURE", "SEARCH" ] }, { "id": "A80FF8BFAFD7A03866B197CE4FB1D486", "name": "PARSE SERVER", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.18-0", "categories": [ "DEVELOPER TOOLS", "FRAMEWORK", "MOBILE DEVELOPMENT" ] }, { "id": "DBE47B145016882FC946689C1BFF8079", "name": "EXO PLATFORM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.3.0-8", "categories": [ "SOCIAL NETWORKING" ] }, { "id": "41B2AE576124D0747F53715B3A3E252D", "name": "DreamFactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0-0", "categories": [ "DEVELOPER TOOLS", "MOBILE DEVELOPMENT" ] }, { "id": "CF684312E2D8ACD884ACE2263E2FD91E", "name": "OPEN EDX", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160414-4", "categories": [ "COLLABORATION", "ELEARNING" ] }, { "id": "FB5FADC8EAB7F8560BA94699211EED2E", "name": "NODE.JS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.5.0-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "5EFEB640001CE1B7FDC6AEAEFD4F1C29", "name": "TYPO3", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.3.0-0", "categories": [ "CMS" ] }, { "id": "30C78C33F5241ED6E0A30F8EAC650D7A", "name": "OPENSUSE_LEAP-42.1_64BITS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "OpenSuse Leap-42.1", "os_version": "OpenSuse", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "55BD7B087A069AAEE78D5FA36588A519", "name": "ARCHLINUX_2016.06.01-DUAL_64BITS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "ArchLinux 2016.06.01-dual", "os_version": "ArchLinux", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "39725DAAFD90265243241744C29B82F0", "name": "OPENSUSE_LEAP-42.1_64MIN", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "OpenSuse Leap-42.1", "os_version": "OpenSuse", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "FE87E4000FC94A6290599EA813431F9E", "name": "ArchLinux_Rolling_Release_64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "ArchLinux Rolling Release", "os_version": "ArchLinux", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "C5A349786169F140BCBC335675014C08", "name": "CoreOS_Stable_64std", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "CoreOS Stable", "os_version": "CoreOS", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { "id": "6AB60C7F28CF9D78191697BAA1A46C4F", "name": "Magento", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, 
"os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.1-0", "categories": [ "E-COMMERCE" ] }, { "id": "4A518A867FF37731FE64EF86AC98E3FA", "name": "Pimcore", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.3.0-0", "categories": [ "CMS", "E-COMMERCE", "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "0A82F54809F6AFC04CA371936D5686AC", "name": "SuiteCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.7.4-0", "categories": [ "CRM" ] }, { "id": "83C5B340E8227A5DA52DE1AD33369C9B", "name": "Elasticsearch", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.4.0-0", "categories": [ "SEARCH" ] }, { "id": "9DC8914F60D716DFE6733A761EA93828", "name": "Moodle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.1-2", "categories": [ 
"ELEARNING" ] }, { "id": "14C799C6DEE937B4317282D1AA126EFA", "name": "Shopware", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.6-0", "categories": [ "CMS", "E-COMMERCE" ] }, { "id": "9BC273C4C7E5980FBA0BA3CD4D55E783", "name": "WORDPRESS MULTISITE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6-1", "categories": [ "BLOG", "CMS" ] }, { "id": "50E477771DC4AC8C1E93E0796EBC12C8", "name": "RE:DASH", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.11.1.2095-0", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "56521A4ED64DD88FA0AF56F086C07C43", "name": "Django", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.1-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "99044CEE5F93573A653ACC5225431EB7", "name": "REVIEW BOARD", 
"available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.6.1-2", "categories": [ "CODE REVIEW", "DEVELOPER TOOLS" ] }, { "id": "F1A014F8CF538A5262A1F5F8C4BD9FDB", "name": "Tomcat", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.0.37-0", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "0381C291860A658F775DE08A032E8C71", "name": "Sharelock", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.1.0pre-5", "categories": [ "APPLICATION SECURITY", "PASSWORD MANAGEMENT" ] }, { "id": "FFA938A78176DDF7AE790E0C9403741D", "name": "Mantis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.3.1-1", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS" ] }, { "id": "A673C801E3A479A5D63EA44E49D397BB", "name": "JRuby", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.5.0-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "B1425A233077C5D769C19F8FFEA2DFB1", "name": "PrestaShop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1.7-0", "categories": [ "E-COMMERCE" ] }, { "id": "B35BB7EA283240172B46A0BB31026D28", "name": "WordPress", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.1-0", "categories": [ "BLOG", "CMS" ] }, { "id": "BF6461B167C7A61B012BF9010B1EA0B3", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.1.9-0", "categories": [ "CMS" ] }, { "id": "75192D547D0461CDC97253B8857C1F0C", "name": "OpenCart", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], 
"os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0.2-0", "categories": [ "E-COMMERCE" ] }, { "id": "F910CE211AD2BC859F41403B94C42EBF", "name": "CiviCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.11-0", "categories": [ "CRM" ] }, { "id": "90151B99F7AFDE6C4328F1950A3C3EC5", "name": "WildFly", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "10.1.0-0", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "BBE7C30D9C5DDC1CD1BE1E4B309F38AA", "name": "DreamFactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0-1", "categories": [ "DEVELOPER TOOLS", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "CBD1BB22D10E960001A75C8BF3D1B768", "name": "NGINX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, 
"os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.1-2", "categories": [ "INFRASTRUCTURE" ] }, { "id": "7BDABE3C41DD3E20D685D8AC36847880", "name": "LIVE HELPER CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.47v-0", "categories": [ "CHAT" ] }, { "id": "DAF026809CF1A91F428F9720CBDE4A61", "name": "Moodle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.2-0", "categories": [ "ELEARNING" ] }, { "id": "903AF4782C188C069C661D7FDAAE8227", "name": "PARSE SERVER", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.19-0", "categories": [ "DEVELOPER TOOLS", "FRAMEWORK", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "B506755C5F48FA7D2A0D873DF0ED4493", "name": "Ghost", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": 
"0.10.1-0", "categories": [ "BLOG" ] }, { "id": "F5E8B877B38E2B03BF7E0E47CF9A7EDE", "name": "Jenkins", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.22-0", "categories": [ "CONTINUOUS INTEGRATION", "DEVELOPER TOOLS" ] }, { "id": "35A4FD155BB90DDF64C463D7DD0011EA", "name": "ECLIPSE CHE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.2-0", "categories": [ "COLLABORATION", "DEVELOPER TOOLS" ] }, { "id": "1F542168F6AEDEB8831A8F595C2079D0", "name": "Pimcore", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.3.1-0", "categories": [ "CMS", "E-COMMERCE", "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "F60FC0CB8526EAD747972C5AFEC1718C", "name": "EspoCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.2.5-0", "categories": [ "CRM" ] }, { "id": 
"FB7BCD8FDFEFD68F66EC95929CA2EFC6", "name": "TYPO3", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.3.1-0", "categories": [ "CMS" ] }, { "id": "C8E4E0566D199E2E0F217E3ED8562F4C", "name": "PARSE SERVER", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.21-0", "categories": [ "DEVELOPER TOOLS", "FRAMEWORK", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "87C89931FB3CE60F4D5A840471130622", "name": "Codiad", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.5-0", "categories": [ "DEVELOPER TOOLS" ] }, { "id": "CC49145B6893DAC56DF387BD9CE35155", "name": "DreamFactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0-2", "categories": [ "DEVELOPER TOOLS", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "015E1594D0FC841719F59EE2B58C0A87", "name": 
"ResourceSpace", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.9-0", "categories": [ "MEDIA SHARING", "PHOTO SHARING", "VIDEO SHARING" ] }, { "id": "4F7467E3860543E0A8C09CE5304A7354", "name": "APACHE SOLR", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.2.1-0", "categories": [ "INFRASTRUCTURE", "SEARCH" ] }, { "id": "2A96D34A571CFB86E847F6AF6066383C", "name": "OpenProject", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.0.20-0", "categories": [ "PROJECT MANAGEMENT" ] }, { "id": "0DE02576F5118B72E621C4189C25A655", "name": "NODE.JS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.6.0-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "719124C209CE44E0FDD9704ADC24A16C", "name": "Shopware", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.7-0", "categories": [ "CMS", "E-COMMERCE" ] }, { "id": "3D68AF170B405DEF486951FDE9FC6B8B", "name": "ocPortal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.30-0", "categories": [ "CMS" ] }, { "id": "FA88509561903CD244F4D719111930FB", "name": "WP MULTISITE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.1-2", "categories": [ "BLOG", "CMS" ] }, { "id": "230F941F659C727882B23D86B0CEF2F3", "name": "WordPress", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.1-2", "categories": [ "BLOG", "CMS" ] }, { "id": "F2F1C86F7DFA8D00D2F1DC4563A364F7", "name": "Ghost", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", 
"os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.11.0-1", "categories": [ "BLOG" ] }, { "id": "4D48C7B5DD466847F539E27BA02A8532", "name": "Artifactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.9.0-2", "categories": [ "BINARY REPOSITORY", "DEVELOPER TOOLS" ] }, { "id": "66564263B7F1A8744DC488D185048333", "name": "WordPress", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.1-3", "categories": [ "BLOG", "CMS" ] }, { "id": "66AB04CDBA615AA5F60CD6E0D7CB0A52", "name": "WP MULTISITE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.1-3", "categories": [ "BLOG", "CMS" ] }, { "id": "0BBA2EA144128ADE93DA73580B734CB2", "name": "HHVM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], 
"version": "3.9.9-1", "categories": [ "INFRASTRUCTURE" ] }, { "id": "FD27DA95E73982955F6BD7FC2CF3239A", "name": "AbanteCart", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.2.8-1", "categories": [ "E-COMMERCE" ] }, { "id": "32B0B16C9988E0DF89D8C214FAB2E2CB", "name": "SugarCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.5.24-1", "categories": [ "CRM" ] }, { "id": "AD9651BEA3F09D826BE1052FEBE83421", "name": "SuiteCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.7.4-1", "categories": [ "CRM" ] }, { "id": "02F49EFE3E972965B0E3E8E0CE0ED1B2", "name": "TestLink", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.9.15-1", "categories": [ "DEVELOPER TOOLS", "TESTING" ] }, { "id": "811336E891511883DCFB550C8D0A093C", "name": "Tomcat", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.0.M10-1", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "6DB7B9C04829632AB95C790AE3EDE774", "name": "TYPO3", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.3.1-1", "categories": [ "CMS" ] }, { "id": "F5FF86E8EEEC94DA1C40E420824F85A3", "name": "HORDE GROUPWARE WEBMAIL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.14-3", "categories": [ "COLLABORATION", "EMAIL" ] }, { "id": "0EA15A9E181987F9A02E5748CD11AD95", "name": "CouchDB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1-6", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "6F09214E7C7A219901B3A5B9276B4E18", "name": "Chyrp", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.2-6", "categories": [ "BLOG" ] }, { "id": "485C6C5D96CE16A126599622C22E1F09", "name": "Codiad", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.5-1", "categories": [ "DEVELOPER TOOLS" ] }, { "id": "2333D742F6CF201C3F6D9755F542838A", "name": "Sharelock", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.1.0pre-6", "categories": [ "APPLICATION SECURITY", "PASSWORD MANAGEMENT" ] }, { "id": "55C69556A332CCD70D0F869FBDA88E45", "name": "concrete5", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.7.5.9-1", "categories": [ "CMS" ] }, { "id": "5A6E68A10ECEDA3F4B6553E48FE8D9CF", "name": "CiviCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": 
"Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.11-1", "categories": [ "CRM" ] }, { "id": "DF561A899416171BF4F41C2B4E2337FF", "name": "Mantis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.3.1-2", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS" ] }, { "id": "C4338CA733DFCAA02916F6565D3C55E4", "name": "ALFRESCO COMMUNITY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "201605-2", "categories": [ "CMS", "DOCUMENT MANAGEMENT", "ECM" ] }, { "id": "A3F640404604D33FD444C0E28250E605", "name": "CANVAS LMS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2016.04.23.03-2", "categories": [ "ELEARNING" ] }, { "id": "0BF1F3F154DAFD7634966A556BF8864C", "name": "CMS MADE SIMPLE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", 
"min_hdd_size": 20, "licenses": [], "version": "2.1.5-1", "categories": [ "CMS" ] }, { "id": "A3995BCEDC91E192E1EA71A4FB74205C", "name": "w2012r2datacenter64min+SQL2016express_20160415", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "DD6004F008BEDF3A1313C8BF34CC6D6F", "name": "w2012r2datacenter64min+SQL2016std_20160415", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2016 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "51E0E086634F8FC10FE14A5BE56480EB", "name": "w2012r2datacenter64min+SQL2016web_20160415", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2016 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "59A47BD0E5B7FEC4428ACD618F59F4D8", "name": "w2012r2datacenter64std+SQL2016express_20160415", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "8362DB3921707CD5A82E6785F94CC3FE", "name": "W2012R2DATACENTER64STD+SQL2016EXPRESS+PLESK12.5UNLIMITED_20160415", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "D3F03F0A986E284EE694DAB115CCAF9E", "name": "w2012r2datacenter64std+SQL2016std_20160415", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2016 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "E049BB25B951285385299614FD1B9D6B", "name": "W2012R2DATACENTER64STD+SQL2016STD+PLESK12.5UNLIMITED_20160415", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, 
"licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2016 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "13C42897A0F47333F6F1614BC0F56E27", "name": "w2012r2datacenter64std+SQL2016web_20160415", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia SQL Server 2016 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "CB5A3B438AB580B3DD01C535AA1C70D3", "name": "W2012R2DATACENTER64STD+SQL2016WEB+PLESK12.5UNLIMITED_20160415", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2016 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }, { "id": "CF751676613E624750FCE6D9FBDB3DA5", "name": "Trac", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.1.6-6", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS", "WIKI" ] }, { "id": "138D26AB67933F34FFF19F20D3F0CE08", "name": "Roundcube", 
"available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.1.4-5", "categories": [ "EMAIL" ] }, { "id": "25BDDF69B7E4AC14D8A9943006C98ED8", "name": "SEO PANEL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.9.0-3", "categories": [ "ANALYTICS" ] }, { "id": "51FA4394401E6CC561421925FF406394", "name": "Shopware", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.7-1", "categories": [ "CMS", "E-COMMERCE" ] }, { "id": "2A1B4817980BA87BBB51535A850E40D6", "name": "Roller", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.1.2-9", "categories": [ "BLOG" ] }, { "id": "E6FCB501149C5F34447B7534B0620818", "name": "DokuWiki", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160626a-2", "categories": [ "WIKI" ] }, { "id": "65DA533E7A371007F7CF3637F6EE488D", "name": "SilverStripe", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.4.1-1", "categories": [ "CMS", "FRAMEWORK" ] }, { "id": "36D10833D1269A4A0AAC5FAFC6878B1E", "name": "SIMPLE MACHINES FORUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.0.12-2", "categories": [ "FORUM" ] }, { "id": "9C8F83EDB6D2C2D7A8E62303B9914FFA", "name": "Ghost", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.11.0-2", "categories": [ "BLOG" ] }, { "id": "614B31410B6C25426C5532342079EE5F", "name": "Publify", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, 
"type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.2.0-4", "categories": [ "BLOG" ] }, { "id": "499D9B4912E9FE23B7412AD128A71904", "name": "MODX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.1pl-1-r26", "categories": [ "CMS" ] }, { "id": "9DCE0411BD55D64288C52041B13F4D42", "name": "Phabricator", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2016.30-1", "categories": [ "BUG TRACKING", "CODE REVIEW", "PROJECT MANAGEMENT", "WIKI" ] }, { "id": "A19D5FAFB06D60C86D99DE7C9901FB6D", "name": "Openfire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.0.3-1", "categories": [ "CHAT" ] }, { "id": "6D45892CC3E9662983E58E3D87504E93", "name": "OSQA", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [ "FORUM" ] }, { "id": 
"37C7CE879415DB34012A71E9C3DB8B12", "name": "OpenCart", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0.2-1", "categories": [ "E-COMMERCE" ] }, { "id": "024CA48EC98EA4288A61CF3A9FB88AC9", "name": "PROCESSMAKER OPEN SOURCE EDITION", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.1.8-4", "categories": [ "BPM", "COLLABORATION" ] }, { "id": "522E5B898051BB9036B020095E2C5BF6", "name": "ActiveMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.14.0-0-r26", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "6FC53E7FD6B0E87ABEECD2C4AC4913C5", "name": "Kafka", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.10.0.1-0-r26", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "FAC87774AA195A103CE53B25C9F45C58", "name": "EXO PLATFORM", 
"available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.3.0-9", "categories": [ "SOCIAL NETWORKING" ] }, { "id": "109748035061094F268A221825EE0184", "name": "AbanteCart", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.2.8-1-r26", "categories": [ "E-COMMERCE" ] }, { "id": "7479B1BCD0B133279BBB046552F9CE41", "name": "Akeneo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.4.20-3-r26", "categories": [ "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "E33A6C4BDACC80210CE75C906DC73912", "name": "ALFRESCO COMMUNITY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "201605-2-r26", "categories": [ "CMS", "DOCUMENT MANAGEMENT", "ECM" ] }, { "id": "7C5C2FFA2BD26B29F4A3A8C7FEF9E51F", "name": "Ametys", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.7.3-3-r26", "categories": [ "CMS" ] }, { "id": "E7D04F791BCD79B9F181395E35136937", "name": "Cassandra", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.7-0-r26", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "C01C0EBFB24718ACD7B6A6FC84E407C4", "name": "Chyrp", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.2-6-r26", "categories": [ "BLOG" ] }, { "id": "586E6422ACF06F7A9B2119DFF370C000", "name": "Artifactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.9.0-2-r26", "categories": [ "BINARY REPOSITORY", "DEVELOPER TOOLS" ] }, { "id": "9E3166A434D011BFDFECF85D34FEC85D", "name": "CiviCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], 
"os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.11-1-r26", "categories": [ "CRM" ] }, { "id": "C894255F9F1F7BA5AA87DDE035A0E1E7", "name": "CMS MADE SIMPLE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.5-1-r26", "categories": [ "CMS" ] }, { "id": "B18FE1A1A217364CC9F650C91FB3D114", "name": "Codiad", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.5-1-r26", "categories": [ "DEVELOPER TOOLS" ] }, { "id": "74A8367BAD5E3B9096B6C1C27229CACA", "name": "concrete5", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.7.5.9-1-r26", "categories": [ "CMS" ] }, { "id": "D2BA96404301AB7863C7D752726D4B09", "name": "DokuWiki", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", 
"min_hdd_size": 20, "licenses": [], "version": "20160626a-2-r26", "categories": [ "WIKI" ] }, { "id": "9CED418F14E2DDCF1DCDAB16F94C6E4E", "name": "Coppermine", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.5.42-3-r26", "categories": [ "PHOTO SHARING" ] }, { "id": "BB4CE46CC533C736E75AE69DBECBFAB5", "name": "CouchDB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1-6-r26", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "5FA77ECC752643CB469BBAFB42E1EA60", "name": "Django", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.1-1-r26", "categories": [ "INFRASTRUCTURE" ] }, { "id": "4225740C3EE365674EBACC37577B5FBB", "name": "Discourse", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.5.3-1-r26", "categories": [ "FORUM" ] }, { "id": 
"F8E9CA0DB0B8131162DC57886C9B6FF8", "name": "Diaspora", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.5.10.2-1-r26", "categories": [ "SOCIAL NETWORKING" ] }, { "id": "CAD84CE431231DDFF4B216433A3EFD84", "name": "Dolibarr", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.9.3-1-r26", "categories": [ "CRM", "ERP" ] }, { "id": "CCA2854F26E5B34BDB0AEE37B0E5AB32", "name": "DreamFactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0-4-r26", "categories": [ "DEVELOPER TOOLS", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "A0A955B6ACABC6A6F09B60F794A3FA07", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.1.10-0-r26", "categories": [ "CMS" ] }, { "id": "0A2487C91D6692A1A088DB4B2A2F3FD2", "name": "EspoCRM", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.2.5-1-r26", "categories": [ "CRM" ] }, { "id": "450F03E34E1C15FE9CBA5E1D9042843B", "name": "ERPNext", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.27.9-4-r26", "categories": [ "CRM", "ERP" ] }, { "id": "7E870AB3935AB7A6BC338BF49FE362D2", "name": "ECLIPSE CHE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.2-1-r26", "categories": [ "COLLABORATION", "DEVELOPER TOOLS" ] }, { "id": "40A0B882ED133494234A21C8027E7CA1", "name": "FAT FREE CRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.13.6-10-r26", "categories": [ "CRM" ] }, { "id": "28333E559F9400B4256B76821E3EE216", "name": "EZ PUBLISH", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2014.11-4-r26", "categories": [ "CMS" ] }, { "id": "9C1B26C4858D2BF07A32C7988B2107C2", "name": "HORDE GROUPWARE WEBMAIL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.14-3-r26", "categories": [ "COLLABORATION", "EMAIL" ] }, { "id": "A6B5EA365A6A0E5B1B3725C50645B7EA", "name": "JBOSS AS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.2.0-5-r26", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "B647F54413F265AAA636C146E291D9C3", "name": "GitLab", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.9.6-1-r26", "categories": [ "DEVELOPER TOOLS", "VERSION CONTROL" ] }, { "id": "2F2FF25BA9994A4EDE1DEC165524650E", "name": "Jenkins", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": 
"Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.658-3-r26", "categories": [ "CONTINUOUS INTEGRATION", "DEVELOPER TOOLS" ] }, { "id": "68AB5D93197F6F24ECA9457A8EEDAC44", "name": "HHVM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.9.9-1-r26", "categories": [ "INFRASTRUCTURE" ] }, { "id": "B48FA6BB24F67D4EDAF7ABD099CFE3C3", "name": "Joomla!", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.2-1-r26", "categories": [ "CMS" ] }, { "id": "70835551BB3D77DA6438365AFFD64ABF", "name": "JRuby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.5.0-1-r26", "categories": [ "INFRASTRUCTURE" ] }, { "id": "A825119DA06B32186F7719811C77C74B", "name": "OPEN EDX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", 
"min_hdd_size": 20, "licenses": [], "version": "20160414-5-r26", "categories": [ "COLLABORATION", "ELEARNING" ] }, { "id": "190733566A7DD7C4870938BA72E12C57", "name": "JasperReports", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.3.0-2-r26", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "FF2244A29705A4E259C61758B679ED0D", "name": "LimeSurvey", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160817-1-r26", "categories": [ "POLL MANAGEMENT" ] }, { "id": "DA72CEFA97A80389E586BCEBFE1EFD41", "name": "Liferay", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.0.2-1-r26", "categories": [ "PORTAL SERVER" ] }, { "id": "3B6523A857FE3B0D327D217EA1849017", "name": "LET'S CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.4.7-2-r26", "categories": [ "CHAT", "COLLABORATION", "SOCIAL 
NETWORKING" ] }, { "id": "C421D8AFCC4952A143BDE93B5CC63A23", "name": "Magento", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.1-1-r26", "categories": [ "E-COMMERCE" ] }, { "id": "8C57186CCCE916F3C63E498F77507680", "name": "Memcached", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.4.31-1-r26", "categories": [ "CACHE", "INFRASTRUCTURE" ] }, { "id": "C2BADCD828DAFCD3BC8664BE43A8A87D", "name": "Moodle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.2-1-r26", "categories": [ "ELEARNING" ] }, { "id": "DAAC8D4718C161A80B722600A0440914", "name": "MyBB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.8.7-3-r26", "categories": [ "FORUM" ] }, { "id": "CDBE5B21E22B6412580DC67664C72291", "name": "Noalyss", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.9.0.0-5-r26", "categories": [ "ACCOUNTING" ] }, { "id": "A9EC52AC8F832E58A09460D2ED0A8DB6", "name": "NODE.JS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.7.0-0-r26", "categories": [ "INFRASTRUCTURE" ] }, { "id": "77AE451E1CB946E2627F5D5C885FB896", "name": "NGINX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.1-3-r26", "categories": [ "INFRASTRUCTURE" ] }, { "id": "604B1B4978D7A968E89DFC7C8CB8CC15", "name": "OPEN ATRIUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.68-1-r26", "categories": [ "CMS", "PROJECT MANAGEMENT" ] }, { "id": "5EE93FE704B1287A686FD91761D19FE0", "name": "Neos", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", 
"os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.6-4-r26", "categories": [ "CMS" ] }, { "id": "D2C77CFB66243427F1AF1EC8A97E9F5B", "name": "ocPortal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.30-1-r26", "categories": [ "CMS" ] }, { "id": "F8B64744559AF5E578325F0416D5D02F", "name": "MediaWiki", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.27.1-1-r26", "categories": [ "WIKI" ] }, { "id": "1BAF3057A7D51F5357A38CEB3C2D8EC6", "name": "OrangeHRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.3.2-3-r26", "categories": [ "HUMAN RESOURCE MANAGEMENT" ] }, { "id": "E48559090D8E7DD4B07D8CC7659BA40D", "name": "Osclass", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.1-4-r26", "categories": [ "ONLINE CLASSIFIEDS" ] }, { "id": 
"4B9827C6362B5CFD1D7333B4944A707E", "name": "OpenProject", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.0.20-0-r26", "categories": [ "PROJECT MANAGEMENT" ] }, { "id": "CD04664F04CE734E4A7A416C1DA72827", "name": "PARSE SERVER", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.22-0-r26", "categories": [ "DEVELOPER TOOLS", "FRAMEWORK", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "E25BD5D06B90B8DCE18C9A274878E98D", "name": "OXID ESHOP", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.9.8-2-r26", "categories": [ "E-COMMERCE" ] }, { "id": "15FB054795A16876B439FAC22617DE5F", "name": "phpBB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.9-3-r26", "categories": [ "FORUM" ] }, { "id": "C874DF05EB7963D3D8A108083D82450C", "name": "phpList", 
"available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.5-2-r26", "categories": [ "NEWSLETTER MANAGER" ] }, { "id": "D622FF6671FC31C5A8BB6C2DA96F006F", "name": "ownCloud", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.1-0-r26", "categories": [ "MEDIA SHARING" ] }, { "id": "A4412F4FBE84001FDB6C7C393F7D58AE", "name": "Pimcore", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.3.1-1-r26", "categories": [ "CMS", "E-COMMERCE", "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "944573F3F14219FE74E262BEC37FF026", "name": "Plone", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.0.2-2-r26", "categories": [ "CMS" ] }, { "id": "6F8CFB4086E728B1C4CA4F61BDB6202A", "name": "PrestaShop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1.7-1-r26", "categories": [ "E-COMMERCE" ] }, { "id": "5B92154253C2539E3DDC74647D59E342", "name": "ProcessWire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.2-4-r26", "categories": [ "CMS" ] }, { "id": "8C643C5C69FE463A2E27331452C9FF07", "name": "Pootle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.3-2-r26", "categories": [ "TRANSLATION TOOLS" ] }, { "id": "50131C55AE9E8AAFA4A42E3C1100CD55", "name": "RE:DASH", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.11.1.2095-1-r26", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "12C7C258B191F84783CC112B025516D3", "name": "RabbitMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" 
], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.5-1-r26", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "C870ADC95648D0519BF18997F00C4C18", "name": "Redmine", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.3.0-2-r26", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS", "PROJECT MANAGEMENT" ] }, { "id": "E576784F8E28096070D5E549A37C4566", "name": "Railo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.2.1.008-6-r26", "categories": [ "APPLICATION SERVER" ] }, { "id": "2421422B9BD58E7AF09B3C8FB5311279", "name": "REFINERY CMS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.5-10-r26", "categories": [ "CMS" ] }, { "id": "BD72E2EC5BF93DE7E361B884BDC23F2E", "name": "REPORTSERVER COMMUNITY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": 
"APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.2-3-r26", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "9BC57EEB364E74DF1914E4D64F99A973", "name": "ResourceSpace", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.9-1-r26", "categories": [ "MEDIA SHARING", "PHOTO SHARING", "VIDEO SHARING" ] }, { "id": "20A7D9C7F5DD761858AD93A3AA33EE81", "name": "REVIEW BOARD", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.6.1-3-r26", "categories": [ "CODE REVIEW", "DEVELOPER TOOLS" ] }, { "id": "8F3CE0757B455CE5FEA2ABA4B26567BF", "name": "Roundcube", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.1.4-5-r26", "categories": [ "EMAIL" ] }, { "id": "74EE53B233FF5917CC50F824D081FD00", "name": "Sharelock", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], 
"version": "0.1.0pre-6-r26", "categories": [ "APPLICATION SECURITY", "PASSWORD MANAGEMENT" ] }, { "id": "449E69D0978086679A33CC18D087E17E", "name": "Ruby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.1-5-r26", "categories": [ "INFRASTRUCTURE" ] }, { "id": "DF88FD8F7444603D8FF3EFCE77A7935C", "name": "Roller", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.1.2-9-r26", "categories": [ "BLOG" ] }, { "id": "8475038A28FA128E4E9ED7F29A1292A1", "name": "SilverStripe", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.4.1-1-r26", "categories": [ "CMS", "FRAMEWORK" ] }, { "id": "89E75566C86760F7EAB776916441D8D2", "name": "SEO PANEL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.9.0-3-r26", "categories": [ "ANALYTICS" ] }, { "id": 
"B87CDB8F374A62D1EA4BECBC7E135853", "name": "Sitecake", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.9-5-r26", "categories": [ "CMS" ] }, { "id": "6B2E9B6F3A78DE4EAFE6618D5B8D921D", "name": "APACHE SOLR", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.2.1-1-r26", "categories": [ "INFRASTRUCTURE", "SEARCH" ] }, { "id": "09E3DB12384FB854754213FFC77DE482", "name": "SugarCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.5.24-1-r26", "categories": [ "CRM" ] }, { "id": "921F26DEF65CB786A7B2C8C485C62E1F", "name": "SuiteCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.7.5-0-r26", "categories": [ "CRM" ] }, { "id": "CF272C4B3FFC034C64F9C738F2C63B7C", "name": "Spree", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.7-2-r26", "categories": [ "E-COMMERCE" ] }, { "id": "9978BB99D8F6544B980DB71A113F3F99", "name": "Squash", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20151209-4-r26", "categories": [ "CODE REVIEW" ] }, { "id": "5588E8E57B717EC72263C14EF0E9D598", "name": "TYPO3", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.3.1-1-r26", "categories": [ "CMS" ] }, { "id": "7B578C5853BBE2D0665503AB117F7581", "name": "Weblate", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5-2-r26", "categories": [ "TRANSLATION TOOLS" ] }, { "id": "554DB66973A0E95243E328D7F5576127", "name": "WildFly", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": 
"Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "10.1.0-1-r26", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "E49BED8A0D73E752152C85DC3C129B1F", "name": "Tracks", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0-10-r26", "categories": [ "PLANNING" ] }, { "id": "EA6C685C7D38CCDAC69F26575D6484C7", "name": "X2ENGINE SALES CRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.0.4-1-r26", "categories": [ "CRM" ] }, { "id": "33755C6E102CD1682E0699D2E7587AF3", "name": "Zurmo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.5-3-r26", "categories": [ "CRM" ] }, { "id": "BAEE8A473FA43661825BA06AE51E00E2", "name": "XOOPS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, 
"licenses": [], "version": "2.5.8-1-r26", "categories": [ "CMS" ] }, { "id": "6631A1589A2CC87FEA9B99AB07399281", "name": "Plesk", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": "Onyx", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE", "POPULAR" ] }, { "id": "85BD36869EC98AC0B07EE495D27EBF54", "name": "Plesk", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": "Onyx", "categories": [] }, { "id": "C937934FFFF33ED2E4F0439FE2B01074", "name": "WP MULTISITE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.1-4", "categories": [ "BLOG", "CMS" ] }, { "id": "9E6E9411145EC35CF40923492BDBCE6D", "name": "WordPress", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": 
"4.6.1-4", "categories": [ "BLOG", "CMS" ] }, { "id": "50BE5047022F1FCD059BF2222D95C15A", "name": "Redis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.4-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "F241D580B2955DC91865C87665AF3D4F", "name": "LIVE HELPER CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.48v-0", "categories": [ "CHAT" ] }, { "id": "21A470E06662A886405432183217B088", "name": "Mantis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.3.2-0", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS" ] }, { "id": "62D6A1BD29DBECC7F753E64B95735075", "name": "Jenkins", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.24-0", "categories": [ "CONTINUOUS INTEGRATION", "DEVELOPER TOOLS" ] }, { "id": "8F6E8AE549F9D545CD0FA27AFCEF2E67", "name": "Elasticsearch", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.4.1-0", "categories": [ "SEARCH" ] }, { "id": "287F758A243DE44F2A1FFF0F942C133E", "name": "Odoo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.20160620-3", "categories": [ "CRM", "ERP" ] }, { "id": "A3916E8922C68CAF6F66DF6C055AC98D", "name": "Dolibarr", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.0.1-0", "categories": [ "CRM", "ERP" ] }, { "id": "C2A2E81D7E578FCE1CCC8A6168B674A1", "name": "EZ PUBLISH", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2014.11-5", "categories": [ "CMS" ] }, { "id": "DFE58E41D26BAE94FC4268C9066FE839", "name": "Mean", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": 
"Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.10-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "0900040FB9B32C0CB2177DA83DE8C596", "name": "OpenProject", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.0.4-0", "categories": [ "PROJECT MANAGEMENT" ] }, { "id": "4B222A6379A5A394CD823165F2A18BC0", "name": "Django", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.2-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "DB2814CA4DD9167D5758D8384ED32874", "name": "CiviCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.12-0", "categories": [ "CRM" ] }, { "id": "15BA7101912E24D4A23F2897ABA73323", "name": "Kafka", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.10.0.1-1", 
"categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "FE4E2E7A6B8EA38A7B1839BEB767A67B", "name": "ActiveMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.14.1-1", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "EF5ABBAB3F35D8F547F14620B023368C", "name": "Shopware", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.8-0", "categories": [ "CMS", "E-COMMERCE" ] }, { "id": "FF6FE275C0D0D3F408E37F4D1B19D496", "name": "Piwik", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.16.5-0", "categories": [ "ANALYTICS" ] }, { "id": "439A6D7EC2D7A3ED853EACCC5866735A", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.51-0", "categories": [ "CMS" ] }, { "id": "BDFBD8E74D32C7DE68371E4D934F9018", "name": "PostgreSQL", 
"available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.6.0-1", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "E786E2D4A3DDF37A0AB43E653D8E07FF", "name": "PROCESSMAKER OPEN SOURCE EDITION", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1-0", "categories": [ "BPM", "COLLABORATION" ] }, { "id": "7A47E863F8E87F483AB514DE6CC2B887", "name": "Akeneo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.3-0", "categories": [ "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "C5559C10A3F42C222F073678802BB8E7", "name": "Squash", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20151209-5", "categories": [ "CODE REVIEW" ] }, { "id": "D2A60C479A551F5FF020FBDE925B0585", "name": "DreamFactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0-5", "categories": [ "DEVELOPER TOOLS", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "4555879BA095784F5041BB16FD85C7FB", "name": "Cassandra", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.9-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "78C7F86CB11A65B3A2FFB4209BAA099A", "name": "Elasticsearch", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.4.1-1", "categories": [ "SEARCH" ] }, { "id": "4C3199325A7C8379069475AE9E806304", "name": "NGINX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.1-4", "categories": [ "INFRASTRUCTURE" ] }, { "id": "DBB877489986CC2E9724AA0DAEFB5A1D", "name": "Ghost", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.11.1-1", "categories": [ "BLOG" ] }, { "id": "F3489789EE360F2E0DC7DD8A3FCC0B6B", "name": "APACHE SOLR", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.2.1-2", "categories": [ "INFRASTRUCTURE", "SEARCH" ] }, { "id": "6EE79C7280841BFEF7025CE6C0B6DF69", "name": "ownCloud", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.1-1", "categories": [ "MEDIA SHARING" ] }, { "id": "8CD76029C54B67DF75F4ECE45BF7EA2C", "name": "Mean", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.10-1", "categories": [ "INFRASTRUCTURE" ] }, { "id": "EE60DD9CB8FC0C2BAAC38C137A7B8647", "name": "Magento", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": 
null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.1-2", "categories": [ "E-COMMERCE" ] }, { "id": "BCC858E31C52BEAB5E128B27C2DDBDDD", "name": "Tomcat", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.0.37-2", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "4ED9CDACC24CF31F61B40BF2424929EC", "name": "Jenkins", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.658-4", "categories": [ "CONTINUOUS INTEGRATION", "DEVELOPER TOOLS" ] }, { "id": "03946C500881466B8E3A58CB58256088", "name": "Moodle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.2-2", "categories": [ "ELEARNING" ] }, { "id": "80EB2291B7A3060F610626B4EA2D9150", "name": "LIVE HELPER CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.50v-0", 
"categories": [ "CHAT" ] }, { "id": "5D01A0C474523730E86E53E3EB8F1AB2", "name": "X2ENGINE SALES CRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.5-0", "categories": [ "CRM" ] }, { "id": "36CE5BB8488C8324C2EA7A14B12B5E14", "name": "Redmine", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.3.1-0", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS", "PROJECT MANAGEMENT" ] }, { "id": "C1565D439CCB13BF213B463C050384A4", "name": "Tomcat", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.0.M11-0", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "8C1922EF80B8154B911E752D4B783F27", "name": "Tomcat", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.0.38-0", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": 
"709C20286041E201EC7CBF9C500D9FC5", "name": "GitLab", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.12.6-0", "categories": [ "DEVELOPER TOOLS", "VERSION CONTROL" ] }, { "id": "E73BED20AB0CBABFC0C8500CAF19F13A", "name": "Jenkins", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.26-0", "categories": [ "CONTINUOUS INTEGRATION", "DEVELOPER TOOLS" ] }, { "id": "790666C6069E1CB028BB168E81D44231", "name": "Trac", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.2rc1-0", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS", "WIKI" ] }, { "id": "218C1354E4A57FFC7190B71F4A02152B", "name": "debian8-64std+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian8", "os_version": "Debian", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": "Onyx", "categories": [] }, { "id": 
"CE6E8189E50A5CE8ABAA6A736B32C922", "name": "debian7-64std+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Debian7", "os_version": "Debian", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": "Onyx", "categories": [] }, { "id": "4963BBC25A4E297E59504D0D155E866A", "name": "ubuntu1604-64std+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu16.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": "Onyx", "categories": [] }, { "id": "5EF10B5B04FF47B2C973E03C86C90146", "name": "ubuntu1404-64std+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu14.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": "Onyx", "categories": [] }, { "id": "EA16F934113430299C192D18FB4A955F", "name": "ubuntu1204-64std+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Ubuntu12.04", "os_version": "Ubuntu", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": 
"Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": "Onyx", "categories": [] }, { "id": "C9E49D5ECFBE559B51D2AF4300F55B60", "name": "centos6-64std+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos6", "os_version": "Centos", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 20, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" } ], "version": "Onyx", "categories": [] }, { "id": "ACA2989FEFD28042134170C39A593539", "name": "WordPress", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.1-5", "categories": [ "BLOG", "CMS" ] }, { "id": "FE4A991E5E05C3F692DD7FED33BB54CC", "name": "Joomla!", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.4-0", "categories": [ "CMS" ] }, { "id": "2D18E0FE584505D31A1B0B0ADC4D8E76", "name": "Liferay", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.0.2-1-r27", "categories": [ "PORTAL SERVER" ] }, { "id": 
"D40845DB28FDF6DE554219E71C753FEC", "name": "TYPO3", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.3.1-1-r27", "categories": [ "CMS" ] }, { "id": "887ACA72848DEDC26AC2F533E116E9FD", "name": "Akeneo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.4-0-r27", "categories": [ "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "1F647C54B53835475AB2F8B8984430D4", "name": "ALFRESCO COMMUNITY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "201605-2-r27", "categories": [ "CMS", "DOCUMENT MANAGEMENT", "ECM" ] }, { "id": "D0C0316634C4BFC185756CB9618A498A", "name": "Ametys", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.7.3-3-r27", "categories": [ "CMS" ] }, { "id": "57DDAB5E1C180E9C518466CB5A4397CB", "name": "Artifactory", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.9.0-2-r27", "categories": [ "BINARY REPOSITORY", "DEVELOPER TOOLS" ] }, { "id": "A8DA08CB599EB513B2B44248AFE7C805", "name": "ActiveMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.14.1-1-r27", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "EA9000EEA297CC8A8D0A315937E683BB", "name": "AbanteCart", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.2.8-1-r27", "categories": [ "E-COMMERCE" ] }, { "id": "51D50EEFC90F9BDA2F46C38F74E45516", "name": "Cassandra", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.9-1-r27", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "FE7960BADDAD5492775FDC7DCCBA44C4", "name": "CANVAS LMS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2016.04.23.03-2-r27", "categories": [ "ELEARNING" ] }, { "id": "83EE5C89977B4ADE9929AD49CBC455FD", "name": "Chyrp", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.2-6-r27", "categories": [ "BLOG" ] }, { "id": "D0075666D41915899F45E2D68B8685C4", "name": "CiviCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.12-0-r27", "categories": [ "CRM" ] }, { "id": "913669D746B522935A52BBD1D6BE9EE1", "name": "CMS MADE SIMPLE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.5-1-r27", "categories": [ "CMS" ] }, { "id": "789B9F3C11425A29B741E41D14200614", "name": "Codiad", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", 
"os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.5-1-r27", "categories": [ "DEVELOPER TOOLS" ] }, { "id": "6D1C69BD68472AA786ADDF050767C581", "name": "concrete5", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.7.5.9-1-r27", "categories": [ "CMS" ] }, { "id": "36059005F111DE5C274DFEAA1455F523", "name": "Coppermine", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.5.44-0-r27", "categories": [ "PHOTO SHARING" ] }, { "id": "28CCD249C6E06994B2AD27AF3508C77B", "name": "CouchDB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1-6-r27", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "F58291DF0E75F69A782CC133A54CBA1A", "name": "Diaspora", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, 
"licenses": [], "version": "0.5.10.2-1-r27", "categories": [ "SOCIAL NETWORKING" ] }, { "id": "D2FD5127D37D16B6A2BE642E26BEB21C", "name": "Discourse", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.5.3-1-r27", "categories": [ "FORUM" ] }, { "id": "93C18F17AAC1A1FC0846CB1E4FCC1A38", "name": "Django", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.2-0-r27", "categories": [ "INFRASTRUCTURE" ] }, { "id": "36F4101089D6D56CA5F2BCE9F3D566A0", "name": "DokuWiki", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160626a-2-r27", "categories": [ "WIKI" ] }, { "id": "BDA26ED7A0563704E0689AA9566EEA94", "name": "Dolibarr", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.0.1-0-r27", "categories": [ "CRM", "ERP" ] }, { "id": "BA390D602BCB8259245489D2073E0F70", "name": 
"DreamFactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.1-0-r27", "categories": [ "DEVELOPER TOOLS", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "073E21D844CD8AE7D5E85A88E5B0ADA6", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.2.1-0-r27", "categories": [ "CMS" ] }, { "id": "5F8D1EAE09CFE38D07E6F8A300F41944", "name": "ECLIPSE CHE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.2-1-r27", "categories": [ "COLLABORATION", "DEVELOPER TOOLS" ] }, { "id": "B6482A7609A16457CC0128A9F593F402", "name": "Elasticsearch", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.4.1-1-r27", "categories": [ "SEARCH" ] }, { "id": "0A40E20C114748BCACA4122691EC5B01", "name": "ELK", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.2-0-r27", "categories": [ "ANALYTICS" ] }, { "id": "A7542A75CCBA7EA033BD89C37C5EAFBA", "name": "ERPNext", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.27.9-4-r27", "categories": [ "CRM", "ERP" ] }, { "id": "D4D15CA145236B2D46676E06A69E46BC", "name": "EspoCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.2.7-0-r27", "categories": [ "CRM" ] }, { "id": "1A30D003A8F9F136F2F7076BBB00A10A", "name": "OPEN EDX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160414-5-r27", "categories": [ "COLLABORATION", "ELEARNING" ] }, { "id": "75DEED57C5C9D54CDB04F8A59940FA60", "name": "EXO PLATFORM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": 
"Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.3.0-9-r27", "categories": [ "SOCIAL NETWORKING" ] }, { "id": "F6C75E17378E8C27F277B5AE198BFD83", "name": "EZ PUBLISH", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2014.11-5-r27", "categories": [ "CMS" ] }, { "id": "CB55CBAE27AFCFF6AC6873154E2AEA36", "name": "FAT FREE CRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.13.6-10-r27", "categories": [ "CRM" ] }, { "id": "1D54D070265F32E1EF7AD44BB078E71B", "name": "Ghost", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.11.2-0-r27", "categories": [ "BLOG" ] }, { "id": "F996523C2BDBBE81AD846E1369B91CED", "name": "HHVM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, 
"licenses": [], "version": "3.9.9-1-r27", "categories": [ "INFRASTRUCTURE" ] }, { "id": "0602B7ACDE1086E44DAA638B24BF906A", "name": "GitLab", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.12.6-0-r27", "categories": [ "DEVELOPER TOOLS", "VERSION CONTROL" ] }, { "id": "F304396E14C579C7E36DB7A2E5120225", "name": "HORDE GROUPWARE WEBMAIL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.14-3-r27", "categories": [ "COLLABORATION", "EMAIL" ] }, { "id": "0DCD6E50B4678FB3DB7E2DC3CD84FE64", "name": "JasperReports", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.3.0-2-r27", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "932E95E4ADE7D2E31BB454B2463A206E", "name": "JBOSS AS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.2.0-5-r27", "categories": 
[ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "A948D1958BDC57E6F6915FF33BA42183", "name": "Jenkins", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.658-4-r27", "categories": [ "CONTINUOUS INTEGRATION", "DEVELOPER TOOLS" ] }, { "id": "93BABCBC16771C224F1A306427A0B2CE", "name": "JRuby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.5.0-1-r27", "categories": [ "INFRASTRUCTURE" ] }, { "id": "8730EDBE19F67FDF19D57DA4F1B8C41E", "name": "Kafka", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.10.1.0-0-r27", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "22A2B2A1939E119F1B4AB25A818AD87E", "name": "LET'S CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.4.7-2-r27", "categories": [ "CHAT", "COLLABORATION", "SOCIAL NETWORKING" ] }, { 
"id": "E5ED3D517F9F5283E06ED16F8729565D", "name": "LimeSurvey", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160817-1-r27", "categories": [ "POLL MANAGEMENT" ] }, { "id": "260EE6C3D433B76C494A040DC58530D6", "name": "LIVE HELPER CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.50v-0-r27", "categories": [ "CHAT" ] }, { "id": "2E992E333015B1078BCFAD091FFB4D7C", "name": "Magento", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.2-0-r27", "categories": [ "E-COMMERCE" ] }, { "id": "A391B4AFEC3240C19B79D143CC99299B", "name": "Mantis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.3.2-0-r27", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS" ] }, { "id": "5B3C79385D2874E64982FF275824DB00", "name": "Mean", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.10-1-r27", "categories": [ "INFRASTRUCTURE" ] }, { "id": "96D09B89D74EB4A4ECC16174B903AFCE", "name": "MediaWiki", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.27.1-1-r27", "categories": [ "WIKI" ] }, { "id": "A45BE7259E8E8E1670654741A4EEF979", "name": "Memcached", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.4.32-0-r27", "categories": [ "CACHE", "INFRASTRUCTURE" ] }, { "id": "8B0F9491AA8D04AC98DD39E09C34822E", "name": "MODX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.1pl-1-r27", "categories": [ "CMS" ] }, { "id": "135D8FC34657ADCF860A787F63A2D47C", "name": "Moodle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.2-2-r27", "categories": [ "ELEARNING" ] }, { "id": "86555DAC5EF148AC69D31F16AC965FEC", "name": "MyBB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.8.8-0-r27", "categories": [ "FORUM" ] }, { "id": "ADAD28FE5E58081523C7BDC2793247D6", "name": "Neos", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.6-4-r27", "categories": [ "CMS" ] }, { "id": "999B2F775295460CEE9A7BCD837F264E", "name": "NGINX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.2-0-r27", "categories": [ "INFRASTRUCTURE" ] }, { "id": "EE010A236F461D48D7AC694DFBE09600", "name": "Noalyss", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": 
"APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.9.0.0-5-r27", "categories": [ "ACCOUNTING" ] }, { "id": "36846B86BA2C748CBEEB6842B0631743", "name": "NODE.JS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.9.1-0-r27", "categories": [ "INFRASTRUCTURE" ] }, { "id": "2A265DF040DD4C706847D20C5454EE06", "name": "ocPortal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.31-0-r27", "categories": [ "CMS" ] }, { "id": "A0DF3154DEBE85BF224EB4027ABA260A", "name": "Odoo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.20160620-3-r27", "categories": [ "CRM", "ERP" ] }, { "id": "E1F6187500C04E9271BDECEA3AEE43D1", "name": "OpenCart", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0.2-1-r27", "categories": [ "E-COMMERCE" ] }, { "id": 
"2CA5CA407EE4E09BCE78253D8DA1BC1B", "name": "OPEN ATRIUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.68-1-r27", "categories": [ "CMS", "PROJECT MANAGEMENT" ] }, { "id": "98F17B3730FE36F6380C02DEA6F3FEA3", "name": "Openfire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.0.3-1-r27", "categories": [ "CHAT" ] }, { "id": "E2A891BE5CA77A9ED7ABBB119099FEF9", "name": "OrangeHRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.3.2-3-r27", "categories": [ "HUMAN RESOURCE MANAGEMENT" ] }, { "id": "2DCC330AB48E2917373ABD965997EBB0", "name": "OpenProject", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.0.4-0-r27", "categories": [ "PROJECT MANAGEMENT" ] }, { "id": "7F2E2EF6CEE47AA376BE134E6107452E", "name": "Osclass", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.1-4-r27", "categories": [ "ONLINE CLASSIFIEDS" ] }, { "id": "0B8EC318B2330E1FE631B891DEDA6E02", "name": "OSQA", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [ "FORUM" ] }, { "id": "6A516CA2A26E77575A22B2C34A297AD4", "name": "ownCloud", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.1-1-r27", "categories": [ "MEDIA SHARING" ] }, { "id": "643A0542807A33BE790DBA85318EAB4E", "name": "OXID ESHOP", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.9.8-2-r27", "categories": [ "E-COMMERCE" ] }, { "id": "46CB70AFB69956F98785CEE2BC974B8C", "name": "PARSE SERVER", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.22-0-r27", "categories": [ "DEVELOPER TOOLS", "FRAMEWORK", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "0C05E623FCDDBA29EA0A9510512899B9", "name": "Phabricator", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160725-0-r27", "categories": [ "BUG TRACKING", "CODE REVIEW", "PROJECT MANAGEMENT", "WIKI" ] }, { "id": "FE62077CE6B94E79A64B7ECEC2D36D22", "name": "phpBB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.10-0-r27", "categories": [ "FORUM" ] }, { "id": "6BE9421C734A3801D142C71D3D2F1810", "name": "phpList", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.5-2-r27", "categories": [ "NEWSLETTER MANAGER" ] }, { "id": "A4759BD3D10D37A8B23010F49133CC74", "name": "Pimcore", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.3.1-1-r27", "categories": [ "CMS", "E-COMMERCE", "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "E577C89B9A5A0D976B0131C2781483AE", "name": "Piwik", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.16.5-0-r27", "categories": [ "ANALYTICS" ] }, { "id": "D96BB15FD6E866B9C82DFA34BB033422", "name": "Plone", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.0.2-2-r27", "categories": [ "CMS" ] }, { "id": "15E7ADAA193205BDD5F4D115340B0546", "name": "PostgreSQL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.6.0-1-r27", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "FEB6268FA8334ABF3A9AB7784A613D2F", "name": "PrestaShop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": 
"Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1.7-1-r27", "categories": [ "E-COMMERCE" ] }, { "id": "790DE3B60A9B7199C2EDB8CD6BD8AAF5", "name": "Pootle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.3-2-r27", "categories": [ "TRANSLATION TOOLS" ] }, { "id": "6BC770366E7763FDD918E9308ABD49DF", "name": "PROCESSMAKER OPEN SOURCE EDITION", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1-0-r27", "categories": [ "BPM", "COLLABORATION" ] }, { "id": "C8397CD4162746D83C3A0486E2CEEB53", "name": "ProcessWire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.2-4-r27", "categories": [ "CMS" ] }, { "id": "4B6CE5EAA7450E8D6B8351604E03FD3B", "name": "RabbitMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, 
"licenses": [], "version": "3.6.5-1-r27", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "B7047F2B421906363D72570D40094FB9", "name": "Railo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.2.1.008-6-r27", "categories": [ "APPLICATION SERVER" ] }, { "id": "725461607AA2751F3D1FE5254EA26FBA", "name": "RE:DASH", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.11.1.2095-1-r27", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "9575916CB986942D48702C94152C1B84", "name": "Redis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.4-0-r27", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "1F44BCE9F0673E7C93A201770E53B8A7", "name": "Redmine", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.3.1-0-r27", "categories": [ "BUG 
TRACKING", "DEVELOPER TOOLS", "PROJECT MANAGEMENT" ] }, { "id": "85234119BEEC3A91D632F9D29863B1DE", "name": "REFINERY CMS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.5-10-r27", "categories": [ "CMS" ] }, { "id": "31F14B9551930C0279F5D2B11CFFF4CB", "name": "ResourceSpace", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.9-1-r27", "categories": [ "MEDIA SHARING", "PHOTO SHARING", "VIDEO SHARING" ] }, { "id": "9E8E066BEE478C6619680A58F4B8F0DD", "name": "REPORTSERVER COMMUNITY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.2-3-r27", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "A17C9E3597DC024BA811898C74BCF7FF", "name": "REVIEW BOARD", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.7-0-r27", "categories": [ "CODE REVIEW", 
"DEVELOPER TOOLS" ] }, { "id": "BDB18F5D3C57A8E99168DAB13D7F483F", "name": "Roller", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.1.2-9-r27", "categories": [ "BLOG" ] }, { "id": "8D2D7B64222176F365CB23CBF162AE9B", "name": "Roundcube", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.1.4-5-r27", "categories": [ "EMAIL" ] }, { "id": "92D2F75F76F98BC48DF90758FC0C0E48", "name": "SEO PANEL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.9.0-3-r27", "categories": [ "ANALYTICS" ] }, { "id": "F44D63EF71896A6205A7E09C34E1F651", "name": "Ruby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.1-6-r27", "categories": [ "INFRASTRUCTURE" ] }, { "id": "18FF8DF1B8427E513EBD32AB90493EF7", "name": "Sharelock", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.1.0pre-6-r27", "categories": [ "APPLICATION SECURITY", "PASSWORD MANAGEMENT" ] }, { "id": "EF83E8574E591EE3FF905C4AB570FB17", "name": "Shopware", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.9-0-r27", "categories": [ "CMS", "E-COMMERCE" ] }, { "id": "7DFED272C70ADC7300A22E098830F991", "name": "SilverStripe", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.4.1-1-r27", "categories": [ "CMS", "FRAMEWORK" ] }, { "id": "45A2FE7D8A9D935139FA3DF57AF8359E", "name": "SIMPLE MACHINES FORUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.0.12-2-r27", "categories": [ "FORUM" ] }, { "id": "1E75F8044B150AEFD3D0BDB43C38F552", "name": "Sitecake", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.9-5-r27", "categories": [ "CMS" ] }, { "id": "4CA88FAD5196FA4AF79CBBC148917E35", "name": "APACHE SOLR", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.2.1-2-r27", "categories": [ "INFRASTRUCTURE", "SEARCH" ] }, { "id": "81BE4F35DEF135EFB230B538F076121E", "name": "Spree", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.7-3-r27", "categories": [ "E-COMMERCE" ] }, { "id": "C3586557C1F379796E67F2693680A734", "name": "Squash", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20151209-5-r27", "categories": [ "CODE REVIEW" ] }, { "id": "134A68AE4C2B139CA337D9456B7D37A2", "name": "SugarCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, 
"os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.5.24-1-r27", "categories": [ "CRM" ] }, { "id": "40A61D89ADF92DBDE26C60C8E2990B06", "name": "SuiteCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.7.6-0-r27", "categories": [ "CRM" ] }, { "id": "80F15E41CC65E5CD93F40C34D684FE3D", "name": "Tomcat", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.0.38-0-r27", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "71E2388F94E9AC66ECA66B3AC8FF5C7A", "name": "Trac", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [ "BUG TRACKING", "DEVELOPER TOOLS", "WIKI" ] }, { "id": "414FF7AFDE56BF4A4F3AD82F3983122A", "name": "Weblate", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5-2-r27", 
"categories": [ "TRANSLATION TOOLS" ] }, { "id": "60CFB9AB25B0A0A2146C09072CF1972E", "name": "WildFly", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "10.1.0-1-r27", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "ED6BE262EECE9854D1B93CF817F52849", "name": "Tracks", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.0-10-r27", "categories": [ "PLANNING" ] }, { "id": "3FA2DAA3F0F6055A34E0E79691BBCCDC", "name": "WP MULTISITE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.1-5-r27", "categories": [ "BLOG", "CMS" ] }, { "id": "AF1AAECA3C6585302DF92087F07BEC7F", "name": "X2ENGINE SALES CRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.0.4-1-r27", "categories": [ "CRM" ] }, { "id": "3DD71526B5DCF8DC1A9E1520D60C478B", "name": "Zurmo", 
"available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.5-3-r27", "categories": [ "CRM" ] }, { "id": "622DDE4F402A18D3BBC24C037F2FD850", "name": "XOOPS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.8-1-r27", "categories": [ "CMS" ] }, { "id": "E5C9972F3CB07E717421446F0EE70D3B", "name": "PARSE SERVER", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.23-0", "categories": [ "DEVELOPER TOOLS", "FRAMEWORK", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "3C633FD3A6819B1930F576A361F716FE", "name": "OPEN ATRIUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.69-0", "categories": [ "CMS", "PROJECT MANAGEMENT" ] }, { "id": "9762649FC3381B57AF9F374A0A24B446", "name": "Hadoop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.3-3", "categories": [ "DEVELOPER TOOLS" ] }, { "id": "A0FE88B4423A950C7178E1E8FEE00544", "name": "PrestaShop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1.8-0", "categories": [ "E-COMMERCE" ] }, { "id": "F0DF5BC92C095B06D0D5E007FDDE1211", "name": "TYPO3", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.3.1-2", "categories": [ "CMS" ] }, { "id": "564C2FD4C376935BA280DA2ECAA9872F", "name": "Mantis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.3.3-0", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS" ] }, { "id": "BCB1CE1B7EC5065B24496291271BE232", "name": "Piwik", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": 
"Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.17.0-0", "categories": [ "ANALYTICS" ] }, { "id": "429A6D79872F5E04A79B19F5EB654961", "name": "Dolibarr", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.0.2-0", "categories": [ "CRM", "ERP" ] }, { "id": "D767E69D919A0E838822E15771279A37", "name": "PostgreSQL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.6.1-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "F0C0DC4B55E0D421096C86A485C79A19", "name": "Redis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.5-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "02FB040E4296670D8C7C3A6C111F011D", "name": "PARSE SERVER", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 
20, "licenses": [], "version": "2.2.24-0", "categories": [ "DEVELOPER TOOLS", "FRAMEWORK", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "5BFDAA22D74B49B4531F97706E7ADE4B", "name": "Django", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.3-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "6A2ED08D58800058E230DB53552FA74F", "name": "Phabricator", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2016.30-2", "categories": [ "BUG TRACKING", "CODE REVIEW", "PROJECT MANAGEMENT", "WIKI" ] }, { "id": "437F2F5DE57835301986B59BCF2970CF", "name": "REVIEW BOARD", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.7-1", "categories": [ "CODE REVIEW", "DEVELOPER TOOLS" ] }, { "id": "619EF35946BE591CA126B440ABFDDFD9", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], 
"version": "8.2.2-0", "categories": [ "CMS" ] }, { "id": "B01C944993286A8B98983B196AA295CD", "name": "Django", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.8.16-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "5CF6617FB2EFF1C707CABD28F5991282", "name": "GitLab", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.13.3-0", "categories": [ "DEVELOPER TOOLS", "VERSION CONTROL" ] }, { "id": "C2EC23DFECCA8ADA9E32C6CDE42916BB", "name": "Magento", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.9.3.0-1", "categories": [ "E-COMMERCE" ] }, { "id": "2BB5AD25BCC62B759E62369E27D79F3D", "name": "w2012r2datacenter64std+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Windows 2012 Standard" } 
], "version": "Onyx", "categories": [] }, { "id": "F9F1BD7D594EEE31DE33B721BD9A38F9", "name": "w2012r2datacenter64std+SQL2012express+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Windows 2012 Standard" } ], "version": "Onyx", "categories": [] }, { "id": "15E803427F8EC58265DFB79B5123A903", "name": "w2012r2datacenter64std+SQL2012std+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2012 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": "Onyx", "categories": [] }, { "id": "40B468FC2D33E617E976D806EE631AA5", "name": "w2012r2datacenter64std+SQL2012web+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2012 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": "Onyx", "categories": [] }, { "id": "8E889505BA5C41A6A2C0699D683517F2", "name": 
"w2012r2datacenter64std+SQL2014express+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Windows 2012 Standard" } ], "version": "Onyx", "categories": [] }, { "id": "212755161754FDCB16FE6DECE94D1414", "name": "w2012r2datacenter64std+SQL2014std+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2014 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": "Onyx", "categories": [] }, { "id": "96EE4D855D8518A0F64AFC0988675555", "name": "w2012r2datacenter64std+SQL2014web+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2014 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": "Onyx", "categories": [] }, { "id": "0874EF09C70B5A8EBF1172AB8EB51ECA", "name": "w2012r2datacenter64std+SQL2016express+Plesk_Onyx", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Windows 2012 Standard" } ], "version": "Onyx", "categories": [] }, { "id": "98121579738901E3A1EF79418C3CE098", "name": "w2012r2datacenter64std+SQL2016std+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2016 Standard Edition" }, { "name": "Windows 2012 Standard" } ], "version": "Onyx", "categories": [] }, { "id": "6095163347569A6C1CEBE4FB7867CA23", "name": "w2012r2datacenter64std+SQL2016web+Plesk_Onyx", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "STANDARD", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Licencia Plesk 10.4 - Ilimitados dominios" }, { "name": "Licencia SQL Server 2016 Web Edition" }, { "name": "Windows 2012 Standard" } ], "version": "Onyx", "categories": [] }, { "id": "5611A8434E9126B2DF9FC10BF8B53001", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], 
"os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.2.2-1", "categories": [ "CMS" ] }, { "id": "5D985C749783B1B5FC90113A13BEE119", "name": "Jenkins", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.19.2-0", "categories": [ "CONTINUOUS INTEGRATION", "DEVELOPER TOOLS" ] }, { "id": "A95FF22FE64778EAF451D6A36046A785", "name": "REVIEW BOARD", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.7-2", "categories": [ "CODE REVIEW", "DEVELOPER TOOLS" ] }, { "id": "E5C1B4507CB1DB0430614D5A422604B0", "name": "REPORTSERVER COMMUNITY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.2-4", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "E013D508FBA69633C375C8ECBC03E911", "name": "Odoo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", 
"os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.20160620-5", "categories": [ "CRM", "ERP" ] }, { "id": "93D5909615C950358980532840AD3E86", "name": "SuiteCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.7.7-0", "categories": [ "CRM" ] }, { "id": "2114DEE9FEA37BC82AC663D82AE60DFA", "name": "Railo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.2.1.008-7", "categories": [ "APPLICATION SERVER" ] }, { "id": "557B62C5316CDCF628982FBCC6F2D2C1", "name": "ALFRESCO COMMUNITY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "201605-3", "categories": [ "CMS", "DOCUMENT MANAGEMENT", "ECM" ] }, { "id": "5FC292A268C4357CC6FBA2E62E700A35", "name": "ocPortal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], 
"version": "9.0.32-0", "categories": [ "CMS" ] }, { "id": "06A0221F7548F4136DEBAADBB6A6F576", "name": "phpList", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.6-0", "categories": [ "NEWSLETTER MANAGER" ] }, { "id": "9C926DA0F08D31F965F195CB1F94DBF4", "name": "ELK", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.2-1", "categories": [ "ANALYTICS" ] }, { "id": "4C9BB43FD892ADF6DB385500FE87CFFF", "name": "Memcached", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.4.33-0", "categories": [ "CACHE", "INFRASTRUCTURE" ] }, { "id": "B4650612AE04EA3DE7B2EE3546AC9C5B", "name": "Moodle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.1.3-0", "categories": [ "ELEARNING" ] }, { "id": "E81504B51585B56D51C34D0ED38B10C1", "name": "Tomcat", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.0.M13-0", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "5ABBB00C180F72FCA4ECF14871C55F15", "name": "LIVE HELPER CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.51v-0", "categories": [ "CHAT" ] }, { "id": "489D5ACFC403D415BD3C97E71A3B7713", "name": "Shopware", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.10-0", "categories": [ "CMS", "E-COMMERCE" ] }, { "id": "F3F4112F6830DBB517A55A45549167E5", "name": "Tomcat", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.0.73-0", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "5A6AA19D95342B69FD046E7FF85487F4", "name": "Tomcat", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.0.39-0", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "2A41533352EF507986D2C8C3DE1A50D4", "name": "JRuby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.6.0-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "50FE1ACC01FB7ED092944B054766AC6A", "name": "PrestaShop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.1.9-0", "categories": [ "E-COMMERCE" ] }, { "id": "9189AA239BFE5077CAC5A922B1E30C72", "name": "ELK", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.6.2-2", "categories": [ "ANALYTICS" ] }, { "id": "61DDF7A768E574910D59DDED450C0EEE", "name": "Ruby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": 
"Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.2.6-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "6A3271969FCAE8012759546259A0170C", "name": "Ruby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.2-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "7718D4E06E480AF3287CDBA159638D6E", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.52-0", "categories": [ "CMS" ] }, { "id": "F9892920939E94485B133B8C65649C9C", "name": "Artifactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.9.0-3", "categories": [ "BINARY REPOSITORY", "DEVELOPER TOOLS" ] }, { "id": "D5F2F4055037BAC658B095EFACB7CC92", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": 
"8.2.3-0", "categories": [ "CMS" ] }, { "id": "48E49C57678DB18F168CE0447E780590", "name": "Jenkins", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.19.3-0", "categories": [ "CONTINUOUS INTEGRATION", "DEVELOPER TOOLS" ] }, { "id": "8F63B0C423FAABB9F56B9A226624DC58", "name": "ResourceSpace", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.9-2", "categories": [ "MEDIA SHARING", "PHOTO SHARING", "VIDEO SHARING" ] }, { "id": "8243CF166147E2B2C97B69F03E19AEE6", "name": "Piwik", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.17.1-0", "categories": [ "ANALYTICS" ] }, { "id": "5034C73770C4E17D069E60951601465E", "name": "SuiteCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.7.8-0", "categories": [ "CRM" ] }, { "id": "28AE86716FF25A6656A2AF8D5F53981F", "name": 
"X2ENGINE SALES CRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.5.1-0", "categories": [ "CRM" ] }, { "id": "7A03B4CEBA1FE6DE0B2F1D1148D84D06", "name": "OXID ESHOP", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.10.2-0", "categories": [ "E-COMMERCE" ] }, { "id": "4AB0209B3237865B686B653A2921A87E", "name": "LimeSurvey", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20161118-0", "categories": [ "POLL MANAGEMENT" ] }, { "id": "B08F1D394485A25976AE51DA9B69A3BE", "name": "Mean", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.11-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "2FB8FBE70C10C1DD2A23F5B5D64BA7ED", "name": "MODX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.2pl-0", "categories": [ "CMS" ] }, { "id": "4B48D90C05C00850288BC1F11103A9B3", "name": "RabbitMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.6-0", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "761C0A51F741969EEEE854EA7DFE0465", "name": "CiviCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.13-0", "categories": [ "CRM" ] }, { "id": "444A9048EA382A5931AE5AADF99DBE35", "name": "GitLab", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.14.0-0", "categories": [ "DEVELOPER TOOLS", "VERSION CONTROL" ] }, { "id": "41E3A00D9D8DCEB2FBA0A6F1D162D039", "name": "Trac", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", 
"os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.2-0", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS", "WIKI" ] }, { "id": "B470B00FEC67D6C20A97EABD7BB38691", "name": "Akeneo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.5-0", "categories": [ "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "4FBB834D9DCAC41FB8E86DA188CB406B", "name": "Plone", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.0.6-0", "categories": [ "CMS" ] }, { "id": "3ACDE33A040B8192F0AAE12994D96079", "name": "Mantis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.3.4-0", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS" ] }, { "id": "A0CEA9B96CEB399142012F5964933D7C", "name": "Ruby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", 
"min_hdd_size": 20, "licenses": [], "version": "2.3.3-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "38F0BFD9C3E66B3B55AD222222E297CC", "name": "Shopware", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.12-0", "categories": [ "CMS", "E-COMMERCE" ] }, { "id": "C97F46107C7A3ED0FFE54326E5B70624", "name": "Noalyss", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.9.1.5-0", "categories": [ "ACCOUNTING" ] }, { "id": "9A4919E81B2381D475219C3B8D90CA8C", "name": "PrestaShop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.7.0.2-0", "categories": [ "E-COMMERCE" ] }, { "id": "B51E94C9649C5AE7469EE6FD88D57CB5", "name": "MongoDB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.4.0-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": 
"3D7E41CEF8BB78A4A9EDDCF13E9E9053", "name": "SilverStripe", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.5.0-0", "categories": [ "CMS", "FRAMEWORK" ] }, { "id": "FCE352FFDAD90E07EE9CF870FF850691", "name": "JFROG ARTIFACTORY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.9.0-4", "categories": [ "BINARY REPOSITORY", "DEVELOPER TOOLS" ] }, { "id": "D2B671B044ED0AB98C5DB7842212C11B", "name": "Django", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.4-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "E301A548B015F640315033E956E16C7D", "name": "Django", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.8.17-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "44734888C6F064D22F18ED86B1612E9F", "name": "MediaWiki", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.28.0-0", "categories": [ "WIKI" ] }, { "id": "D626FF632CD8D805605CE7D527E00FF7", "name": "ocPortal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.33-0", "categories": [ "CMS" ] }, { "id": "32255D1902F87D53FC91D2773E46B349", "name": "Zurmo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.0-0", "categories": [ "CRM" ] }, { "id": "4273B081213D70DCE14E772017BF4908", "name": "GitLab", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.14.3-0", "categories": [ "DEVELOPER TOOLS", "VERSION CONTROL" ] }, { "id": "E6B7469813798D0629F1FBA1A87466A0", "name": "Openfire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], 
"os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.0.4-0", "categories": [ "CHAT" ] }, { "id": "8E8F0CECA9A0F63428B65ABB1834F345", "name": "Jenkins", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.19.4-0", "categories": [ "CONTINUOUS INTEGRATION", "DEVELOPER TOOLS" ] }, { "id": "85D225790B3348445F16801AA3FA9B48", "name": "Redis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.6-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "6F7CECDD1FC26A602118D4A64B118ECD", "name": "NODE.JS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.9.2-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "40A5C3FB1FA1B4A2F569844728E85DCF", "name": "Squash", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": 
"APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20160301-0", "categories": [ "CODE REVIEW" ] }, { "id": "51833FE622E5C1D719E1688214D2890C", "name": "DreamFactory", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.4.1-2", "categories": [ "DEVELOPER TOOLS", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "2FEA5ED118B06C8673F6A03E64BE03A5", "name": "LimeSurvey", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20161205-0", "categories": [ "POLL MANAGEMENT" ] }, { "id": "5C19A6333F441B83FBC5184E605F35AD", "name": "OPEN ATRIUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.611-0", "categories": [ "CMS", "PROJECT MANAGEMENT" ] }, { "id": "BB31325CCBC7B60400177BD20EF0919D", "name": "CiviCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.14-0", 
"categories": [ "CRM" ] }, { "id": "CF8050EDB22A91AFEF7F3CCD56F5EFB7", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.2.4-0", "categories": [ "CMS" ] }, { "id": "6E08092CC8B3BB8BA7B3696F962042F9", "name": "NODE.JS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.2.1-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "6AC6A6FD6D09CCABA9D8495297984823", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.53-0", "categories": [ "CMS" ] }, { "id": "4518877E49A04BFEAC16BEE6B894F0E3", "name": "EspoCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.3.0-0", "categories": [ "CRM" ] }, { "id": "BB210DCBE4A56B5146DF73FE036E5455", "name": "Akeneo", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", 
"908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.6.6-0", "categories": [ "PRODUCT INFORMATION MANAGEMENT" ] }, { "id": "158308EF4AF66D9DF1810F3CBDE6F353", "name": "NODE.JS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.0-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "D4050B0ADDCFEA2D1C55E5E5AF22FB31", "name": "REFINERY CMS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.5-0", "categories": [ "CMS" ] }, { "id": "5A59D0F511E8A3EA90D64CCB6D13428F", "name": "ActiveMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.14.2-0", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "1C8A34EF47193DDDA4B6A816A6F11370", "name": "WP MULTISITE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" 
], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7-0", "categories": [ "BLOG", "CMS" ] }, { "id": "83D8DF2DAA6D865A0996AEE6129CF364", "name": "WordPress", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7-0", "categories": [ "BLOG", "CMS" ] }, { "id": "EF11A41203805797DCD1974D7B9BEF7B", "name": "WordPress", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.php56-0", "categories": [ "BLOG", "CMS" ] }, { "id": "32D0CA8B499940E5C5FECC91D798809C", "name": "WP MULTISITE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.php56-0", "categories": [ "BLOG", "CMS" ] }, { "id": "BFBB161F64F7FFCF65E92E9BA59D249F", "name": "LAMP", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", 
"min_hdd_size": 20, "licenses": [], "version": "7.1.0-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "E07EDABD073B74A2120A19073B2AAF4E", "name": "LAPP Stack", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.1.0-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "48EC401FE1E042D7ECF1E77E6E08A1EB", "name": "TINY TINY RSS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20161213-0", "categories": [ "NEWS AGGREGATOR" ] }, { "id": "00D896BA4C60E0EE7DA29304784BD8AF", "name": "Mautic", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.4.0-0", "categories": [ "CRM", "EMAIL", "MARKETING AUTOMATION" ] }, { "id": "2000FB68A23FF6946B357C33ABFFCF45", "name": "PARSE SERVER", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.3.1-0", "categories": [ "DEVELOPER TOOLS", 
"FRAMEWORK", "Middleware", "MOBILE DEVELOPMENT" ] }, { "id": "148A577540578BBA1F4BC0FD87514266", "name": "OroCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.12-0", "categories": [ "CRM" ] }, { "id": "7CAB3EC8A7B05E23A791F64898FB7339", "name": "MySQL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.6.35-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "9F38B5A4EA5E1B047AC82A7DD5B071CA", "name": "LAPP Stack", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.0.14-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "51D4F0C6DEB3548F7AF37E0FE89AF64A", "name": "LAMP", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.0.14-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "F452A101BD2D7BBC5B3C50C426BFFD01", "name": "LAPP Stack", 
"available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.6.29-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "2B7F44F10F8FC43235545A20224BF956", "name": "LAMP", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.6.29-0", "categories": [ "INFRASTRUCTURE", "POPULAR" ] }, { "id": "3F0573F8D78C7892A6E5B48A99DA4E1B", "name": "Subversion", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.9.5-0", "categories": [ "DEVELOPER TOOLS", "VERSION CONTROL" ] }, { "id": "1A263D2EB4ED1660BF867AEF669B368D", "name": "Mahara", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "16.10.1-0", "categories": [ "COLLABORATION", "ELEARNING", "MEDIA SHARING", "SOCIAL NETWORKING" ] }, { "id": "89AA3CBFD6F4126FC49DD1E338779E24", "name": "TIKI WIKI CMS GROUPWARE", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "16.1-0", "categories": [ "CMS", "COLLABORATION", "WIKI" ] }, { "id": "457F2884B4C9325B61DD76D3066EE99B", "name": "Joomla!", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.6.5-0", "categories": [ "CMS" ] }, { "id": "80FE9E61CDFD4478B61E05D45B1833F4", "name": "AbanteCart", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.2.9-0", "categories": [ "E-COMMERCE" ] }, { "id": "0B5BB1BBFB39C28F82DF9804DAF74E61", "name": "Osclass", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.7.0-0", "categories": [ "ONLINE CLASSIFIEDS" ] }, { "id": "FBCDC34C894FAD01D674D32C7034F1BF", "name": "HORDE GROUPWARE WEBMAIL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", 
"5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.2.17-0", "categories": [ "COLLABORATION", "EMAIL" ] }, { "id": "5D0E751261760562752B5F2C2135F076", "name": "SEO PANEL", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.10.0-0", "categories": [ "ANALYTICS" ] }, { "id": "0CB00AF85ACD62669FB67F979CD608C7", "name": "SilverStripe", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.5.1-0", "categories": [ "CMS", "FRAMEWORK" ] }, { "id": "7A28641A3FB33FE2A7FC5C1B45FC8970", "name": "Ametys", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.8.1-0", "categories": [ "CMS" ] }, { "id": "E29812D5DDF322090E1C082CD5A7E7FE", "name": "OPEN ATRIUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, 
"os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.612-0", "categories": [ "CMS", "PROJECT MANAGEMENT" ] }, { "id": "B64AF1A8DC39615CC0E3F6E72FC612A8", "name": "ActiveMQ", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.14.3-0", "categories": [ "DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "6AACE44F36E2C8E021B3E3C7EAF81A47", "name": "EspoCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.3.1-0", "categories": [ "CRM" ] }, { "id": "12DE0ED80AFC971C9A021D722A7C6B3A", "name": "X2ENGINE SALES CRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "6.5.2-0", "categories": [ "CRM" ] }, { "id": "9F078FEB476BFE64504A7B14178A8EE1", "name": "Kafka", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.10.1.1-0", "categories": [ 
"DEVELOPER TOOLS", "INFRASTRUCTURE" ] }, { "id": "7B9A2EA0BDBBCDB5A652FE0D07CF2FBF", "name": "OpenERP", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.0-25", "categories": [ "CRM", "ERP" ] }, { "id": "848F2BB1B6B278B4025B3606598123C3", "name": "SimpleInvoices", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2013.1.beta.8-6", "categories": [ "ACCOUNTING" ] }, { "id": "C01A9B6232FFC487039718DD17BE80D7", "name": "Openfire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.1.1-0", "categories": [ "CHAT" ] }, { "id": "C7F49E3713D9AE033539D628D737A577", "name": "Django", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.5-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "5753041EAF625B748F5E73CAB8DFAAC7", "name": "ERPNext", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.2.8-0", "categories": [ "CRM", "ERP" ] }, { "id": "517DCE1A168888C1B2C4C90FF7DCDE62", "name": "MyBB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.8.10-0", "categories": [ "FORUM" ] }, { "id": "9D2C3D429B0D54F252A103E7AB6C7200", "name": "SIMPLE MACHINES FORUM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.0.13-0", "categories": [ "FORUM" ] }, { "id": "2EE86FEC02DB90BC3119E5DA2CF7000A", "name": "JRuby", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.7.0-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "1A079A22FEA69AF9F8E7C64EA75138E8", "name": "concrete5", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], 
"os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "5.7.5.13-0", "categories": [ "CMS" ] }, { "id": "EEAB5D3832E8C49BBA49340532201FD9", "name": "Memcached", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.4.34-0", "categories": [ "CACHE", "INFRASTRUCTURE" ] }, { "id": "6AF9F672FE5548FD26BCBFEFF9FD26F5", "name": "MongoDB", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.4.1-0", "categories": [ "DATABASE", "INFRASTRUCTURE" ] }, { "id": "68336B1D12E86FB7CEF51E99A2C74910", "name": "NODE.JS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.4.0-0", "categories": [ "INFRASTRUCTURE" ] }, { "id": "347F8594499DE0DCE50DF8510C29478A", "name": "Tomcat", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": 
"APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.0.0.M17-0", "categories": [ "APPLICATION SERVER", "INFRASTRUCTURE" ] }, { "id": "4A76701EC59D8F1604C34A16F98C12D9", "name": "RE:DASH", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "0.12.0.2449-0", "categories": [ "ANALYTICS", "BUSINESS INTELLIGENCE" ] }, { "id": "FB0F1B12B84912918C73FEB83FF2732E", "name": "Pootle", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.7.6-0", "categories": [ "TRANSLATION TOOLS" ] }, { "id": "3E0D2DDAAA01CF64B46CA3DA2A9E9009", "name": "COREOS_1185.3.0_64BITS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "CoreOS 1185.3.4", "os_version": "CoreOS", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] }, { "id": "37DBEACD9864AF6C874692DDBAB9693C", "name": "SuiteCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "7.7.9-1", "categories": [ 
"CRM" ] }, { "id": "4F9916EAB74EB67E59FFBBFA8FC862A6", "name": "PrestaShop", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.7.0.4-1", "categories": [ "E-COMMERCE" ] }, { "id": "47A1E0F445CF1B3EAF7AA37BFEC99765", "name": "JFROG ARTIFACTORY", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.16.0-1", "categories": [ "BINARY REPOSITORY", "DEVELOPER TOOLS" ] }, { "id": "7254215DF4C13DDDB71ADA583A2559C9", "name": "Mantis", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.0.0-1", "categories": [ "BUG TRACKING", "DEVELOPER TOOLS" ] }, { "id": "6E60B13E6F1D73C2567F1B4B864D75FD", "name": "CiviCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "4.7.15-1", "categories": [ "CRM" ] }, { "id": "A0AC3AB893615424CD2CE6E019EAA50A", "name": "Mautic", "available_datacenters": [ 
"81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.1-1", "categories": [ "CRM", "EMAIL", "MARKETING AUTOMATION" ] }, { "id": "E3659A006A5181269F9FEBBC336DB1BD", "name": "LIVE HELPER CHAT", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.52v-1", "categories": [ "CHAT" ] }, { "id": "FEFFFEBBDD0468090534DC4F40161EB5", "name": "Mahara", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "16.10.2-1", "categories": [ "COLLABORATION", "ELEARNING", "MEDIA SHARING", "SOCIAL NETWORKING" ] }, { "id": "46ED61EA8D92FAF6DC63EF89FCB2005F", "name": "Drupal", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "8.2.5-1", "categories": [ "CMS" ] }, { "id": "BFB384CE8C41362CE7291CB4BC5AE3AA", "name": "ProcessWire", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", 
"4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.0.42-2", "categories": [ "CMS" ] }, { "id": "64E1B8118643E5CF588E16A825674224", "name": "phpList", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "3.2.7-1", "categories": [ "NEWSLETTER MANAGER" ] }, { "id": "C46867ACABEA255496F239853BB2D8F4", "name": "XOOPS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.8.1-1", "categories": [ "CMS" ] }, { "id": "D5B67125994531E839E4EE7426734D44", "name": "OroCRM", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "1.10.12-2", "categories": [ "CRM" ] }, { "id": "3E6F9D3607CC042AAC0424E3484C3A5E", "name": "LimeSurvey", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, 
"os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "20170116-0", "categories": [ "POLL MANAGEMENT" ] }, { "id": "BBF529CD3F61C3774E80A9E2ECA8BBA8", "name": "ownCloud", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "9.1.3-1", "categories": [ "MEDIA SHARING" ] }, { "id": "237298EF69A39DE18CF515FD9E1D28E8", "name": "MODX", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.5.4pl-1", "categories": [ "CMS" ] }, { "id": "3D089CC7EBB7BAE63F54FDF38E0E6A92", "name": "CMS MADE SIMPLE", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "Centos7", "os_version": "Centos", "os_architecture": 64, "os_image_type": null, "type": "APPLICATION", "min_hdd_size": 20, "licenses": [], "version": "2.1.6-1", "categories": [ "CMS" ] }, { "id": "64BDA5E918D983BF02CAD55C0E738080", "name": "OPENSUSE_LEAP-42.2_64MIN", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "OpenSuse Leap-42.2", "os_version": "OpenSuse", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 20, "licenses": [], "version": null, "categories": [] }, { 
"id": "34D8E058F1EE61D056BE1F6704EB18A7", "name": "OPENSUSE_LEAP-42.2_64BITS", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Linux", "os": "OpenSuse Leap-42.2", "os_version": "OpenSuse", "os_architecture": 64, "os_image_type": "ISO_OS", "type": "ISO", "min_hdd_size": null, "licenses": [], "version": null, "categories": [] } ] apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/monitoring_policy_ports.json0000664000175000017500000000047613153541406032364 0ustar kamikami00000000000000[ { "id": "663D21E232530D79E4E584104C400EE4", "protocol": "TCP", "port": 22, "alert_if": "RESPONDING", "email_notifications": true }, { "id": "663D21E232530D79E4E584104C400AAA", "protocol": "TCP", "port": 80, "alert_if": "NOT_RESPONDING", "email_notifications": true } ]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/monitoring_policy_processes.json0000664000175000017500000000044413153541406033216 0ustar kamikami00000000000000[ { "id": "663D21E232530D79E4E584104C400EE4", "process": "iexplorer", "alert_if": "NOT_RUNNING", "email_notifications": true }, { "id": "663D21E232530D79E4E584104C400EE4", "process": "winword", "alert_if": "NOT_RUNNING", "email_notifications": true } ]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/describe_firewall_policy.json0000664000175000017500000000113713153541406032410 0ustar kamikami00000000000000{ "id": "83522FC7DA9172F229E5352C587075BA", "name": "My firewall policy test", "description": "My firewall policy description", "state": "CONFIGURING", "creation_date": "2015-04-29T10:43:11+00:00", "default": 0, "rules": [ { "id": "DA5CC179ED00079AE7DE595F0073D86E", "protocol": "TCP", "port_from": 80, "port_to": 80, "source": "0.0.0.0" }, { "id": "0766EC674A0CD9D4EC0FA0B07978A649", "protocol": "TCP", "port_from": 443, "port_to": 443, "source": "0.0.0.0" } ], "server_ips": [], 
"cloudpanel_id": "FW99AA4_7" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/load_balancer_rules.json0000664000175000017500000000046213153541406031344 0ustar kamikami00000000000000[ { "id": "BCFAF421227674B2B324F779C1163ECB", "protocol": "TCP", "port_balancer": 80, "port_server": 80, "source": "0.0.0.0" }, { "id": "7390C04142800E006FF1B0132FFD8F9A", "protocol": "TCP", "port_balancer": 9999, "port_server": 8888, "source": "0.0.0.0" } ]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/server_ips.json0000664000175000017500000000026613153541406027547 0ustar kamikami00000000000000[ { "id": "01D4A802798AB77AA72DA2D05E1379E1", "ip": "10.5.135.140", "type": "IPV4", "reverse_dns": null, "firewall_policy": null, "load_balancers": [] } ]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/get_server_image.json0000664000175000017500000000011113153541406030662 0ustar kamikami00000000000000{ "id": "76EBF29C1250167C8754B2B3D1C05F68", "name": "centos7-64std" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/list_public_ips.json0000664000175000017500000000265013153541406030551 0ustar kamikami00000000000000[ { "id": "569FA2EC06DD48C9E8635F3384A018DB", "ip": "10.5.138.52", "type": "IPV4", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "assigned_to": { "id": "B23F1B4F84E983B4FEDD5459E877058A", "name": "My load balancer", "type": "LOAD_BALANCER" }, "reverse_dns": null, "is_dhcp": false, "state": "ACTIVE", "creation_date": "2015-03-03T11:12:10+00:00" }, { "id": "6F033A13C3EFCE0FB60783280A118D63", "ip": "10.5.133.191", "type": "IPV4", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "assigned_to": { "id": "BDAF0EC6A36E9E554B80B7E7365821F5", "name": "My Server 1", "type": "SERVER" }, "reverse_dns": null, "is_dhcp": true, "state": "ACTIVE", "creation_date": "2015-05-04T07:21:37+00:00" }, { "id": "39C73A57B33DAF2F9FA1EBEA4C301FCF", 
"ip": "10.5.138.81", "type": "IPV4", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "assigned_to": { "id": "B23F1B4F84E983B4FEDD5459E877058A", "name": "My load balancer", "type": "LOAD_BALANCER" }, "reverse_dns": null, "is_dhcp": false, "state": "ACTIVE", "creation_date": "2015-03-03T11:13:16+00:00" } ]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/ex_list_datacenters.json0000664000175000017500000000111213153541406031401 0ustar kamikami00000000000000[ { "id":"81DEF28500FBC2A973FC0C620DF5B721", "location":"Spain", "country_code":"ES", "default":0 }, { "id":"908DC2072407C94C8054610AD5A53B8C", "location":"United States of America", "country_code":"US", "default":1 }, { "id":"4EFAD5836CE43ACA502FD5B99BEE44EF", "location":"Germany", "country_code":"DE", "default":0 }, { "id":"5091F6D8CBFEF9C26ACE957C652D5D49", "location":"United Kingdom of Great Britain and Northern Ireland", "country_code":"GB", "default":0 } ]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/get_image.json0000664000175000017500000000107113153541406027302 0ustar kamikami00000000000000{ "id": "6E1F2C70CCD3EE44ED194F4FFC47C4C9", "name": "w2012r2datacenter64min", "available_datacenters": [ "81DEF28500FBC2A973FC0C620DF5B721", "908DC2072407C94C8054610AD5A53B8C", "4EFAD5836CE43ACA502FD5B99BEE44EF", "5091F6D8CBFEF9C26ACE957C652D5D49" ], "os_family": "Windows", "os": "Windows2012R2", "os_version": "WindowsDatacenter", "os_architecture": 64, "os_image_type": "MINIMAL", "type": "IMAGE", "min_hdd_size": 40, "licenses": [ { "name": "Windows 2012 Standard" } ], "version": null, "categories": [] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/monitoring_policy_process.json0000664000175000017500000000020413153541406032660 0ustar kamikami00000000000000{ "id": "663D21E232530D79E4E584104C400EE4", "process": "iexplorer", "alert_if": "NOT_RUNNING", "email_notifications": true 
}apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/list_load_balancer.json0000664000175000017500000000377413153541406031176 0ustar kamikami00000000000000[ { "id": "B23F1B4F84E983B4FEDD5459E877058A", "name": "My load balancer", "state": "CONFIGURING", "creation_date": "2015-03-03T11:12:00+00:00", "description": "My load balancer description", "ip": "10.5.138.52", "health_check_test": "TCP", "health_check_interval": 40, "health_check_path": null, "health_check_path_parser": null, "persistence": true, "persistence_time": 1200, "method": "ROUND_ROBIN", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "rules": [ { "id": "D7A0A7C445D844848EBB439C28B46183", "protocol": "TCP", "port_balancer": 22, "port_server": 22, "source": "0.0.0.0" }, { "id": "9AE836CDE379EEB361287F07B3300F84", "protocol": "TCP", "port_balancer": 85, "port_server": 85, "source": "0.0.0.0" } ], "server_ips": [], "cloudpanel_id": "LB99AA4_1" }, { "id": "B23F1B4F84E983B4FEDD5459E877058B", "name": "My Load Balancer II", "state": "CONFIGURING", "creation_date": "2015-03-03T11:12:00+00:00", "description": "My load balancer II description", "ip": "10.5.138.53", "health_check_test": "TCP", "health_check_interval": 40, "health_check_path": null, "health_check_path_parser": null, "persistence": true, "persistence_time": 1200, "method": "ROUND_ROBIN", "region": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "rules": [ { "id": "D7A0A7C445D844848EBB439C28B46184", "protocol": "TCP", "port_balancer": 22, "port_server": 22, "source": "0.0.0.0" }, { "id": "9AE836CDE379EEB361287F07B3300F85", "protocol": "TCP", "port_balancer": 9999, "port_server": 9999, "source": "0.0.0.0" } ], "server_ips": [], "cloudpanel_id": "LB99AA4_1" } ]apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/public_ip.json0000664000175000017500000000053513153541406027333 0ustar kamikami00000000000000{ "id": 
"44972922D045B9648118AA80FF2A51C9", "ip": "10.4.140.155", "type": "IPV4", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "assigned_to": null, "reverse_dns": "example.com", "is_dhcp": false, "state": "ACTIVE", "creation_date": "2015-05-06T08:17:33+00:00" }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/describe_shared_stoage.json0000664000175000017500000000133513153541406032034 0ustar kamikami00000000000000{ "id": "6AD2F180B7B666539EF75A02FE227084", "size": 200, "state": "ACTIVE", "description": "My shared storage description", "datacenter": { "id": "D0F6D8C8ED29D3036F94C27BBB7BAD36", "location": "USA", "country_code": "US" }, "cloudpanel_id": "vid35780", "size_used": "0.00", "cifs_path": "\\vid50995.nas1.lan\vid50995", "nfs_path": "vid50995.nas1.lan/:vid50995", "name": "My shared storage test rename", "creation_date": "2015-05-06T08:33:25+00:00", "servers": [ { "id": "638ED28205B1AFD7ADEF569C725DD85F", "name": "Mi servidor 1", "rights": "RW" }, { "id": "39AA65F5D5B02FA02D58173094EBAF95", "name": "My Server remame", "rights": "R" } ] }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/oneandone/auth_error.json0000664000175000017500000000014313153541406027532 0ustar kamikami00000000000000{ "type":"UNAUTHORIZED", "message":"The Token you are using is not valid", "errors":null }apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/0000775000175000017500000000000013160535107025143 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/list_nodes_page_1.json0000664000175000017500000000371212701023453031414 0ustar kamikami00000000000000{ "droplets": [ { "id": 3164444, "name": "example.com", "memory": 512, "vcpus": 1, "disk": 20, "locked": false, "status": "active", "kernel": { "id": 2233, "name": "Ubuntu 14.04 x64 vmlinuz-3.13.0-37-generic", "version": "3.13.0-37-generic" }, "created_at": "2014-11-14T16:29:21Z", "features": [ "backups", "ipv6", 
"virtio" ], "backup_ids": [ 7938002 ], "snapshot_ids": [ ], "image": { "id": 6918990, "name": "14.04 x64", "distribution": "Ubuntu", "slug": "ubuntu-14-04-x64", "public": true, "regions": [ "nyc1", "ams1", "sfo1", "nyc2", "ams2", "sgp1", "lon1", "nyc3", "ams3", "nyc3" ], "created_at": "2014-10-17T20:24:33Z", "min_disk_size": 20 }, "size_slug": "512mb", "networks": { "v4": [ { "ip_address": "104.236.32.182", "netmask": "255.255.192.0", "gateway": "104.236.0.1", "type": "public" } ], "v6": [ { "ip_address": "2604:A880:0800:0010:0000:0000:02DD:4001", "netmask": 64, "gateway": "2604:A880:0800:0010:0000:0000:0000:0001", "type": "public" } ] }, "region": { "name": "New York 3", "slug": "nyc3", "sizes": [ ], "features": [ "virtio", "private_networking", "backups", "ipv6", "metadata" ], "available": null } } ], "links": { "pages": { "last":"https://api.digitalocean.com/v2/droplets?page=2", "next":"https://api.digitalocean.com/v2/droplets?page=2" } }, "meta": { "total":2 } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/list_key_pairs.json0000664000175000017500000000037612701023453031061 0ustar kamikami00000000000000{ "ssh_keys": [ { "id": 7717, "fingerprint": "f5:d1:78:ed:28:72:5f:e1:ac:94:fd:1f:e0:a3:48:6d", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDGk5 example", "name": "test1" } ], "meta": { "total": 1 } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/create_image.json0000664000175000017500000000036412701023453030442 0ustar kamikami00000000000000{ "action": { "id": 36805022, "status": "in-progress", "type": "snapshot", "started_at": "2014-11-14T16:34:39Z", "completed_at": null, "resource_id": 3164450, "resource_type": "droplet", "region": "nyc3" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/ex_change_kernel.json0000664000175000017500000000037113153541406031321 0ustar kamikami00000000000000{ "action": { "id": 36077295, "status": "in-progress", "type": "kernel_change", "started_at": 
"2014-11-04T17:08:03Z", "completed_at": null, "resource_id": 3067650, "resource_type": "droplet", "region": "ams2" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/ex_power_on_node.json0000664000175000017500000000036412701023453031366 0ustar kamikami00000000000000{ "action": { "id": 36804758, "status": "in-progress", "type": "power_on", "started_at": "2014-11-14T16:31:19Z", "completed_at": null, "resource_id": 3164450, "resource_type": "droplet", "region": "nyc3" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/ex_hard_reboot.json0000664000175000017500000000036713153541406031031 0ustar kamikami00000000000000{ "action": { "id": 36077294, "status": "in-progress", "type": "power_cycle", "started_at": "2014-11-04T17:08:03Z", "completed_at": null, "resource_id": 3067651, "resource_type": "droplet", "region": "ams3" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/create_key_pair.json0000664000175000017500000000031612701023453031160 0ustar kamikami00000000000000{ "ssh_key": { "id": 7717, "fingerprint": "f5:d1:78:ed:28:72:5f:e1:ac:94:fd:1f:e0:a3:48:6d", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQsxRiUKn example", "name": "test1" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/ex_shutdown_node.json0000664000175000017500000000036412701023453031411 0ustar kamikami00000000000000{ "action": { "id": 36077293, "status": "in-progress", "type": "shutdown", "started_at": "2014-11-04T17:08:03Z", "completed_at": null, "resource_id": 3067649, "resource_type": "droplet", "region": "nyc2" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/get_image.json0000664000175000017500000000035512701023453027756 0ustar kamikami00000000000000{ "image": { "id": 12345, "name": "My snapshot", "distribution": "Ubuntu", "slug": null, "public": false, "regions": [ "nyc2" ], "created_at": "2014-11-04T22:23:02Z", "min_disk_size": 20 } } 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/digitalocean/error.json0000664000175000017500000000006212701023453027161 0ustar kamikami00000000000000{"id":"","message":"Unable to authenticate you."} apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/0000775000175000017500000000000013160535107025337 5ustar kamikami00000000000000././@LongLink0000000000000000000000000000021100000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000341412701023453032507 0ustar kamikami00000000000000 e75ead52-692f-4314-8725-c8a4f4d13a87 test2 test2 node 1 2048 10 0 UNIX REDHAT5/64 52ed8b72-ebea-11df-bdc1-001517c46384 52f4c05b-341e-4ac3-b688-bdd78e43ca9e 10.162.151.11 DEPLOY_SERVER 2011-03-20T22:32:23.000Z copia apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/port_list_get.xml0000664000175000017500000000114313153541406030737 0ustar kamikami00000000000000 MyPortList Production Servers NORMAL 2015-06-21T18:54:42.000Z apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_server_NA3.xml0000664000175000017500000000604212704221640031415 0ustar kamikami00000000000000 Production Web Server MCP 1 nopassword0 2 e9ec6eb4-4634-49de-b914-01eb74da5fb9 2015-08-11T16:51:05.000Z true true NORMAL Production Web Server MCP 2 Server to host our main web application. 
4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 ././@LongLink0000000000000000000000000000024700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000202712701023453032506 0ustar kamikami00000000000000 Delete Local Storage SUCCESS Server "Disk Delete" issued REASON_0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_rebootServer.xml0000664000175000017500000000056012704221640032126 0ustar kamikami00000000000000 REBOOT_SERVER IN_PROGRESS Request to reboot Server 'Production Server' has been accepted and is being processed. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_enableServerMonitoring.xml0000664000175000017500000000052512704221640034131 0ustar kamikami00000000000000 ENABLE_SERVER_MONITORING OK Monitoring on Server 'Production Server' has been enabled. 
././@LongLink0000000000000000000000000000022100000000000011210 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000203512701023453032505 0ustar kamikami00000000000000 Restart Server ERROR Operation in progress on Server with Id 11 REASON_392 ././@LongLink0000000000000000000000000000020700000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000174612701023453032515 0ustar kamikami00000000000000 Graceful Shutdown Server SUCCESS Server "Graceful Shutdown" issued REASON_0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/infrastructure_datacenter_NA9.xml0000664000175000017500000000601412704221640034000 0ustar kamikami00000000000000 US - East 3 - MCP 2.0 Ashburn Virginia US https://na9.cloud-vpn.net ftps-na.cloud-vpn.net Standard STD Standard Disk Speed High Performance HPF Faster than Standard. Uses 15000 RPM disk with Fast Cache. Economy ECN Slower than Standard. Uses 7200 RPM disk without Fast Cache. Standard Standard CPU Speed High Performance Faster and more consistent than Standard. Suitable for applications that are more CPU intensive. 
././@LongLink0000000000000000000000000000020100000000000011206 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_firewallRule_d0a20f59_77b0000664000175000017500000000105312704221640033355 0ustar kamikami00000000000000 8cdfd607-f429-4df6-9352-162cfc0891be CCDEFAULT.BlockOutboundMailIPv4 DROP IPV4 TCP true NORMAL ././@LongLink0000000000000000000000000000020700000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000201612701023453032504 0ustar kamikami00000000000000 Power Off Server SUCCESS Server "Power Off" issued REASON_0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_GetServer.xml0000664000175000017500000000363613153541406031366 0ustar kamikami00000000000000 Production Web Server Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true stopped SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 **OR** apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_powerOffServer.xml0000664000175000017500000000056612704221640032431 0ustar kamikami00000000000000 POWER_OFF_SERVER IN_PROGRESS Request to power off Server 'Production Server' has been accepted and is being processed. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_addDisk.xml0000664000175000017500000000113312704474244031017 0ustar kamikami00000000000000 ADD_DISK IN_PROGRESS The request to add a 20 GB Standard Speed Disk on Server 'SERVER-1' has been accepted and is being processed. 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_applyTags.xml0000664000175000017500000000045113153541406030661 0ustar kamikami00000000000000 APPLY_TAGS OK Tag(s) successfully applied. ././@LongLink0000000000000000000000000000020600000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000172212701023453032507 0ustar kamikami00000000000000 Restart Server SUCCESS Server "Restart" issued REASON_0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_myaccount.xml0000664000175000017500000000303412701023453031354 0ustar kamikami00000000000000 testuser Test User Test User test@example.com 8a8f6abc-2745-4d8a-9cbc-8dabe5a7d0e4 create image reports server primary administrator network ././@LongLink0000000000000000000000000000021000000000000011206 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000325512701023453032512 0ustar kamikami00000000000000 4bba37be-506f-11e3-b29c-001517c4643e test-net1 Test Network. 
NA5 10.192.176.0 false 208e3a8e-9d2f-11e2-b29c-001517c4643e Test Network Network description NA9 10.172.74.0 false apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_editVlan.xml0000664000175000017500000000050012704221640031370 0ustar kamikami00000000000000 EDIT_VLAN OK VLAN 'test' was edited successfully ././@LongLink0000000000000000000000000000022200000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000203712701023453032507 0ustar kamikami00000000000000 Power Off Server ERROR Operation in progress on Server with Id 11 REASON_392 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_removeTag.xml0000664000175000017500000000045213153541406030647 0ustar kamikami00000000000000 REMOVE_TAGS OK Tag(s) successfully removed. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_deleteNetworkDomain.xml0000664000175000017500000000064712704221640033602 0ustar kamikami00000000000000 DELETE_NETWORK_DOMAIN IN_PROGRESS Request to Delete Network Domain (Id: 8cdfd607-f429-4df6-9352- 162cfc0891be) has been accepted and is being processed apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_tag_list.xml0000664000175000017500000000254213153541406030526 0ustar kamikami00000000000000 SERVER 09242b55-3bc8-4cb7-b30c-4158267f58e6 App server NA9 5ab77f5f-5aa9-426f-8459-4eab34e03d54 ChangeNameTest No way! 
true true NETWORK_DOMAIN 1a16bf5e-583b-42c9-af94-a92d9ee1607f An Ho1a Demo NA9 d047c609-93d7-4bc5-8fc9-732c85840075 AaronTestModified Success true true SERVER 77da591d-b58e-43ef-8bc2-ddde3f732893 Test 1 NA9 d047c609-93d7-4bc5-8fc9-732c85840075 AaronTestModified Test VMware true true apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/infrastructure_datacenter.xml0000664000175000017500000002351712704221640033340 0ustar kamikami00000000000000 US - West Santa Clara California US https://na3.cloud-vpn.net ftps-na.cloud-vpn.net High Performance HPF Faster than Standard. Uses 15000 RPM disk with Fast Cache. Standard STD Standard Disk Speed Economy ECN Slower than Standard. Uses 7200 RPM disk without Fast Cache. US - East Ashburn Virginia US https://na1.cloud-vpn.net ftps-na.cloud-vpn.net Standard STD Standard Disk Speed US - East 2 Ashburn Virginia US https://na5.cloud-vpn.net ftps-na.cloud-vpn.net High Performance HPF Faster than Standard. Uses 15000 RPM disk with Fast Cache. Standard STD Standard Disk Speed Economy ECN Slower than Standard. Uses 7200 RPM disk without Fast Cache. US - East 3 - MCP 2.0 Ashburn Virginia US https://na9.cloud-vpn.net ftps-na.cloud-vpn.net Standard STD Standard Disk Speed High Performance HPF Faster than Standard. Uses 15000 RPM disk with Fast Cache. Economy ECN Slower than Standard. Uses 7200 RPM disk without Fast Cache. US - West - MCP 2.0 Santa Clara California US https://na12.cloud-vpn.net ftps-na.cloud-vpn.net Standard STD Standard Disk Speed High Performance HPF Faster than Standard. Uses 15000 RPM disk with Fast Cache. Economy ECN Slower than Standard. Uses 7200 RPM disk without Fast Cache. 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_osImage.xml0000664000175000017500000000402512704221640030605 0ustar kamikami00000000000000 RedHat 6 64-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 4 2015-09-17T11:23:48.000Z T-RHEL-6-64-2-4-10 Win2012 DC 2 CPU Windows 2012 Datacenter 4 2015-09-17T11:44:43.000Z T-WIN-2012-DATACTR-64-2-4-50 Win2008 Std 32-bit 2 CPU Windows 2008 Enterprise R2 32-bit installed with Microsoft SQL Server 2012 Standard Edition 4 MSSQL2008R2S 2014-11-20T12:54:22.000Z T-WIN-2008-ENT-32-2-4-50 ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_base_imageWithDiskSpeed.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_base_imageWithDiskSpeed.x0000664000175000017500000045776412701023453033554 0ustar kamikami00000000000000 RedHat 6 64-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 2 4096 2015-05-13T07:11:34.000Z NORMAL RedHat 6 64-bit 4 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 4 8192 2015-05-13T07:11:36.000Z NORMAL RedHat 6 32-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 32-bit 2 4096 2015-05-13T07:11:32.000Z NORMAL RedHat 5 64-bit 2 CPU RedHat 5.11 Enterprise (Tikanga) 64-bit 2 4096 2015-05-13T07:11:30.000Z NORMAL RedHat 5 32-bit 2 CPU RedHat 5.11 Enterprise (Tikanga) 32-bit 2 4096 2015-05-13T07:11:28.000Z NORMAL CentOS 6 64-bit 2 CPU CentOS Release 6.6 64-bit 2 4096 2015-05-13T07:11:42.000Z NORMAL CentOS 5 64-bit 2 CPU CentOS Release 5.11 64-bit 2 4096 2015-05-13T07:11:40.000Z NORMAL CentOS 5 32-bit 2 CPU CentOS Release 5.11 32-bit 2 4096 2015-05-13T07:11:38.000Z NORMAL Ubuntu 14.04 2 CPU Ubuntu 14.04.2 LTS 64-bit 2 4096 2015-05-13T07:11:50.000Z NORMAL Ubuntu 12.04 2 CPU Ubuntu 12.04.5 LTS 64-bit 2 4096 2015-05-13T07:11:48.000Z NORMAL SuSE Linux Ent 64-bit 2 CPU SuSE Linux Enterprise Server 11 SP3 2 4096 2015-05-13T07:11:44.000Z NORMAL Win2012 R2 Std 2 CPU Windows 2012 R2 Update 
Standard 2 4096 2015-05-13T07:12:48.000Z NORMAL Win2012 R2 DC 2 CPU Windows 2012 R2 Update Datacenter 2 4096 2015-05-13T07:12:46.000Z NORMAL Win2012 Std 2 CPU Windows 2012 Standard 2 4096 2015-05-13T07:12:44.000Z NORMAL Win2012 DC 2 CPU Windows 2012 Datacenter 2 4096 2015-05-13T07:12:33.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2014 Std Windows 2012 R2 Datacenter 64-bit installed with SQL 2014 Standard 4 8192 MSSQL2014S 2015-05-13T07:12:57.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2014 Ent Windows 2012 R2 Datacenter 64-bit installed with SQL 2014 Enterprise 4 8192 MSSQL2014E 2015-05-13T07:12:55.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2012 Std Windows 2012 R2 Datacenter 64-bit installed with SQL 2012 Standard 4 8192 MSSQL2012R2S 2015-05-13T07:12:54.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2012 Ent Windows 2012 R2 Datacenter 64-bit installed with SQL 2012 Enterprise 4 8192 MSSQL2012R2E 2015-05-13T07:12:52.000Z NORMAL Win2012 DC with MS SQL 2014 Std Windows 2012 Datacenter 64-bit installed with SQL 2014 Standard 4 8192 MSSQL2014S 2015-05-13T07:12:41.000Z NORMAL Win2012 DC with MS SQL 2014 Ent Windows 2012 Datacenter 64-bit installed with SQL 2014 Enterprise 4 8192 MSSQL2014E 2015-05-13T07:12:40.000Z NORMAL Win2012 DC with MS SQL 2012 Std Windows 2012 Datacenter 64-bit installed with SQL 2012 Standard SP2 4 8192 MSSQL2012R2S 2015-05-13T07:12:37.000Z NORMAL Win2012 DC with MS SQL 2012 Ent Windows 2012 Datacenter 64-bit installed with SQL 2012 Enterprise SP2 4 8192 MSSQL2012R2E 2015-05-13T07:12:35.000Z NORMAL Win2008 R2 Std 2 CPU Windows 2008 R2 Standard SP1 2 4096 2015-05-13T07:12:31.000Z NORMAL Win2008 R2 Ent 2 CPU Windows 2008 R2 Enterprise SP1 2 4096 2015-05-13T07:12:19.000Z NORMAL Win2008 R2 DC 64-bit 2 CPU Windows 2008 R2 Datacenter SP1 2 4096 2015-05-13T07:12:17.000Z NORMAL Win2008 R2 Ent with MS SQL 2012 Std Windows 2008 R2 Enterprise SP3 installed with SQL Server 2012 Standard SP2 4 8192 MSSQL2012R2S 2015-05-13T07:12:25.000Z NORMAL 
Win2008 R2 Ent with MS SQL 2012 Ent Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2012 Enterprise SP2 4 8192 MSSQL2012R2E 2015-05-13T07:12:23.000Z NORMAL Win2008 R2 Ent with MS SQL 2008 R2 Std Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2008 R2 Standard SP3 4 8192 MSSQL2008R2S 2015-05-13T07:12:29.000Z NORMAL Win2008 R2 Ent with MS SQL 2008 R2 Ent Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2008 R2 Enterprise SP3 4 8192 MSSQL2008R2E 2015-05-13T07:12:27.000Z NORMAL Win2008 R2 Ent 64-Bit with MS SP Fnd Windows 2008 R2 Enterprise installed with SQL Express & SharePoint Foundation 2010 4 8192 MSSPFND2010 2015-05-13T07:12:21.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Small DD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapsml00. SID=DD0 NR=00 2 8192 SAPACCEL 2015-05-13T07:12:07.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Small DD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapsml01. SID=DD1 NR=00 2 8192 SAPACCEL 2015-05-13T07:12:05.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Medium TD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapmed00. SID=TD0 NR=00 4 10240 SAPACCEL 2015-05-13T07:12:01.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Medium TD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapmed01. SID=TD1 NR=00 4 10240 SAPACCEL 2015-05-13T07:12:03.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Large PD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=saplrg00. SID=PD0 NR=00 8 20480 SAPACCEL 2015-05-13T07:11:58.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Large PD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=saplrg01. 
SID=PD1 NR=00 8 20480 SAPACCEL 2015-05-13T07:11:59.000Z NORMAL Win2008 Std 64-bit 2 CPU Windows 2008 Standard SP2 64-bit 2 4096 2015-05-13T07:12:15.000Z NORMAL Win2008 Ent 64-bit 2 CPU Windows 2008 Enterprise SP2 64-bit 2 4096 2015-05-13T07:12:11.000Z NORMAL Win2008 Std 32-bit 2 CPU Windows 2008 Standard SP2 32-bit 2 4096 2015-05-13T07:12:13.000Z NORMAL Win2008 Ent 32-bit 2 CPU Windows 2008 Enterprise SP2 32-bit 2 4096 2015-05-13T07:12:09.000Z NORMAL CSfM SharePoint 2013 Trial Windows 2012 R2 Standard 64-bit installed with SharePoint 2013 and Visual Studio 2013 Pro (Trial Version) 4 12288 2015-03-19T18:28:32.000Z NORMAL RedHat 6 64-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 2 4096 2015-06-19T06:03:01.000Z NORMAL RedHat 6 64-bit 4 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 4 8192 2015-06-19T06:03:30.000Z NORMAL RedHat 6 32-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 32-bit 2 4096 2015-06-19T06:03:25.000Z NORMAL RedHat 5 64-bit 2 CPU RedHat 5.11 Enterprise (Tikanga) 64-bit 2 4096 2015-06-19T06:02:48.000Z NORMAL RedHat 5 32-bit 2 CPU RedHat 5.11 Enterprise (Tikanga) 32-bit 2 4096 2015-06-19T06:03:44.000Z NORMAL CentOS 6 64-bit 2 CPU CentOS Release 6.6 64-bit 2 4096 2015-06-19T06:03:28.000Z NORMAL CentOS 5 64-bit 2 CPU CentOS Release 5.11 64-bit 2 4096 2015-06-19T06:02:53.000Z NORMAL CentOS 5 32-bit 2 CPU CentOS Release 5.11 32-bit 2 4096 2015-06-19T06:03:17.000Z NORMAL Ubuntu 14.04 2 CPU Ubuntu 14.04.2 LTS 64-bit 2 4096 2015-06-19T06:02:34.000Z NORMAL Ubuntu 12.04 2 CPU Ubuntu 12.04.5 LTS 64-bit 2 4096 2015-06-19T06:03:32.000Z NORMAL SuSE Linux Ent 64-bit 2 CPU SuSE Linux Enterprise Server 11 SP3 2 4096 2015-06-19T06:03:04.000Z NORMAL Win2012 R2 Std 2 CPU Windows 2012 R2 Update Standard 2 4096 2015-06-19T06:02:40.000Z NORMAL Win2012 R2 Std 4 CPU Windows 2012 R2 Update Standard 4 8192 2015-06-19T06:03:16.000Z NORMAL Win2012 R2 DC 2 CPU Windows 2012 R2 Update Datacenter 2 4096 2015-06-19T06:03:41.000Z NORMAL Win2012 Std 2 CPU Windows 2012 Standard 2 4096 
2015-06-19T06:02:33.000Z NORMAL Win2012 DC 2 CPU Windows 2012 Datacenter 2 4096 2015-06-19T06:03:19.000Z NORMAL Win2012 DC with MS SQL 2014 Std Windows 2012 Datacenter 64-bit installed with SQL 2014 Standard 4 8192 MSSQL2014S 2015-06-19T06:02:46.000Z NORMAL Win2012 DC with MS SQL 2014 Ent Windows 2012 Datacenter 64-bit installed with SQL 2014 Enterprise 4 8192 MSSQL2014E 2015-06-19T06:03:06.000Z NORMAL Win2012 DC with MS SQL 2012 Std Windows 2012 Datacenter 64-bit installed with SQL 2012 Standard SP2 4 8192 MSSQL2012R2S 2015-06-19T06:02:50.000Z NORMAL Win2012 DC with MS SQL 2012 Ent Windows 2012 Datacenter 64-bit installed with SQL 2012 Enterprise SP2 4 8192 MSSQL2012R2E 2015-06-19T06:03:37.000Z NORMAL Win2008 R2 Std 2 CPU Windows 2008 R2 Standard SP1 2 4096 2015-06-19T06:03:35.000Z NORMAL Win2008 R2 Ent 2 CPU Windows 2008 R2 Enterprise SP1 2 4096 2015-06-19T06:02:59.000Z NORMAL Win2008 R2 DC 64-bit 2 CPU Windows 2008 R2 Datacenter SP1 2 4096 2015-06-19T06:03:23.000Z NORMAL Win2008 R2 Ent with MS SQL 2012 Std Windows 2008 R2 Enterprise SP3 installed with SQL Server 2012 Standard SP2 4 8192 MSSQL2012R2S 2015-06-19T06:03:21.000Z NORMAL Win2008 R2 Ent with MS SQL 2012 Ent Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2012 Enterprise SP2 4 8192 MSSQL2012R2E 2015-06-19T06:02:36.000Z NORMAL Win2008 R2 Ent with MS SQL 2008 R2 Std Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2008 R2 Standard SP3 4 8192 MSSQL2008R2S 2015-06-19T06:03:46.000Z NORMAL Win2008 R2 Ent with MS SQL 2008 R2 Ent Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2008 R2 Enterprise SP3 4 8192 MSSQL2008R2E 2015-06-19T06:03:39.000Z NORMAL Win2008 R2 Ent 64-Bit with MS SP Fnd Windows 2008 R2 Enterprise installed with SQL Express & SharePoint Foundation 2010 4 8192 MSSPFND2010 2015-06-19T06:03:02.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Small DD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapsml00. 
SID=DD0 NR=00 2 8192 SAPACCEL 2015-06-19T06:02:44.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Small DD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapsml01. SID=DD1 NR=00 2 8192 SAPACCEL 2015-06-19T06:03:27.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Medium TD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapmed00. SID=TD0 NR=00 4 10240 SAPACCEL 2015-06-19T06:02:51.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Medium TD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapmed01. SID=TD1 NR=00 4 10240 SAPACCEL 2015-06-19T06:03:12.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Large PD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=saplrg00. SID=PD0 NR=00 8 20480 SAPACCEL 2015-06-19T06:03:10.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Large PD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=saplrg01. SID=PD1 NR=00 8 20480 SAPACCEL 2015-06-19T06:03:08.000Z NORMAL Win2008 Std 64-bit 2 CPU Windows 2008 Standard SP2 64-bit 2 4096 2015-06-19T06:03:33.000Z NORMAL Win2008 Ent 64-bit 2 CPU Windows 2008 Enterprise SP2 64-bit 2 4096 2015-06-19T06:02:38.000Z NORMAL Win2008 Std 32-bit 2 CPU Windows 2008 Standard SP2 32-bit 2 4096 2015-06-19T06:03:43.000Z NORMAL Win2008 Ent 32-bit 2 CPU Windows 2008 Enterprise SP2 32-bit 2 4096 2015-06-19T06:02:55.000Z NORMAL RedHat 6 64-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 2 4096 2015-05-15T08:34:51.000Z NORMAL RedHat 6 64-bit 4 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 4 8192 2015-05-15T08:34:52.000Z NORMAL RedHat 6 32-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 32-bit 2 4096 2015-05-15T08:34:49.000Z NORMAL RedHat 5 64-bit 2 CPU RedHat 5.11 Enterprise (Tikanga) 64-bit 2 4096 2015-05-15T08:34:47.000Z NORMAL RedHat 5 32-bit 2 CPU RedHat 5.11 Enterprise (Tikanga) 32-bit 2 4096 2015-05-15T08:34:45.000Z NORMAL CentOS 6 64-bit 2 CPU CentOS Release 6.6 64-bit 2 4096 2015-05-15T08:34:58.000Z NORMAL CentOS 5 64-bit 2 CPU CentOS Release 5.11 64-bit 2 4096 2015-05-15T08:34:56.000Z NORMAL CentOS 5 32-bit 2 CPU 
CentOS Release 5.11 32-bit 2 4096 2015-05-15T08:34:54.000Z NORMAL Ubuntu 14.04 2 CPU Ubuntu 14.04.2 LTS 64-bit 2 4096 2015-05-15T08:35:06.000Z NORMAL Ubuntu 12.04 2 CPU Ubuntu 12.04.5 LTS 64-bit 2 4096 2015-05-15T08:35:04.000Z NORMAL SuSE Linux Ent 64-bit 2 CPU SuSE Linux Enterprise Server 11 SP3 2 4096 2015-05-15T08:35:00.000Z NORMAL Win2012 R2 Std 2 CPU Windows 2012 R2 Update Standard 2 4096 2015-05-15T08:35:50.000Z NORMAL Win2012 R2 DC 2 CPU Windows 2012 R2 Update Datacenter 2 4096 2015-05-15T08:35:46.000Z NORMAL Win2012 Std 2 CPU Windows 2012 Standard 2 4096 2015-05-15T08:35:52.000Z NORMAL Win2012 DC 2 CPU Windows 2012 Datacenter 2 4096 2015-05-15T08:35:36.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2014 Std Windows 2012 R2 Datacenter 64-bit installed with SQL 2014 Standard 4 8192 MSSQL2014S 2015-05-15T08:36:14.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2014 Ent Windows 2012 R2 Datacenter 64-bit installed with SQL 2014 Enterprise 4 8192 MSSQL2014E 2015-05-15T08:36:12.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2012 Std Windows 2012 R2 Datacenter 64-bit installed with SQL 2012 Standard 4 8192 MSSQL2012R2S 2015-05-15T08:36:10.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2012 Ent Windows 2012 R2 Datacenter 64-bit installed with SQL 2012 Enterprise 4 8192 MSSQL2012R2E 2015-05-15T08:36:08.000Z NORMAL Win2012 DC with MS SQL 2014 Std Windows 2012 Datacenter 64-bit installed with SQL 2014 Standard 4 8192 MSSQL2014S 2015-05-15T08:35:48.000Z NORMAL Win2012 DC with MS SQL 2014 Ent Windows 2012 Datacenter 64-bit installed with SQL 2014 Enterprise 4 8192 MSSQL2014E 2015-05-15T08:35:40.000Z NORMAL Win2012 DC with MS SQL 2012 Std Windows 2012 Datacenter 64-bit installed with SQL 2012 Standard SP2 4 8192 MSSQL2012R2S 2015-05-15T08:35:38.000Z NORMAL Win2012 DC with MS SQL 2012 Ent Windows 2012 Datacenter 64-bit installed with SQL 2012 Enterprise SP2 4 8192 MSSQL2012R2E 2015-05-15T08:35:42.000Z NORMAL Win2008 R2 Std 2 CPU Windows 2008 R2 Standard SP1 2 4096 
2015-05-15T08:35:24.000Z NORMAL Win2008 R2 Ent 2 CPU Windows 2008 R2 Enterprise SP1 2 4096 2015-05-15T08:35:26.000Z NORMAL Win2008 R2 DC 64-bit 2 CPU Windows 2008 R2 Datacenter SP1 2 4096 2015-05-15T08:35:34.000Z NORMAL Win2008 R2 Ent with MS SQL 2012 Std Windows 2008 R2 Enterprise SP3 installed with SQL Server 2012 Standard SP2 4 8192 MSSQL2012R2S 2015-05-15T08:35:18.000Z NORMAL Win2008 R2 Ent with MS SQL 2012 Ent Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2012 Enterprise SP2 4 8192 MSSQL2012R2E 2015-05-15T08:35:30.000Z NORMAL Win2008 R2 Ent with MS SQL 2008 R2 Std Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2008 R2 Standard SP3 4 8192 MSSQL2008R2S 2015-05-15T08:35:28.000Z NORMAL Win2008 R2 Ent with MS SQL 2008 R2 Ent Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2008 R2 Enterprise SP3 4 8192 MSSQL2008R2E 2015-05-15T08:35:22.000Z NORMAL Win2008 R2 Ent 64-Bit with MS SP Fnd Windows 2008 R2 Enterprise installed with SQL Express & SharePoint Foundation 2010 4 8192 MSSPFND2010 2015-05-15T08:35:32.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Small DD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapsml00. SID=DD0 NR=00 2 8192 SAPACCEL 2015-05-15T08:35:58.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Small DD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapsml01. SID=DD1 NR=00 2 8192 SAPACCEL 2015-05-15T08:36:06.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Medium TD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapmed00. SID=TD0 NR=00 4 10240 SAPACCEL 2015-05-15T08:36:04.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Medium TD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapmed01. SID=TD1 NR=00 4 10240 SAPACCEL 2015-05-15T08:35:56.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Large PD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=saplrg00. SID=PD0 NR=00 8 20480 SAPACCEL 2015-05-15T08:35:54.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Large PD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). 
Hostname=saplrg01. SID=PD1 NR=00 8 20480 SAPACCEL 2015-05-15T08:36:02.000Z NORMAL Win2008 Std 64-bit 2 CPU Windows 2008 Standard SP2 64-bit 2 4096 2015-05-15T08:36:00.000Z NORMAL Win2008 Ent 64-bit 2 CPU Windows 2008 Enterprise SP2 64-bit 2 4096 2015-05-15T08:35:16.000Z NORMAL Win2008 Std 32-bit 2 CPU Windows 2008 Standard SP2 32-bit 2 4096 2015-05-15T08:35:20.000Z NORMAL Win2008 Ent 32-bit 2 CPU Windows 2008 Enterprise SP2 32-bit 2 4096 2015-05-15T08:35:14.000Z NORMAL CSfM SharePoint 2013 Trial Windows 2012 R2 Standard 64-bit installed with SharePoint 2013 and Visual Studio 2013 Pro (Trial Version) 4 12288 2015-03-19T18:30:39.000Z NORMAL RedHat 6 64-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 2 4096 2015-05-15T08:30:51.000Z NORMAL RedHat 6 64-bit 4 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 4 8192 2015-05-15T08:30:53.000Z NORMAL RedHat 6 32-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 32-bit 2 4096 2015-05-15T08:30:49.000Z NORMAL RedHat 5 64-bit 2 CPU RedHat 5.11 Enterprise (Tikanga) 64-bit 2 4096 2015-05-15T08:30:47.000Z NORMAL RedHat 5 32-bit 2 CPU RedHat 5.11 Enterprise (Tikanga) 32-bit 2 4096 2015-05-15T08:30:46.000Z NORMAL CentOS 6 64-bit 2 CPU CentOS Release 6.6 64-bit 2 4096 2015-05-15T08:30:58.000Z NORMAL CentOS 5 64-bit 2 CPU CentOS Release 5.11 64-bit 2 4096 2015-05-15T08:30:56.000Z NORMAL CentOS 5 32-bit 2 CPU CentOS Release 5.11 32-bit 2 4096 2015-05-15T08:30:54.000Z NORMAL Ubuntu 14.04 2 CPU Ubuntu 14.04.2 LTS 64-bit 2 4096 2015-05-15T08:31:05.000Z NORMAL Ubuntu 12.04 2 CPU Ubuntu 12.04.5 LTS 64-bit 2 4096 2015-05-15T08:31:03.000Z NORMAL SuSE Linux Ent 64-bit 2 CPU SuSE Linux Enterprise Server 11 SP3 2 4096 2015-05-15T08:31:00.000Z NORMAL Win2012 R2 Std 2 CPU Windows 2012 R2 Update Standard 2 4096 2015-05-15T08:31:54.000Z NORMAL Win2012 R2 DC 2 CPU Windows 2012 R2 Update Datacenter 2 4096 2015-05-15T08:31:53.000Z NORMAL Win2012 Std 2 CPU Windows 2012 Standard 2 4096 2015-05-15T08:31:51.000Z NORMAL Win2012 DC 2 CPU Windows 2012 Datacenter 2 4096 
2015-05-15T08:31:42.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2014 Std Windows 2012 R2 Datacenter 64-bit installed with SQL 2014 Standard 4 8192 MSSQL2014S 2015-05-15T08:32:03.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2014 Ent Windows 2012 R2 Datacenter 64-bit installed with SQL 2014 Enterprise 4 8192 MSSQL2014E 2015-05-15T08:32:01.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2012 Std Windows 2012 R2 Datacenter 64-bit installed with SQL 2012 Standard 4 8192 MSSQL2012R2S 2015-05-15T08:31:59.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2012 Ent Windows 2012 R2 Datacenter 64-bit installed with SQL 2012 Enterprise 4 8192 MSSQL2012R2E 2015-05-15T08:31:58.000Z NORMAL Win2012 DC with MS SQL 2014 Std Windows 2012 Datacenter 64-bit installed with SQL 2014 Standard 4 8192 MSSQL2014S 2015-05-15T08:31:49.000Z NORMAL Win2012 DC with MS SQL 2014 Ent Windows 2012 Datacenter 64-bit installed with SQL 2014 Enterprise 4 8192 MSSQL2014E 2015-05-15T08:31:47.000Z NORMAL Win2012 DC with MS SQL 2012 Std Windows 2012 Datacenter 64-bit installed with SQL 2012 Standard SP2 4 8192 MSSQL2012R2S 2015-05-15T08:31:46.000Z NORMAL Win2012 DC with MS SQL 2012 Ent Windows 2012 Datacenter 64-bit installed with SQL 2012 Enterprise SP2 4 8192 MSSQL2012R2E 2015-05-15T08:31:44.000Z NORMAL Win2008 R2 Std 2 CPU Windows 2008 R2 Standard SP1 2 4096 2015-05-15T08:31:41.000Z NORMAL Win2008 R2 Ent 2 CPU Windows 2008 R2 Enterprise SP1 2 4096 2015-05-15T08:31:29.000Z NORMAL Win2008 R2 DC 64-bit 2 CPU Windows 2008 R2 Datacenter SP1 2 4096 2015-05-15T08:31:27.000Z NORMAL Win2008 R2 Ent with MS SQL 2012 Std Windows 2008 R2 Enterprise SP3 installed with SQL Server 2012 Standard SP2 4 8192 MSSQL2012R2S 2015-05-15T08:31:35.000Z NORMAL Win2008 R2 Ent with MS SQL 2012 Ent Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2012 Enterprise SP2 4 8192 MSSQL2012R2E 2015-05-15T08:31:34.000Z NORMAL Win2008 R2 Ent with MS SQL 2008 R2 Std Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2008 
R2 Standard SP3 4 8192 MSSQL2008R2S 2015-05-15T08:31:39.000Z NORMAL Win2008 R2 Ent with MS SQL 2008 R2 Ent Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2008 R2 Enterprise SP3 4 8192 MSSQL2008R2E 2015-05-15T08:31:37.000Z NORMAL Win2008 R2 Ent 64-Bit with MS SP Fnd Windows 2008 R2 Enterprise installed with SQL Express & SharePoint Foundation 2010 4 8192 MSSPFND2010 2015-05-15T08:31:32.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Small DD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapsml00. SID=DD0 NR=00 2 8192 SAPACCEL 2015-05-15T08:31:17.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Small DD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapsml01. SID=DD1 NR=00 2 8192 SAPACCEL 2015-05-15T08:31:19.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Medium TD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapmed00. SID=TD0 NR=00 4 10240 SAPACCEL 2015-05-15T08:31:20.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Medium TD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapmed01. SID=TD1 NR=00 4 10240 SAPACCEL 2015-05-15T08:31:15.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Large PD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=saplrg00. SID=PD0 NR=00 8 20480 SAPACCEL 2015-05-15T08:31:12.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Large PD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=saplrg01. 
SID=PD1 NR=00 8 20480 SAPACCEL 2015-05-15T08:31:14.000Z NORMAL Win2008 Std 64-bit 2 CPU Windows 2008 Standard SP2 64-bit 2 4096 2015-05-15T08:31:31.000Z NORMAL Win2008 Ent 64-bit 2 CPU Windows 2008 Enterprise SP2 64-bit 2 4096 2015-05-15T08:31:24.000Z NORMAL Win2008 Std 32-bit 2 CPU Windows 2008 Standard SP2 32-bit 2 4096 2015-05-15T08:31:25.000Z NORMAL Win2008 Ent 32-bit 2 CPU Windows 2008 Enterprise SP2 32-bit 2 4096 2015-05-15T08:31:22.000Z NORMAL CSfM SharePoint 2013 Trial Windows 2012 R2 Standard 64-bit installed with SharePoint 2013 and Visual Studio 2013 Pro (Trial Version) 4 12288 2015-03-19T18:31:00.000Z NORMAL RedHat 6 64-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 2 4096 2015-03-04T10:10:53.000Z NORMAL RedHat 6 64-bit 4 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 4 8192 2015-03-03T16:05:54.000Z NORMAL RedHat 6 32-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 32-bit 2 4096 2015-03-03T16:06:04.000Z NORMAL RedHat 5 64-bit 2 CPU RedHat 5.11 Enterprise (Tikanga) 64-bit 2 4096 2015-03-03T16:06:15.000Z NORMAL RedHat 5 32-bit 2 CPU RedHat 5.11 Enterprise (Tikanga) 32-bit 2 4096 2015-03-03T16:06:26.000Z NORMAL CentOS 6 64-bit 2 CPU CentOS Release 6.6 64-bit 2 4096 2015-03-03T16:06:37.000Z NORMAL CentOS 5 64-bit 2 CPU CentOS Release 5.11 64-bit 2 4096 2015-06-30T11:27:51.000Z NORMAL CentOS 5 32-bit 2 CPU CentOS Release 5.11 32-bit 2 4096 2015-06-30T11:25:24.000Z NORMAL Ubuntu 14.04 2 CPU Ubuntu 14.04.2 LTS 64-bit 2 4096 2015-03-18T12:22:57.000Z NORMAL Ubuntu 12.04 2 CPU Ubuntu 12.04.5 LTS 64-bit 2 4096 2015-03-03T16:07:07.000Z NORMAL Ubuntu 10.04 2 CPU Ubuntu 10.04.4 LTS 64-bit 2 4096 2015-03-03T16:07:17.000Z NORMAL SuSE Linux Ent 64-bit 2 CPU SuSE Linux Enterprise Server 11 SP3 2 4096 2015-03-03T16:07:27.000Z NORMAL Win2012 R2 Std 2 CPU Windows 2012 R2 Update Standard 2 4096 2015-03-03T17:02:31.000Z NORMAL Win2012 R2 Std 4 CPU Windows 2012 R2 Update Standard 4 8192 2015-03-03T17:02:41.000Z NORMAL Win2012 R2 DC 2 CPU Windows 2012 R2 Update Datacenter 2 4096 
2015-03-03T17:03:11.000Z NORMAL Win2008 Std 64-bit 2 CPU Windows 2008 Standard SP2 64-bit 2 4096 2015-03-03T16:19:46.000Z NORMAL Win2012 Std 2 CPU Windows 2012 Standard 2 4096 2015-03-03T16:19:03.000Z NORMAL Win2012 DC 2 CPU Windows 2012 Datacenter 2 4096 2015-03-03T16:19:11.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2014 Std Windows 2012 R2 Datacenter 64-bit installed with SQL 2014 Standard 4 8192 MSSQL2014S 2015-05-04T08:29:07.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2014 Ent Windows 2012 R2 Datacenter 64-bit installed with SQL 2014 Enterprise 4 8192 MSSQL2014E 2015-05-04T08:31:44.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2012 Std Windows 2012 R2 Datacenter 64-bit installed with SQL 2012 Standard 4 8192 MSSQL2012R2S 2015-05-04T08:34:45.000Z NORMAL Win2012 R2 Datacenter 64-bit MS SQL 2012 Ent Windows 2012 R2 Datacenter 64-bit installed with SQL 2012 Enterprise 4 8192 MSSQL2012R2E 2015-05-04T08:36:36.000Z NORMAL Win2012 DC with MS SQL 2014 Std Windows 2012 Datacenter 64-bit installed with SQL 2014 Standard 4 8192 MSSQL2014S 2015-03-03T16:10:49.000Z NORMAL Win2012 DC with MS SQL 2014 Ent Windows 2012 Datacenter 64-bit installed with SQL 2014 Enterprise 4 8192 MSSQL2014E 2015-03-03T16:10:59.000Z NORMAL Win2012 DC with MS SQL 2012 Std Windows 2012 Datacenter 64-bit installed with SQL 2012 Standard SP2 4 8192 2015-03-03T16:28:40.000Z NORMAL Win2012 DC with MS SQL 2012 Ent Windows 2012 Datacenter 64-bit installed with SQL 2012 Enterprise SP2 4 8192 2015-03-03T16:28:51.000Z NORMAL Win2008 R2 Std 2 CPU Windows 2008 R2 Standard SP1 2 4096 2015-03-03T16:19:20.000Z NORMAL Win2008 R2 Ent 2 CPU Windows 2008 R2 Enterprise SP1 2 4096 2015-03-03T16:19:29.000Z NORMAL Win2008 R2 DC 64-bit 2 CPU Windows 2008 R2 Datacenter SP1 2 4096 2015-03-03T16:19:37.000Z NORMAL Win2008 R2 Ent with MS SQL 2012 Std Windows 2008 R2 Enterprise SP3 installed with SQL Server 2012 Standard SP2 4 8192 2015-03-03T16:29:02.000Z NORMAL Win2008 R2 Ent with MS SQL 2012 Ent Windows 2008 R2 
Enterprise 64-bit installed with SQL Server 2012 Enterprise SP2 4 8192 2015-03-03T16:29:11.000Z NORMAL Win2008 R2 Ent with MS SQL 2008 R2 Std Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2008 R2 Standard SP3 4 8192 2015-03-03T16:29:20.000Z NORMAL Win2008 R2 Ent with MS SQL 2008 R2 Ent Windows 2008 R2 Enterprise 64-bit installed with SQL Server 2008 R2 Enterprise SP3 4 8192 2015-03-03T16:29:29.000Z NORMAL Win2008 R2 Ent 64-Bit with MS SP Fnd Windows 2008 R2 Enterprise installed with SQL Express & SharePoint Foundation 2010 4 8192 2015-03-03T16:35:11.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Small DD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapsml00. SID=DD0 NR=00 2 8192 SAPACCEL 2015-03-17T11:45:48.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Small DD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapsml01. SID=DD1 NR=00 2 8192 SAPACCEL 2015-03-17T11:56:07.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Medium TD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapmed00. SID=TD0 NR=00 4 10240 SAPACCEL 2015-03-17T18:05:03.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Medium TD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=sapmed01. SID=TD1 NR=00 4 10240 SAPACCEL 2015-03-17T18:05:13.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Large PD0 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=saplrg00. SID=PD0 NR=00 8 20480 SAPACCEL 2015-03-17T12:03:15.000Z NORMAL Win2008 R2 Ent 64-bit with SAP Large PD1 MSSQL 2012 and SAP ECC6_EHP7 (no licensing). Hostname=saplrg01. 
SID=PD1 NR=00 8 20480 SAPACCEL 2015-03-17T12:05:04.000Z NORMAL Win2008 Ent 64-bit 2 CPU Windows 2008 Enterprise SP2 64-bit 2 4096 2015-03-03T16:20:00.000Z NORMAL Win2008 Std 32-bit 2 CPU Windows 2008 Standard SP2 32-bit 2 4096 2015-03-03T16:20:09.000Z NORMAL Win2008 Ent 32-bit 2 CPU Windows 2008 Enterprise SP2 32-bit 2 4096 2015-03-03T16:20:18.000Z NORMAL CSfM SharePoint 2013 Trial Windows 2012 R2 Standard 64-bit installed with SharePoint 2013 and Visual Studio 2013 Pro (Trial Version) 4 12288 2015-05-21T15:29:21.000Z NORMAL ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_vlan_0e56433f_d808_4669_821d_812769517ff8.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_vlan_0e56433f_d808_4669_80000664000175000017500000000117512704221640032615 0ustar kamikami00000000000000 Production VLAN test2 10.0.3.1 2607:f480:1111:1153:0:0:0:1 2015-02-13T10:56:44.000Z NORMAL apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_removeTag_BADREQUEST.xml0000664000175000017500000000057013153541406032367 0ustar kamikami00000000000000 REMOVE_TAGS RESOURCE_NOT_FOUND Tag Key(s) (AaronTestModified) not applied to Server eb222a4a-fffd-4e4a-8346-1279ef621ab0. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_startServer.xml0000664000175000017500000000055612704221640031776 0ustar kamikami00000000000000 START_SERVER IN_PROGRESS Request to start Server 'Production Server' has been accepted and is being processed. 
././@LongLink0000000000000000000000000000021700000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000203312701023453032503 0ustar kamikami00000000000000 Start Server ERROR Operation in progress on Server with Id 11 REASON_392 ././@LongLink0000000000000000000000000000022100000000000011210 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete_FAIL.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000235712701023453032514 0ustar kamikami00000000000000 Delete Anti Affinity Rule ERROR Could not find Anti Affinity Rule with Id 07e3621a-a920-4a9a-943c-d8021f27f418 REASON_693 ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_tagKey_5ab77f5f_5aa9_426f_8450000664000175000017500000000036713153541406032636 0ustar kamikami00000000000000 LibcloudTest true true apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_firewallRule.xml0000664000175000017500000000247612704474244032307 0ustar kamikami00000000000000 b30c224c-c95b-4cd9-bb8b-bfdfb5486abf CCDEFAULT.BlockOutboundMailIPv4 DROP IPV4 TCP true NORMAL b30c224c-c95b-4cd9-bb8b-bfdfb5486abf SpecificSourceIP DROP IPV4 TCP true NORMAL apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_addPublicIpBlock.xml0000664000175000017500000000073012704221640032762 0ustar kamikami00000000000000 ADD_PUBLIC_IP_BLOCK OK Public IPv4 Address Block has been added 
successfully to Network Domain d3320077-c2ce-4523-8c65-d417e766077b. ././@LongLink0000000000000000000000000000022100000000000011210 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create_FAIL.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000247512701023453032515 0ustar kamikami00000000000000 Create Anti Affinity Rule ERROR Server 'ansible-test-image-rhel6' (id 22f3544a-c874-4930-a31c-e9e513e51114) is already used in another Anti-Affinity Rule (id 07e3621a-a920-4a9a-943c-d8021f27f418). REASON_692 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/ip_address_lists_FILTERBYNAME.xml0000664000175000017500000000132013153541406033352 0ustar kamikami00000000000000 Test_IP_Address_List_3 Test Description IPV4 NORMAL 2016-09-05T05:57:53.000Z ././@LongLink0000000000000000000000000000022000000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000203412701023453032504 0ustar kamikami00000000000000 Delete Server ERROR Operation in progress on Server with Id 11 REASON_392 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_removeDisk.xml0000664000175000017500000000065312704474244031572 0ustar kamikami00000000000000 REMOVE_DISK IN_PROGRESS Request to Remove disk '540c4d86-4d84-11e4-a91c-0030487e0302' from Server 'Server 1' has been accepted and is being processed. 
././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_networkDomain_8cdfd607_f40000664000175000017500000000061012704221640033544 0ustar kamikami00000000000000 test test2 ESSENTIALS 168.128.26.20 2015-07-15T03:36:00.000Z NORMAL apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_cleanServer.xml0000664000175000017500000000064212704221640031717 0ustar kamikami00000000000000 CLEAN_SERVER IN_PROGRESS The request to clean a failed Server deployment for Server e75ead52-692f-4314-8725-c8a4f4d13a87 has been accepted and is being processed. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_networkDomain.xml0000664000175000017500000000401512704221640032450 0ustar kamikami00000000000000 Aurora Project Aurora Demo Environments ADVANCED 168.128.2.136 2015-07-13T03:52:16.000Z NORMAL Test net domain description of my network ESSENTIALS 168.128.26.20 2015-07-15T03:36:00.000Z NORMAL another networkj net network netowkrm ESSENTIALS 168.128.26.22 2015-07-15T03:40:09.000Z NORMAL Imports Imported servers ADVANCED 168.128.2.69 2015-08-18T03:53:02.000Z NORMAL Platform R2.0 Lab (MCP 2.0) This is the R2.0 lab for Platform on MCP 2.0 ESSENTIALS 168.128.2.149 2015-08-25T01:39:22.000Z NORMAL apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_updateVmwareTools.xml0000664000175000017500000000056612704221640033140 0ustar kamikami00000000000000 POWER_OFF_SERVER IN_PROGRESS Request to power off Server 'Production Server' has been accepted and is being processed. 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_editTagKey_BADREQUEST.xml0000664000175000017500000000060113153541406032463 0ustar kamikami00000000000000 EDIT_TAG_KEY NO_CHANGE At least one of name, description, valueRequired or displayOnReport must be changed from its current value. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_base_image.xml0000664000175000017500000002377012701023453031437 0ustar kamikami00000000000000 52ed8b72-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed8b72-ebea-11df-bdc1-001517c46384 RedHat 5.5 64-bit 1 CPU RedHat 5.5 Enterprise (Tikanga), 64-bit UNIX REDHAT5/64 NA10 1 2048 10 0 1970-01-01T00:00:02.010Z 52ed8dca-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed8dca-ebea-11df-bdc1-001517c46384 RedHat 5.5 64-bit 2 CPU RedHat 5.5 Enterprise (Tikanga), 64-bit UNIX REDHAT5/64 NA10 2 4096 10 0 1970-01-01T00:00:02.010Z 52ed8ed8-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed8ed8-ebea-11df-bdc1-001517c46384 RedHat 5.5 64-bit 4 CPU RedHat 5.5 Enterprise (Tikanga), 64-bit UNIX REDHAT5/64 NA10 4 6144 10 0 1970-01-01T00:00:02.010Z 6fc040ae-3605-11e0-bfb5-001517c46384 /oec/base/image/6fc040ae-3605-11e0-bfb5-001517c46384 RedHat 5.5 32-bit 1 CPU RedHat 5.5 Enterprise (Tikanga), 32-bit UNIX REDHAT5/32 NA10 1 2048 10 0 2011-02-11T17:36:19.000Z 52ed92d4-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed92d4-ebea-11df-bdc1-001517c46384 Ubuntu 8.04.4 2 CPU Ubuntu 8.04.4 LTS, 64-bit UNIX UBUNTU8/64 NA10 2 4096 10 0 1970-01-01T00:00:02.010Z 52ed876c-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed876c-ebea-11df-bdc1-001517c46384 Win2008 Ent 64-bit R2 2 CPU Windows 2008 Enterprise R2 64-bit WINDOWS WIN2008R2E/64 NA10 2 4096 50 0 1970-01-01T00:00:02.010Z 52ed8a5a-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed8a5a-ebea-11df-bdc1-001517c46384 Win2008 Ent 64-bit R2 4 CPU Windows 2008 Enterprise R2 64-bit WINDOWS WIN2008R2E/64 NA10 4 8192 50 0 1970-01-01T00:00:02.010Z 52ed865e-ebea-11df-bdc1-001517c46384 
/oec/base/image/52ed865e-ebea-11df-bdc1-001517c46384 Win2008 Std 64-bit R2 2 CPU Windows 2008 Standard R2 64-bit WINDOWS WIN2008R2S/64 NA10 2 4096 50 0 1970-01-01T00:00:02.010Z 52ed7b96-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed7b96-ebea-11df-bdc1-001517c46384 Win2008 Std 32-bit 1 CPU Windows 2008 Standard SP2 32-bit WINDOWS WIN2008S/32 NA10 1 2048 50 0 1970-01-01T00:00:02.010Z 52ed7cb8-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed7cb8-ebea-11df-bdc1-001517c46384 Win2008 Std 32-bit 2 CPU Windows 2008 Standard SP2 32-bit WINDOWS WIN2008S/32 NA10 2 4096 50 0 1970-01-01T00:00:02.010Z 52ed7da8-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed7da8-ebea-11df-bdc1-001517c46384 Win2008 Std 32-bit 4 CPU Windows 2008 Standard SP2 32-bit WINDOWS WIN2008S/32 NA10 4 4096 50 0 1970-01-01T00:00:02.010Z 52ed7ea2-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed7ea2-ebea-11df-bdc1-001517c46384 Win2008 Ent 32-bit 2 CPU Windows 2008 Enterprise SP2 32-bit WINDOWS WIN2008E/32 NA10 2 4096 50 0 1970-01-01T00:00:02.010Z 52ed8fd2-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed8fd2-ebea-11df-bdc1-001517c46384 Red Hat 4.8 32-bit 1 CPU Red Hat ES 4.8 (Nahant), 32-bit UNIX REDHAT4/32 NA10 1 2048 10 0 1970-01-01T00:00:02.010Z 52ed90cc-ebea-11df-bdc1-001517c46384 /oec/base/image/52ed90cc-ebea-11df-bdc1-001517c46384 CentOS 5.5 32-bit 1 CPU CentOS release 5.5, 32-bit UNIX CENTOS5/32 NA10 1 2048 10 0 1970-01-01T00:00:02.010Z ././@LongLink0000000000000000000000000000020500000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000201012701023453032476 0ustar kamikami00000000000000 Delete Server SUCCESS Server "Delete" issued REASON_0 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_resetServer.xml0000664000175000017500000000055612704221640031763 0ustar kamikami00000000000000 RESET_SERVER IN_PROGRESS Request to reset Server 'Production Server' has been accepted and is being processed. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_applyTags_BADREQUEST.xml0000664000175000017500000000050213153541406032375 0ustar kamikami00000000000000 APPLY_TAGS RESOURCE_NOT_FOUND Tag Key(s) (ChangeNameTes) not found. ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_rebootServer_RESOURCEBUSY.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_rebootServer_RESOURCEBUSY.0000664000175000017500000000054612704221640033423 0ustar kamikami00000000000000 REBOOT_SERVER RESOURCE_BUSY Request to reboot Server 'Production Server' did not work, server is busy. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/ip_address_list_edit.xml0000664000175000017500000000057213153541406032243 0ustar kamikami00000000000000 EDIT_IP_ADDRESS_LIST OK IP Address List 'MyIpAddressList' has been edited successfully. 
././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_0000664000175000017500000000172012701023453032554 0ustar kamikami00000000000000 Deploy Server SUCCESS Server "Deploy" issued REASON_0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/0000775000175000017500000000000013160535107025642 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/server_server_NA3.xml0000664000175000017500000000623413153541406031727 0ustar kamikami00000000000000 Production Web Server MCP 1 nopassword0 2 e9ec6eb4-4634-49de-b914-01eb74da5fb9 2015-08-11T16:51:05.000Z true true NORMAL Production Web Server MCP 2 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/server_GetServer.xml0000664000175000017500000000372313153541406031666 0ustar kamikami00000000000000 Production Web Server Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true stopped SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 **OR** apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/server_clone_response.xml0000664000175000017500000000075513153541406033000 0ustar kamikami00000000000000 CLONE_SERVER IN_PROGRESS Request to Clone Server '9ed47330-5561-11e5-8c14-b8ca3a5d9ef8' has been accepted and is being processed. 
././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/change_nic_networkadapter_response.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/change_nic_networkadapter_res0000664000175000017500000000052113153541406033625 0ustar kamikami00000000000000 CHANGE_NETWORK_ADAPTER IN_PROGRESS Request to Change Network Adapter for NIC with Id has been accepted and is being processed. ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/exchange_nic_vlans_response.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/exchange_nic_vlans_response.x0000664000175000017500000000062013153541406033566 0ustar kamikami00000000000000 EXCHANGE_NIC_VLANS OK Request to exchange VLANs for Server with Id '1cec8cfb-69e7-4de9-8404-debf116f5242' has been accepted and is being processed. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/import_image_response.xml0000664000175000017500000000057213153541406032763 0ustar kamikami00000000000000 IMPORT_IMAGE IN_PROGRESS Request to import image has been accepted and is being processed. 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/image_osImage.xml0000664000175000017500000000431313153541406031114 0ustar kamikami00000000000000 RedHat 6 64-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 4 2015-09-17T11:23:48.000Z T-RHEL-6-64-2-4-10 Win2012 DC 2 CPU Windows 2012 Datacenter 4 2015-09-17T11:44:43.000Z T-WIN-2012-DATACTR-64-2-4-50 Win2008 Std 32-bit 2 CPU Windows 2008 Enterprise R2 32-bit installed with Microsoft SQL Server 2012 Standard Edition 4 MSSQL2008R2S 2014-11-20T12:54:22.000Z T-WIN-2008-ENT-32-2-4-50 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/server_cleanServer.xml0000664000175000017500000000064213153541406032226 0ustar kamikami00000000000000 CLEAN_SERVER IN_PROGRESS The request to clean a failed Server deployment for Server e75ead52-692f-4314-8725-c8a4f4d13a87 has been accepted and is being processed. ././@LongLink0000000000000000000000000000017600000000000011221 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/image_osImage_6b4fb0c7_a57b_40000664000175000017500000000127313153541406032655 0ustar kamikami00000000000000 Win2012 DC 2 CPU Windows 2012 Datacenter 4 2015-09-17T11:44:43.000Z T-WIN-2012-DATACTR-64-2-4-50 ././@LongLink0000000000000000000000000000017600000000000011221 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/image_osImage_c14b1a46_2428_40000664000175000017500000000130013153541406032415 0ustar kamikami00000000000000 RedHat 6 64-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 4 2015-09-17T11:23:48.000Z T-RHEL-6-64-2-4-10 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/deploy_customised_server.xml0000664000175000017500000000074213153541406033511 0ustar kamikami00000000000000 DEPLOY_UNCUSTOMIZED_SERVER IN_PROGRESS Request to deploy uncustomized Server 'Production Server' has been accepted and is being processed. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/image_customerImage.xml0000664000175000017500000000614213153541406032336 0ustar kamikami00000000000000 ImportedCustomerImage 2 2015-11-19T14:29:02.000Z NORMAL CustomerImageWithPricedSoftwareLabels 1 MSSQL2008R2S 2015-11-03T15:25:34.000Z NORMAL CopiedCustomerImage 2 2015-11-11T17:17:00.000Z NORMAL apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/server_server_paginated.xml0000664000175000017500000000631713153541406033304 0ustar kamikami00000000000000 Production Web Server MCP 1 nopassword0 2 e9ec6eb4-4634-49de-b914-01eb74da5fb9 2015-08-11T16:51:05.000Z true true NORMAL Production Web Server MCP 2 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/server_server.xml0000664000175000017500000002662513153541406031274 0ustar kamikami00000000000000 Production Web Server MCP 1 nopassword0 2 e9ec6eb4-4634-49de-b914-01eb74da5fb9 2015-08-11T16:51:05.000Z true true NORMAL Production Web Server MCP 1 nopassword0 2 e9ec6eb4-4634-49de-b914-01eb74da5fb9 2015-08-11T16:51:05.000Z true true PENDING_ADD DEPLOY_SERVER_WITH_DISK_SPEED 2015-12-02T11:07:40.000Z devuser1 Production Web Server MCP 2 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 Production Web Server MCP 2 Server to host our main web application. 
4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE RECONFIGURE_SERVER 2015-12-02T11:07:40.000Z devuser1 Production Web Server MCP 2 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true UNKNOWN SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 Production Web Server MCP 21 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true false UNKNOWN SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 Production Web Server MCP 2 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true false NORMAL ././@LongLink0000000000000000000000000000020400000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/image_customerImage_2ffa36c8_0000664000175000017500000000176613153541406033207 0ustar kamikami00000000000000 CustomerImageWithPricedSoftwareLabels 1 MSSQL2008R2S 2015-11-03T15:25:34.000Z NORMAL ././@LongLink0000000000000000000000000000020400000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/image_customerImage_5234e5c7_0000664000175000017500000000222313153541406033033 0ustar kamikami00000000000000 ImportedCustomerImage 2 2015-11-19T14:29:02.000Z NORMAL ././@LongLink0000000000000000000000000000017600000000000011221 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/2.4/server_server_e75ead52_692f_40000664000175000017500000000336713153541406032765 0ustar kamikami00000000000000 Production Web Server Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE DEPLOY_SERVER 2015-12-02T11:07:40.000Z devuser1 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_reconfigureServer.xml0000664000175000017500000000052512704221640033145 0ustar kamikami00000000000000 RECONFIGURE_SERVER OK Request to reconfigure Server 'Production Server' has been accepted and is being processed. ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_osImage_6b4fb0c7_a57b_4f58_0000664000175000017500000000120712704221640033025 0ustar kamikami00000000000000 Win2012 DC 2 CPU Windows 2012 Datacenter 4 2015-09-17T11:44:43.000Z T-WIN-2012-DATACTR-64-2-4-50 ././@LongLink0000000000000000000000000000021400000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000252012701023453032504 0ustar kamikami00000000000000 Delete Anti Affinity Rule SUCCESS Request to delete Server Anti-Affinity Rule between 'ansible-test-image-rhel6' and 'ansible-custom-image-test-UAT' on 'Deloitte Test' has been accepted and is being processed. 
REASON_0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_addNic.xml0000664000175000017500000000074412704221640030633 0ustar kamikami00000000000000 ADD_NIC IN_PROGRESS The request to add NIC for VLAN 'Subsystem VLAN' on Server 'Production Mail Server' has been accepted and is being processed.. ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_osImage_c14b1a46_2428_44c1_0000664000175000017500000000121412704221640032561 0ustar kamikami00000000000000 RedHat 6 64-bit 2 CPU RedHat 6.6 Enterprise (Santiago) 64-bit 4 2015-09-17T11:23:48.000Z T-RHEL-6-64-2-4-10 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_createTagKey_BADREQUEST.xml0000664000175000017500000000051713153541406033007 0ustar kamikami00000000000000 CREATE_TAG_KEY NAME_NOT_UNIQUE Another Tag Key named 'MyTestKey' already exists. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_deleteTagKey_BADREQUEST.xml0000664000175000017500000000047313153541406033007 0ustar kamikami00000000000000 DELETE_TAG_KEY RESOURCE_NOT_FOUND Tag Key fdafdsa not found. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_deleteNatRule.xml0000664000175000017500000000055212704221640032366 0ustar kamikami00000000000000 DELETE_NAT_RULE OK NAT Rule with Id 2187a636-7ebb-49a1-a2ff-5d617f496dce has been deleted. 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_antiAffinityRule_list.xml0000664000175000017500000000525712704221640033765 0ustar kamikami00000000000000 ansible-test-image-rhel6 my new node ansible-custom-image-test-UAT my new node rhel-ansible-full-test RHEL Ansible Test rhel-ansible-full-test RHEL Ansible Test apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_createFirewallRule.xml0000664000175000017500000000064612704221640033416 0ustar kamikami00000000000000 CREATE_FIREWALL_RULE OK Request create Firewall Rule 'My.Rule.2' successful apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_osImage_BAD_REQUEST.xml0000664000175000017500000000054112704221640032462 0ustar kamikami00000000000000 GET_OS_IMAGE RESOURCE_NOT_FOUND Server Image 2ffa36c8-1848-49eb-b4fa-9d908775f68c not found. ././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54_BADREQUEST.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_tagKey_5ab77f5f_5aa9_426f_8450000664000175000017500000000052413153541406032631 0ustar kamikami00000000000000 GET_TAG_KEY RESOURCE_NOT_FOUND Tag Key 5ab77f5f-5aa9-426f-8459-4eab34e03d5 not found. 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/audit_log.csv0000664000175000017500000001211713153541406030026 0ustar kamikami00000000000000UUID,Time,Create User,Department,Customer Defined 1,Customer Defined 2,Type,Name,Action,Details,Response Code af779880-0531-45d3-be56-df41c459aa09,2016-09-11 23:59:37,ashaw_plat,,,,SERVER,ST2-DEV[10-0-0-5_8bf3420c-73cf-45ab-a923-a461ad87daf7],Deploy Server,ATTEMPT: DEPLOY_SERVER 'ST2-DEV[10-0-0-5_8bf3420c-73cf-45ab-a923-a461ad87daf7]', d127748c-948f-41f0-b6a7-c0b5a2438bd6,2016-09-12 00:06:00,OEC_SYSTEM,,,,SERVER,ST2-DEV[10-0-0-5_8bf3420c-73cf-45ab-a923-a461ad87daf7],Deploy Server,DEPLOY_SERVER 'ST2-DEV[10-0-0-5_8bf3420c-73cf-45ab-a923-a461ad87daf7]',OK 85f978b2-0e2a-4ba3-a6ce-7567e329e842,2016-09-12 00:22:48,ashaw_plat,,,,SERVER,ST2-DEV[10-0-0-5_8bf3420c-73cf-45ab-a923-a461ad87daf7],Start Server,ATTEMPT: START_SERVER 'ST2-DEV[10-0-0-5_8bf3420c-73cf-45ab-a923-a461ad87daf7]', afc67c82-381a-4ba1-b25e-2285a916d513,2016-09-12 00:23:00,OEC_SYSTEM,,,,SERVER,ST2-DEV[10-0-0-5_8bf3420c-73cf-45ab-a923-a461ad87daf7],Start Server,START_SERVER 'ST2-DEV[10-0-0-5_8bf3420c-73cf-45ab-a923-a461ad87daf7]',OK 7378b530-9d37-4339-88d5-19428431fe07,2016-09-12 05:10:47,ashaw_plat,,,,IP_BLOCK,168.128.37.196,Add IP Block,ATTEMPT: ADD_PUBLIC_IP_BLOCK '168.128.37.196', f978f690-13e8-4d7d-8ce3-1ca7e232df69,2016-09-12 05:10:48,ashaw_plat,,,,IP_BLOCK,168.128.37.196,Add IP Block,ADD_PUBLIC_IP_BLOCK '168.128.37.196',OK 822b7483-bb59-4659-9ab1-7a1ad25bacd7,2016-09-12 05:10:58,ashaw_plat,,,,NAT_RULE,10.0.0.5,Create NAT Rule,ATTEMPT: CREATE_NAT_RULE '10.0.0.5'(requestId:au_20160912T151058137+1000_07422779-ec2a-448e-81f4-87a333b79f3c), 2f29ebd2-2ee0-463f-9a46-2e62f09fcc74,2016-09-12 05:10:59,ashaw_plat,,,,NAT_RULE,10.0.0.5,Create NAT Rule,CREATE_NAT_RULE '10.0.0.5'(requestId:au_20160912T151058137+1000_07422779-ec2a-448e-81f4-87a333b79f3c),OK 503aa140-6da9-4fbe-b94e-b118fc5a473a,2016-09-13 04:08:55,mgreenwood_plat,,,,NAT_RULE,10.208.136.11,Add NAT 
Rule,ATTEMPT: ADD_NAT_RULE '10.208.136.11', bb73bb95-a20c-46f8-9962-3862496793ff,2016-09-13 04:09:00,OEC_SYSTEM,,,,NAT_RULE,10.208.136.11,Add NAT Rule,ADD_NAT_RULE '10.208.136.11',OK 276ab194-ffb3-4516-920d-96cc068dfcfa,2016-09-13 04:11:54,mgreenwood_plat,,,,NAT_RULE,10.208.136.11,Delete NAT Rule,ATTEMPT: DELETE_NAT_RULE '10.208.136.11', 3bb0e6ad-48e1-4268-9423-4cd680edc404,2016-09-13 04:11:57,OEC_SYSTEM,,,,NAT_RULE,10.208.136.11,Delete NAT Rule,DELETE_NAT_RULE '10.208.136.11',OK f9f2684a-796c-4f28-9448-949900f8f4fb,2016-09-13 04:16:22,mgreenwood_plat,,,,NAT_RULE,10.208.136.11,Add NAT Rule,ATTEMPT: ADD_NAT_RULE '10.208.136.11', 0bfbe81f-35fc-445e-a33e-f12a46c25115,2016-09-13 04:16:27,OEC_SYSTEM,,,,NAT_RULE,10.208.136.11,Add NAT Rule,ADD_NAT_RULE '10.208.136.11',OK 629da5f4-7fdc-4bc6-a408-66ab96f539c0,2016-09-13 04:27:19,mgreenwood_plat,,,,ACL_RULE,AD LDAP Port - For Testing,Add Acl Rule,ATTEMPT: Add Acl Rule AD LDAP Port - For Testing to network Aperture, ecee1409-ce7d-496d-a41b-67662d884598,2016-09-13 04:27:22,mgreenwood_plat,,,,ACL_RULE,AD LDAP Port - For Testing,Add Acl Rule,Add Acl Rule AD LDAP Port - For Testing to network Aperture,OK f6e84763-b911-4189-bf69-138e9a23de7a,2016-09-14 03:05:15,aakbar_plat,,,,SERVER,CaaS-QA[10-209-120-14_e772c8c0-ee55-4e8f-82f9-22576b494a62],Get Console Access Session Token,GENERATE_CONSOLE_ACCESS_SESSION_TOKEN 'CaaS-QA[10-209-120-14_e772c8c0-ee55-4e8f-82f9-22576b494a62]',OK ef43056e-1cd0-48bf-80f6-12a4cad8e1c6,2016-09-14 03:05:24,aakbar_plat,,,,SERVER,CaaS-QA[10-209-120-14_e772c8c0-ee55-4e8f-82f9-22576b494a62],Get Console Access Session Token,GENERATE_CONSOLE_ACCESS_SESSION_TOKEN 'CaaS-QA[10-209-120-14_e772c8c0-ee55-4e8f-82f9-22576b494a62]',OK 13f38aa4-a86c-45c2-909f-2a6b96c0c761,2016-09-14 03:13:21,aakbar_plat,,,,SERVER,CaaS-CI[10-209-120-16_ce81b50b-70d4-44ae-94d8-c2af8c22b244],Get Console Access Session Token,GENERATE_CONSOLE_ACCESS_SESSION_TOKEN 'CaaS-CI[10-209-120-16_ce81b50b-70d4-44ae-94d8-c2af8c22b244]',OK 
22aa854c-17a2-4f48-82a1-c10a052dd932,2016-09-14 05:42:04,mkumar_plat,,,,SERVER,CaaS-QA[10-209-120-14_e772c8c0-ee55-4e8f-82f9-22576b494a62],Get Console Access Session Token,GENERATE_CONSOLE_ACCESS_SESSION_TOKEN 'CaaS-QA[10-209-120-14_e772c8c0-ee55-4e8f-82f9-22576b494a62]',OK 6c7f3847-e2e8-47c8-8fd0-8c444015ad19,2016-09-14 05:51:58,mkumar_plat,,,,SERVER,CaaS-CI[10-209-120-16_ce81b50b-70d4-44ae-94d8-c2af8c22b244],Get Console Access Session Token,GENERATE_CONSOLE_ACCESS_SESSION_TOKEN 'CaaS-CI[10-209-120-16_ce81b50b-70d4-44ae-94d8-c2af8c22b244]',OK 69ddc5a5-b815-439b-9d32-a1d60345355b,2016-09-14 06:02:50,mkumar_plat,,,,SERVER,CaaS-CI[10-209-120-16_ce81b50b-70d4-44ae-94d8-c2af8c22b244],Get Console Access Session Token,GENERATE_CONSOLE_ACCESS_SESSION_TOKEN 'CaaS-CI[10-209-120-16_ce81b50b-70d4-44ae-94d8-c2af8c22b244]',OK 4ffa5524-926b-4747-9924-581c316b6a60,2016-09-14 06:05:49,mkumar_plat,,,,SERVER,CaaS-QA[10-209-120-14_e772c8c0-ee55-4e8f-82f9-22576b494a62],Get Console Access Session Token,GENERATE_CONSOLE_ACCESS_SESSION_TOKEN 'CaaS-QA[10-209-120-14_e772c8c0-ee55-4e8f-82f9-22576b494a62]',OK 599718fe-f58c-439b-87ae-86fb1347708e,2016-09-14 06:06:21,mkumar_plat,,,,SERVER,CaaS-QA[10-209-120-14_e772c8c0-ee55-4e8f-82f9-22576b494a62],Get Console Access Session Token,GENERATE_CONSOLE_ACCESS_SESSION_TOKEN 'CaaS-QA[10-209-120-14_e772c8c0-ee55-4e8f-82f9-22576b494a62]',OK apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_deleteServer.xml0000664000175000017500000000062712704221640032102 0ustar kamikami00000000000000 DELETE_SERVER IN_PROGRESS Request to Delete Server (Id:d577a691-e116-4913-a440- 022d2729fc84) has been accepted and is being processed apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_customerImage.xml0000664000175000017500000000554612704221640032036 0ustar kamikami00000000000000 ImportedCustomerImage 2 2015-11-19T14:29:02.000Z NORMAL CustomerImageWithPricedSoftwareLabels 1 MSSQL2008R2S 2015-11-03T15:25:34.000Z NORMAL 
CopiedCustomerImage 2 2015-11-11T17:17:00.000Z NORMAL apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/port_list_edit.xml0000664000175000017500000000053413153541406031110 0ustar kamikami00000000000000 EDIT_PORT_LIST OK Port List 'MyPortList' has been edited successfully. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_startServer_INPROGRESS.xml0000664000175000017500000000056012704221640033544 0ustar kamikami00000000000000 START_SERVER RESOURCE_BUSY Request to start Server 'Production Server' has been accepted and is being processed. ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_disableServerMonitoring.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_disableServerMonitoring.xm0000664000175000017500000000055012704221640034130 0ustar kamikami00000000000000 DISABLE_SERVER_MONITORING OK Monitoring on Server 5783e93f-5370-44fc-a772-cd3c29a2ecaa has been disabled. 
././@LongLink0000000000000000000000000000024100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000251612701023453032511 0ustar kamikami00000000000000 Add Network SUCCESS Network created successfully (Network ID: 9eb15060-e2f7-11e1-9153-001b21cfdbe0) REASON_0 ././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_publicIpBlock_9945dc4a_bd0000664000175000017500000000057412704221640033453 0ustar kamikami00000000000000 8cdfd607-f429-4df6-9352-162cfc0891be 168.128.4.18 2 2015-07-13T04:07:37.000Z NORMAL ././@LongLink0000000000000000000000000000021400000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000275012701023453032511 0ustar kamikami00000000000000 Create Anti Affinity Rule SUCCESS Request to create Server Anti-Affinity Rule between 'rhel-ansible-full-test' and 'rhel-ansible-full-test' on 'Deloitte Test' has been accepted and is being processed. REASON_0 5e10b1ab-68f2-4a8b-a49c-d88d623db665 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_shutdownServer.xml0000664000175000017500000000056412704221640032513 0ustar kamikami00000000000000 SHUTDOWN_SERVER IN_PROGRESS Request to shutdown Server 'Production Server' has been accepted and is being processed. 
././@LongLink0000000000000000000000000000024500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_POST.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000047312701023453032511 0ustar kamikami00000000000000 Edit Server SUCCESS Server edited REASON_0 ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_antiAffinityRule_list_PAGINATED.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_antiAffinityRule_list_PAGI0000664000175000017500000000525512704221640033764 0ustar kamikami00000000000000 ansible-test-image-rhel6 my new node ansible-custom-image-test-UAT my new node rhel-ansible-full-test RHEL Ansible Test rhel-ansible-full-test RHEL Ansible Test apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_tagKey_list.xml0000664000175000017500000000135313153541406031176 0ustar kamikami00000000000000 AaronTestModified Testing for VMWare true true LibcloudTest true true MyTestKey true true ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_deleteServer_RESOURCEBUSY.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_deleteServer_RESOURCEBUSY.0000664000175000017500000000050312704221640033364 0ustar kamikami00000000000000 DELETE_SERVER RESOURCE_BUSY Server is already busy apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/report_usageMonitoring.xml0000664000175000017500000000073712704221640032632 0ustar kamikami00000000000000Date,Location,Server,Server ID,Essentials Monitoring Hours,Advanced Monitoring Hours 2/24/15,NA5,Server 
A,5a2378ba-11de-11e5-ad2a-ca9798323470,14.25,0 2/24/15,NA5,Server B,6330a644-11de-11e5-ad2a-ca9798323470,0,8.64 2/25/15,NA5,Server A,5a2378ba-11de-11e5-ad2a-ca9798323470,24,0 2/25/15,NA5,Server B,6330a644-11de-11e5-ad2a-ca9798323470,0,24 2/26/15,NA5,Server A,5a2378ba-11de-11e5-ad2a-ca9798323470,17.59,6.41 2/26/15,NA5,Server B,6330a644-11de-11e5-ad2a-ca9798323470,2.11,21.89././@LongLink0000000000000000000000000000026200000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000230612701023453032506 0ustar kamikami00000000000000 Change Server Disk Size SUCCESS Server "Change Server Disk Size" issued REASON_0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_deployNetworkDomain.xml0000664000175000017500000000071712704221640033632 0ustar kamikami00000000000000 DEPLOY_NETWORK_DOMAIN IN_PROGRESS Request to Deploy Network Domain 'A Network Domain' has been accepted and is being processed apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/ip_address_list_delete.xml0000664000175000017500000000061313153541406032554 0ustar kamikami00000000000000 DELETE_IP_ADDRESS_LIST OK IP Address List with Id 84e34850-595d-436e-a885-7cd37edb24a4 has been deleted. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_server_paginated.xml0000664000175000017500000000604012704221640032766 0ustar kamikami00000000000000 Production Web Server MCP 1 nopassword0 2 e9ec6eb4-4634-49de-b914-01eb74da5fb9 2015-08-11T16:51:05.000Z true true NORMAL Production Web Server MCP 2 Server to host our main web application. 
4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/ip_address_lists.xml0000664000175000017500000000431113153541406031414 0ustar kamikami00000000000000 TestIPList2 Test web server IP addresses list IPV4 NORMAL 2016-09-01T01:19:53.000Z TestIPList_sub_2 Test web server IP addresses list IPV4 NORMAL 2016-09-01T01:20:56.000Z Test_IP_Address_List_2 Test Description IPV4 NORMAL 2016-09-05T05:37:56.000Z Test_IP_Address_List_3 Test Description IPV4 NORMAL 2016-09-05T05:57:53.000Z apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/detailed_usage_report.csv0000664000175000017500000000531513153541406032413 0ustar kamikami00000000000000Name,Type,Location,Private IP Address,Status,"user:ATag","user:BTag","user:CTag",Start Time,End Time,Duration (Hours),CPU Type,CPU Count, RAM (GB), Storage (GB),Economy Storage (GB),CPU Hours,High Performance CPU Hours,RAM Hours,Storage Hours,Economy Storage Hours,Bandwidth-In (GB),Bandwidth-Out (GB),Subadmin Hours,Network Hours,Essentials Network Domain Hours,Advanced Network Domain Hours,VLAN Hours,Public IP Hours,Cloud Files Account Hours,Cloud Storage (GB) Server A,Server,NA1,10.162.12.16,Running,Bob,IT,,19/06/2014 04:16,19/06/2014 11:17,7.03,Standard,1,2,10,0,7.03,0,14.05,70.27,0,0,0,0,0,0,0,0,0,0,0 Server A,Server,NA1,10.162.12.16,Running,Bob,IT,,19/06/2014 11:17,20/06/2014 00:00,12.71,High Performance,1,2,10,0,0,12.71,0,127.06,0,0,0,0,0,0,0,0,0,0,0 Server B,Server,NA1,10.162.12.13,Running,Mary,IT,,19/06/2014 03:42,19/06/2014 11:18,7.61,Essentials,1,2,10,0,0,0,15.22,76.1,0,0,0,0,0,0,0,0,0,0,0 Server B,Server,NA1,10.162.12.13,Stopped,Mary,IT,,19/06/2014 11:18,19/06/2014 23:18,12,Essentials,1,2,10,0,0,0,0,120,0,0,0,0,0,0,0,0,0,0,0 Server B,Server,NA1,10.162.12.13,Stopped,Bob,IT,,19/06/2014 23:18,20/06/2014 
00:00,0.689722222,Essentials,1,2,10,0,0,0,0,6.897222223,0,0,0,0,0,0,0,0,0,0,0 Customer Image A,Image,NA1,,Running,Bob,IT,,19/06/2014 03:30,20/06/2014 00:00,20.5,,0,0,10,0,0,0,0,205,0,0,0,0,0,0,0,0,0,0,0 Customer Image B,Image,NA1,,Running,Bob,IT,,19/06/2014 03:10,19/06/2014 03:12,0.04,,0,0,10,0,0,0,0,0.44,0,0,0,0,0,0,0,0,0,0,0 Joseph_F,Sub-Administrator,,,Running,Bob,IT,,19/06/2014 00:00,20/06/2014 00:00,24,,0,0,0,0,0,0,0,0,0,0,0,24,0,0,0,0,0,0,0 Lguy,Sub-Administrator,,,Running,Mary,IT,,19/06/2014 00:00,20/06/2014 00:00,24,,0,0,0,0,0,0,0,0,0,0,0,24,0,0,0,0,0,0,0 a network,Network,NA1,,Running,,IT,,19/06/2014 01:39,20/06/2014 00:00,22.34,,0,0,0,0,0,0,0,0,0,0,0.9,0,22.34,0,0,0,0,0,0 b network,Network,NA1,,Running,,IT,,19/06/2014 01:49,20/06/2014 00:00,22.18,,0,0,0,0,0,0,0,0,0,0,7.1,0,22.18,0,0,0,0,0,0 Rick's Network Domain,Network Domain,NA9,,Running,Bob,IT,,19/06/2014 04:19,20/06/2014 00:00,19.68,,0,0,0,0,0,0,0,0,0,0,7.1,0,22.18,19.68,0,0,0,0,0 Rick's VLAN,VLAN,NA9,,Running,Bob,IT,,19/06/2014 04:22,20/06/2014 00:00,19.63,,0,0,0,0,0,0,0,0,0,0,0,0,22.18,0,0,19.63,0,0,0 files1_koala,Cloud Files,,,Running,,IT,,19/06/2014 00:00,20/06/2014 00:00,24,,0,0,0,0,0,0,0,0,0,0,0.5,0,0,0,0,0,0,24,52 testbed,Cloud Files,,,Running,,IT,,19/06/2014 00:00,20/06/2014 00:00,24,,0,0,0,0,0,0,0,0,0,0,0.01,0,0,0,0,0,0,24,100 Network: b network IP Block: 168.143.92.136(4),IpBlock,NA1,,Reserved,Mary,IT,,19/06/2014 11:47,20/06/2014 00:00,12.2,,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,48.82,0,0 TOTAL for 19/06/14,,,,,,,,19/06/2014 00:00,20/06/2014 00:00,24,,0,0,0,0,8,13,30,606,0,3,23,48,89,20,0,20,49,48,152apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_createTagKey.xml0000664000175000017500000000057513153541406031274 0ustar kamikami00000000000000 CREATE_TAG_KEY OK Tag Key 'MyTestKey' has been created. 
././@LongLink0000000000000000000000000000020400000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000200612701023453032503 0ustar kamikami00000000000000 Start Server SUCCESS Server "Start" issued REASON_0 ././@LongLink0000000000000000000000000000024000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000261512701023453032511 0ustar kamikami00000000000000 Add Local Storage SUCCESS 100 GB of STANDARD local storage is being added to server "abc" on SCSI ID 1 REASON_0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_server.xml0000664000175000017500000002572012704221640030760 0ustar kamikami00000000000000 Production Web Server MCP 1 nopassword0 2 e9ec6eb4-4634-49de-b914-01eb74da5fb9 2015-08-11T16:51:05.000Z true true NORMAL Production Web Server MCP 1 nopassword0 2 e9ec6eb4-4634-49de-b914-01eb74da5fb9 2015-08-11T16:51:05.000Z true true PENDING_ADD DEPLOY_SERVER_WITH_DISK_SPEED 2015-12-02T11:07:40.000Z devuser1 Production Web Server MCP 2 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 Production Web Server MCP 2 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE RECONFIGURE_SERVER 2015-12-02T11:07:40.000Z devuser1 Production Web Server MCP 2 Server to host our main web application. 
4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true UNKNOWN SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 Production Web Server MCP 21 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true false UNKNOWN SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 Production Web Server MCP 2 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true false NORMAL ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_customerImage_BAD_REQUEST.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_customerImage_BAD_REQUEST.x0000664000175000017500000000054712704221640033357 0ustar kamikami00000000000000 GET_CUSTOMER_IMAGE RESOURCE_NOT_FOUND Server Image 2ffa36c8-1848-49eb-b4fa-9d908775f68c not found. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_deployServer.xml0000664000175000017500000000070312704221640032127 0ustar kamikami00000000000000 DEPLOY IN_PROGRESS Request to deploy Server 'Production FTPS Server' has been accepted and is being processed. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_tagKey_list_SINGLE.xml0000664000175000017500000000052113153541406032233 0ustar kamikami00000000000000 LibcloudTest true true ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_changeServerMonitoringPlan.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_changeServerMonitoringPlan0000664000175000017500000000056512704221640034150 0ustar kamikami00000000000000 CHANGE_SERVER_MONITORING_PLAN OK Monitoring on Server 'Production Server' has been changed to Service Plan 'ADVANCED'. 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_expandVlan.xml0000664000175000017500000000057212704221640031733 0ustar kamikami00000000000000 EXPAND_VLAN IN_PROGRESS Request to expand VLAN 'Production VLAN' has been accepted and is being processed. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/summary_usage_report.csv0000664000175000017500000000152513153541406032334 0ustar kamikami00000000000000DAY,Location,CPU Hours,RAM Hours,Storage Hours,Performance Storage Hours,Bandwidth In,Bandwidth Out,Sub-Admin Hours,Network Hours,Public IP Hours,Cloud Files Account Hours,Cloud Files (GB Days),Software Units,Essentials Client Days,Advanced Client Days,Enterprise Client Days,Essentials Backups (GB),Advanced Backups (GB),Enterprise Backups (GB) 01/01/2013,NA1,144,288,3360,264,0,0,72,24,0,0,0,0,0,0,0,0,0,0 02/01/2013,NA1,144,288,3360,264,0,0,72,24,0,0,0,0,0,0,0,0,0,0 03/01/2013,NA1,144,288,3360,264,0,0,72,24,0,0,0,0,0,0,0,0,0,0 04/01/2013,NA1,144,288,3360,264,0,0,72,24,0,0,0,0,0,0,0,0,0,0 05/01/2013,NA1,144,288,3360,264,0,0,72,24,0,0,0,0,0,0,0,0,0,0 06/01/2013,NA1,172,344,3958,264,0,0,72,24,0,0,0,0,0,0,0,0,0,0 07/01/2013,NA1,205,410,4783,264,0,0,72,24,0,0,0,0,0,0,0,0,0,0 08/01/2013,NA1,216,432,5136,264,0,0,72,24,0,0,0,0,0,0,0,0,0,0././@LongLink0000000000000000000000000000020000000000000011205 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_customerImage_2ffa36c8_18480000664000175000017500000000167212704221640033221 0ustar kamikami00000000000000 CustomerImageWithPricedSoftwareLabels 1 MSSQL2008R2S 2015-11-03T15:25:34.000Z NORMAL apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_deployVlan.xml0000664000175000017500000000067512704221640031754 0ustar kamikami00000000000000 DEPLOY_VLAN IN_PROGRESS Request to deploy VLAN 'Production VLAN' 
has been accepted and is being processed. ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_powerOffServer_INPROGRESS.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_powerOffServer_INPROGRESS.0000664000175000017500000000057012704221640033456 0ustar kamikami00000000000000 POWER_OFF_SERVER RESOURCE_BUSY Request to power off Server 'Production Server' has been accepted and is being processed. ././@LongLink0000000000000000000000000000017700000000000011222 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000303012701023453032501 0ustar kamikami00000000000000 NA1 US - East Ashburn Virginia US https://opsource-na1.cloud-vpn.net/ true NA10 US - East3 Ashburn Virginia US https://opsource-na1.cloud-vpn.net/ true apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_deleteFirewallRule.xml0000664000175000017500000000056012704221640033410 0ustar kamikami00000000000000 DELETE_FIREWALL_RULE OK Firewall Rule (Id:84e34850-595d-436e-a885-7cd37edb24a4) has been deleted apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_server_paginated_empty.xml0000664000175000017500000000022313153541406034205 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_editFirewallRule.xml0000664000175000017500000000056012704221640033073 0ustar kamikami00000000000000 EDIT_FIREWALL_RULE OK Firewall Rule with id 1aa3d0ce-d95d-4296-8338-9717e0d37ff9 has been edited apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_deleteTagKey.xml0000664000175000017500000000052413153541406031265 0ustar kamikami00000000000000 DELETE_TAG_KEY OK Tag Key 
(Id:4f921962-402d-438d-aa37-6f6a0392a1a9) has been deleted. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/ip_address_list_create.xml0000664000175000017500000000070213153541406032554 0ustar kamikami00000000000000 CREATE_IP_ADDRESS_LIST OK IP Address List 'myAddressList' has been created. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/port_list_delete.xml0000664000175000017500000000057713153541406031434 0ustar kamikami00000000000000 DELETE_PORT_LIST OK Port List with Id 84e34850-595d-436e-a885-7cd37edb24a4 has been deleted. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_removeNic.xml0000664000175000017500000000071212704221640031373 0ustar kamikami00000000000000 REMOVE_NIC IN_PROGRESS Request to Remove NIC 5999db1d-725c-46ba-9d4e-d33991e61ab1 for VLAN 'Subsystem VLAN' from Server 'Production Mail Server' has been accepted and is being processed. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_editNetworkDomain.xml0000664000175000017500000000055312704221640033261 0ustar kamikami00000000000000 EDIT_NETWORK_DOMAIN OK Network Domain 'Development Network Domain' was edited successfully. 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_vlan.xml0000664000175000017500000000247512704221640030577 0ustar kamikami00000000000000 Primary 10.0.0.1 2607:f480:111:1336:0:0:0:1 2015-07-13T03:52:45.000Z NORMAL Wi-Fi Wi-Fi 10.230.98.1 2607:f480:111:1199:0:0:0:1 2015-08-18T23:47:04.000Z NORMAL apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_deleteVlan.xml0000664000175000017500000000060712704221640031715 0ustar kamikami00000000000000 DELETE_VLAN IN_PROGRESS Request to Delete VLAN (Id:0e56433f-d808-4669-821d-812769517ff8) has been accepted and is being processed apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/tag_editTagKey.xml0000664000175000017500000000047013153541406030750 0ustar kamikami00000000000000 EDIT_TAG_KEY OK Tag Key 'ChangeNameTest' has been edited. apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/port_list_create.xml0000664000175000017500000000063513153541406031430 0ustar kamikami00000000000000 CREATE_PORT_LIST OK Port List 'MyPortList' has been created. 
././@LongLink0000000000000000000000000000026300000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000231012701023453032501 0ustar kamikami00000000000000 Change Server Disk Speed SUCCESS Server "Change Server Disk Speed" issued REASON_0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_removePublicIpBlock.xml0000664000175000017500000000057412704221640033535 0ustar kamikami00000000000000 REMOVE_PUBLIC_IP_BLOCK OK Public Ip Block bef4334a-844b-11e4-a91c-0030487e0302 has been removed successfully apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_publicIpBlock.xml0000664000175000017500000000076612704221640032362 0ustar kamikami00000000000000 8cdfd607-f429-4df6-9352-162cfc0891be 168.128.4.18 2 2015-07-13T04:07:37.000Z NORMAL apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_natRule.xml0000664000175000017500000000141212704221640031237 0ustar kamikami00000000000000 484174a2-ae74-4658-9e56- 50fc90e086cf 10.0.0.15 165.180.12.18 2015-03-06T13:43:45.000Z NORMAL 484174a2-ae74-4658-9e56- 50fc90e086cf 10.0.0.16 165.180.12.19 2015-03-06T13:45:10.000Z NORMAL ././@LongLink0000000000000000000000000000020400000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000531012701023453032504 0ustar kamikami00000000000000 abadbc7e-9e10-46ca-9d4a-194bcc6b6c16 testnode01 this is testnode01 description 2 2048 10 20 UNIX REDHAT5/64 44ed8b72-ebea-11df-bdc1-001517c46384 53b4c05b-341e-4ac3-b688-bdd78e43ca9e 
10.162.1.1 200.16.132.7 10-162-1-1 true 2011-03-02T17:16:09.882Z dbadbc8e-9e10-56ca-5d4a-155bcc5b5c15 testnode02 this is testnode02 description 4 4096 10 20 UNIX REDHAT5/64 44ed8b72-ebea-11df-bdc1-001517c46384 53b4c05b-341e-4ac3-b688-bdd78e43ca9e 10.162.1.2 10-162-1-2 true 2011-03-02T17:16:10.882Z ././@LongLink0000000000000000000000000000020000000000000011205 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/image_customerImage_5234e5c7_01de0000664000175000017500000000212312704221640033175 0ustar kamikami00000000000000 ImportedCustomerImage 2 2015-11-19T14:29:02.000Z NORMAL apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_createNatRule.xml0000664000175000017500000000066012704221640032367 0ustar kamikami00000000000000 CREATE_NAT_RULE OK NAT Rule with Id d31c2db0-be6b-4d50-8744-9a7a534b5fba has been created. 
././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_publicIpBlock_4487241a_f00000664000175000017500000000057412704221640033223 0ustar kamikami00000000000000 8cdfd607-f429-4df6-9352-162cfc0891be 168.128.4.18 2 2015-07-13T04:07:37.000Z NORMAL ././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/network_natRule_2187a636_7ebb_49a0000664000175000017500000000061212704221640033076 0ustar kamikami00000000000000 484174a2-ae74-4658-9e56-50fc90e086cf 10.0.0.16 165.180.12.19 2015-03-06T13:45:10.000Z NORMAL ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_shutdownServer_INPROGRESS.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_shutdownServer_INPROGRESS.0000664000175000017500000000056612704221640033547 0ustar kamikami00000000000000 SHUTDOWN_SERVER RESOURCE_BUSY Request to shutdown Server 'Production Server' has been accepted and is being processed. 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/port_list_lists.xml0000664000175000017500000000320513153541406031317 0ustar kamikami00000000000000 MyPortList Production Servers NORMAL 2015-06-21T18:54:42.000Z MyPortList2 Core functions NORMAL 2016-09-01T01:36:49.000Z Test_Port_List_1 Test Description NORMAL 2016-09-06T02:32:29.000Z ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/server_server_e75ead52_692f_4314_0000664000175000017500000000327212704221640033040 0ustar kamikami00000000000000 Production Web Server Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE DEPLOY_SERVER 2015-12-02T11:07:40.000Z devuser1 ././@LongLink0000000000000000000000000000022200000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_80000664000175000017500000000204712701023453032510 0ustar kamikami00000000000000 Graceful Shutdown Server ERROR Operation in progress on Server with Id 11 REASON_392 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/0000775000175000017500000000000013160535110024506 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_org_240.xml0000664000175000017500000000140512701023453030073 0ustar kamikami00000000000000 ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVA0000664000175000017500000000067712701023453033464 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml0000664000175000017500000003604212701023453031261 0ustar kamikami00000000000000 10.112.78.69 The kind of installed guest operating system Red Hat Enterprise Linux 5 (32-bit) Virtual Hardware Virtual Hardware Family 0 testerpart2 vmx-07
hertz * 10^6 Number of Virtual CPUs 2 virtual CPU(s) 1 3 2 count
byte * 2^20 Memory Size 512MB of memory 2 4 512 byte * 2^20
0
SCSI Controller SCSI Controller 0 3 lsilogic 6
0 Hard Disk 1 10485760 9 3 17 10485760 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml0000664000175000017500000000224512701023453030065 0ustar kamikami00000000000000 ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powe0000664000175000017500000000116412701023453033423 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml0000664000175000017500000003604212701023453030422 0ustar kamikami00000000000000 10.112.78.69 The kind of installed guest operating system Red Hat Enterprise Linux 5 (32-bit) Virtual Hardware Virtual Hardware Family 0 testerpart2 vmx-07
hertz * 10^6 Number of Virtual CPUs 2 virtual CPU(s) 1 3 2 count
byte * 2^20 Memory Size 512MB of memory 2 4 512 byte * 2^20
0
SCSI Controller SCSI Controller 0 3 lsilogic 6
0 Hard Disk 1 10485760 9 3 17 10485760 ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powe0000664000175000017500000000116312701023453033422 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml0000664000175000017500000000154312701023453031377 0ustar kamikami00000000000000 0 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_task_10496.xml0000664000175000017500000000123012701023453030420 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_task_11001.xml0000664000175000017500000000122712701023453030405 0ustar kamikami00000000000000 ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_rese0000664000175000017500000000116412701023453033407 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_login.xml0000664000175000017500000000046212701023453030031 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml0000664000175000017500000000101412701023453031550 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml0000664000175000017500000000116412701023453033330 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/0000775000175000017500000000000013160535107024507 5ustar kamikami00000000000000././@LongLink0000000000000000000000000000015700000000000011220 
Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_sam0000664000175000017500000000141212701023453034177 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml0000664000175000017500000000077412701023453033550 0ustar kamikami00000000000000 somevalue apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_ips.xml0000664000175000017500000000044712701023453031236 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_limits.xml0000664000175000017500000000151112701023453030164 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/_v2_0__auth_lon.json0000664000175000017500000000752612701023453030345 0ustar kamikami00000000000000{ "access": { "token": { "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", "expires": "2031-11-23T21:00:14.000-06:00" }, "serviceCatalog": [ { "endpoints": [ { "region": "ORD", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", "version": { "versionInfo": "https://cdn2.clouddrive.com/v1/", "versionList": "https://cdn2.clouddrive.com/", "versionId": "1" } } ], "name": "cloudFilesCDN", "type": "rax:object-cdn" }, { "endpoints": [ { "region": "ORD", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" } ], "name": "cloudFiles", "type": "object-store" }, { "endpoints": [ { "tenantId": "1337", "publicURL": "https://servers.api.rackspacecloud.com/v1.0/1337", "version": { 
"versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", "versionList": "https://servers.api.rackspacecloud.com/", "versionId": "1.0" } } ], "name": "cloudServers", "type": "compute" }, { "endpoints": [ { "region": "RegionOne", "tenantId": "1337", "publicURL": "https://127.0.0.1/v2/1337", "versionInfo": "https://127.0.0.1/v2/", "versionList": "https://127.0.0.1/", "versionId": "2" } ], "name": "nova", "type": "compute" }, { "endpoints": [ { "region": "LON", "tenantId": "613469", "publicURL": "https://lon.servers.api.rackspacecloud.com/v2/1337", "versionInfo": "https://lon.servers.api.rackspacecloud.com/v2/", "versionList": "https://lon.servers.api.rackspacecloud.com/", "versionId": "2" } ], "name": "cloudServersOpenStack", "type": "compute" }, { "endpoints": [ { "region": "LON", "tenantId": "1337", "publicURL": "https://preprod.lon.servers.api.rackspacecloud.com/v2/1337" } ], "name": "cloudServersPreprod", "type": "compute" } ], "user": { "id": "7", "roles": [ { "id": "identity:default", "description": "Default Role.", "name": "identity:default" } ], "name": "testuser" } } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/300_multiple_choices.json0000664000175000017500000000051512701023453031311 0ustar kamikami00000000000000{"choices": [{"status": "CURRENT", "media-types": [{"base": "application/xml", "type": "application/vnd.openstack.compute+xml;version=2"}, {"base": "application/json", "type": "application/vnd.openstack.compute+json;version=2"}], "id": "v2.0", "links": [{"href": "http://50.56.213.226:8774/v2/v2.0/images/detail", "rel": "self"}]}]} ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_multiple_nodes.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_mul0000664000175000017500000000141312701023453034215 0ustar kamikami00000000000000 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json0000664000175000017500000001460413153541406027475 0ustar kamikami00000000000000{ "access": { "token": { "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", "expires": "2031-11-23T21:00:14.000-06:00" }, "serviceCatalog": [ { "endpoints": [ { "region": "ORD", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", "version": { "versionInfo": "https://cdn2.clouddrive.com/v1/", "versionList": "https://cdn2.clouddrive.com/", "versionId": "1" } } ], "name": "cloudFilesCDN", "type": "rax:object-cdn" }, { "endpoints": [ { "region": "ORD", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" }, { "region": "LON", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" } ], "name": "cloudFiles", "type": "object-store" }, { "endpoints": [ { "tenantId": "1337", "publicURL": "https://servers.api.rackspacecloud.com/v1.0/1337", "version": { "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", "versionList": "https://servers.api.rackspacecloud.com/", "versionId": "1.0" } } ], "name": "cloudServers", "type": "compute" }, { "endpoints": [ { "region": "RegionOne", "tenantId": "1337", "publicURL": "https://test_endpoint.com/v2/1337", "versionInfo": "https://test_endpoint.com/v2/", "versionList": "https://test_endpoint.com/", "versionId": "2" }, { "region": "fr1", "tenantId": "1337", "publicURL": "https://test_endpoint.com/v2/1337", "versionInfo": 
"https://test_endpoint.com/v2/", "versionList": "https://test_endpoint.com/", "versionId": "2" } ], "name": "nova", "type": "compute" }, { "endpoints": [ { "region": "DFW", "tenantId": "613469", "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/1337", "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", "versionList": "https://dfw.servers.api.rackspacecloud.com/", "versionId": "2" }, { "region": "ORD", "tenantId": "613469", "publicURL": "https://ord.servers.api.rackspacecloud.com/v2/1337", "versionInfo": "https://ord.servers.api.rackspacecloud.com/v2/", "versionList": "https://ord.servers.api.rackspacecloud.com/", "versionId": "2" }, { "region": "IAD", "tenantId": "613469", "publicURL": "https://iad.servers.api.rackspacecloud.com/v2/1337", "versionInfo": "https://iad.servers.api.rackspacecloud.com/v2/", "versionList": "https://iad.servers.api.rackspacecloud.com/", "versionId": "2" }, { "region": "SYD", "tenantId": "613469", "publicURL": "https://syd.servers.api.rackspacecloud.com/v2/1337", "versionInfo": "https://syd.servers.api.rackspacecloud.com/v2/", "versionList": "https://syd.servers.api.rackspacecloud.com/", "versionId": "2" }, { "region": "HKG", "tenantId": "613469", "publicURL": "https://hkg.servers.api.rackspacecloud.com/v2/1337", "versionInfo": "https://hkg.servers.api.rackspacecloud.com/v2/", "versionList": "https://hkg.servers.api.rackspacecloud.com/", "versionId": "2" } ], "name": "cloudServersOpenStack", "type": "compute" }, { "endpoints": [ { "region": "DFW", "tenantId": "1337", "publicURL": "https://preprod.dfw.servers.api.rackspacecloud.com/v2/1337" } ], "name": "cloudServersPreprod", "type": "compute" } ], "user": { "id": "7", "roles": [ { "id": "identity:default", "description": "Default Role.", "name": "identity:default" } ], "name": "testuser" } } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml0000664000175000017500000000103112701023453031656 0ustar kamikami00000000000000 
apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers.xml0000664000175000017500000000067512701023453030366 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml0000664000175000017500000000017312701023453033117 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml0000664000175000017500000000064212701023453033252 0ustar kamikami00000000000000 ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pen0000664000175000017500000000070712701023453034207 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml0000664000175000017500000000100012701023453032205 0ustar kamikami00000000000000 b d apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml0000664000175000017500000000103112701023453033537 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail.xml0000664000175000017500000000070212701023453031677 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/_v2_0__auth_deployment.json0000664000175000017500000001375312701023453031734 0ustar kamikami00000000000000{ "access": { "token": { "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", "expires": "2031-11-23T21:00:14.000-06:00" }, "serviceCatalog": [ { "endpoints": [ { "region": "ORD", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", "version": { "versionInfo": "https://cdn2.clouddrive.com/v1/", "versionList": 
"https://cdn2.clouddrive.com/", "versionId": "1" } } ], "name": "cloudFilesCDN", "type": "rax:object-cdn" }, { "endpoints": [ { "region": "ORD", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" }, { "region": "LON", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" } ], "name": "cloudFiles", "type": "object-store" }, { "endpoints": [ { "tenantId": "slug", "publicURL": "https://servers.api.rackspacecloud.com/v1.0/slug", "version": { "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", "versionList": "https://servers.api.rackspacecloud.com/", "versionId": "1.0" } } ], "name": "cloudServers", "type": "compute" }, { "endpoints": [ { "region": "RegionOne", "tenantId": "slug", "publicURL": "https://127.0.0.1/v2/slug", "versionInfo": "https://127.0.0.1/v2/", "versionList": "https://127.0.0.1/", "versionId": "2" } ], "name": "nova", "type": "compute" }, { "endpoints": [ { "region": "DFW", "tenantId": "613469", "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/slug", "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", "versionList": "https://dfw.servers.api.rackspacecloud.com/", "versionId": "2" }, { "region": "ORD", "tenantId": "613469", "publicURL": "https://ord.servers.api.rackspacecloud.com/v2/slug", "versionInfo": "https://ord.servers.api.rackspacecloud.com/v2/", "versionList": "https://ord.servers.api.rackspacecloud.com/", "versionId": "2" }, { "region": "IAD", "tenantId": "613469", "publicURL": "https://iad.servers.api.rackspacecloud.com/v2/slug", "versionInfo": 
"https://iad.servers.api.rackspacecloud.com/v2/", "versionList": "https://iad.servers.api.rackspacecloud.com/", "versionId": "2" }, { "region": "SYD", "tenantId": "613469", "publicURL": "https://syd.servers.api.rackspacecloud.com/v2/slug", "versionInfo": "https://syd.servers.api.rackspacecloud.com/v2/", "versionList": "https://syd.servers.api.rackspacecloud.com/", "versionId": "2" }, { "region": "HKG", "tenantId": "613469", "publicURL": "https://hkg.servers.api.rackspacecloud.com/v2/slug", "versionInfo": "https://hkg.servers.api.rackspacecloud.com/v2/", "versionList": "https://hkg.servers.api.rackspacecloud.com/", "versionId": "2" } ], "name": "cloudServersOpenStack", "type": "compute" }, { "endpoints": [ { "region": "DFW", "tenantId": "slug", "publicURL": "https://preprod.dfw.servers.api.rackspacecloud.com/v2/slug" } ], "name": "cloudServersPreprod", "type": "compute" } ], "user": { "id": "7", "roles": [ { "id": "identity:default", "description": "Default Role.", "name": "identity:default" } ], "name": "testuser" } } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json0000664000175000017500000000011012701023453032254 0ustar kamikami00000000000000{"unauthorized":{"message":"Username or api key is invalid","code":401}}././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_suc0000664000175000017500000000071012701023453034211 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json0000664000175000017500000000076512701023453032253 0ustar 
kamikami00000000000000{"auth":{"token":{"expires":"2011-09-18T02:44:17.000-05:00"},"serviceCatalog":{"cloudFilesCDN":[{"region":"ORD","publicURL":"https:\/\/cdn2.clouddrive.com\/v1\/MossoCloudFS","v1Default":true}],"cloudFiles":[{"region":"ORD","publicURL":"https:\/\/storage101.ord1.clouddrive.com\/v1\/MossoCloudFS","v1Default":true,"internalURL":"https:\/\/snet-storage101.ord1.clouddrive.com\/v1\/MossoCloudFS"}],"cloudServers":[{"publicURL":"https:\/\/servers.api.rackspacecloud.com\/v1.0\/slug","v1Default":true}]}}} ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xmlapache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_mis0000664000175000017500000000071012701023453034207 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/_v3__auth.json0000664000175000017500000001562212701023453027253 0ustar kamikami00000000000000{ "token": { "methods": [ "password" ], "roles": [ { "id": "9fe2ff9ee4384b1894a90878d3e92bab", "name": "_member_" }, { "id": "b258b68172db4403892320f784c4d503", "name": "admin" } ], "expires_at": "2014-08-10T19:15:57.096078Z", "project": { "domain": { "id": "default", "name": "Default" }, "id": "9c4693dce56b493b9b83197d900f7fba", "name": "admin" }, "catalog": [ { "endpoints": [ { "url": "http://controller:8774/v2/9c4693dce56b493b9b83197d900f7fba", "region": "regionOne", "interface": "internal", "id": "b3bfb29033ff4add9c97e523e1022794" }, { "url": "http://192.168.18.100:8774/v2/9c4693dce56b493b9b83197d900f7fba", "region": "regionOne", "interface": "admin", "id": "b52ee215ded7473f94a46512cb94dbf1" }, { "url": "http://192.168.18.100:8774/v2/9c4693dce56b493b9b83197d900f7fba", "region": "regionOne", "interface": "public", "id": "ca8a6e39b9334300bf036c0c4226a173" } ], "type": "compute", "id": "03f123b2253e4852a86b994f86489c0a" }, { 
"endpoints": [ { "url": "http://192.168.18.100:8776/v1/9c4693dce56b493b9b83197d900f7fba", "region": "regionOne", "interface": "public", "id": "20bf617f334c4bcf82746820f5006599" }, { "url": "http://192.168.18.100:8776/v1/9c4693dce56b493b9b83197d900f7fba", "region": "regionOne", "interface": "admin", "id": "2da639c26463424fa9775e0bf4e9f29e" }, { "url": "http://controller:8776/v2/9c4693dce56b493b9b83197d900f7fba", "region": "regionOne", "interface": "internal", "id": "d568ed6f8c5a4649a6e68b7bcb86694b" } ], "type": "volume", "id": "47f77ba8f3864a03b66024e910ad7247" }, { "endpoints": [ { "url": "http://192.168.18.100:9696", "region": "regionOne", "interface": "admin", "id": "720303f92f81404aa80caa32cd9c7d23" }, { "url": "http://192.168.18.100:9696", "region": "regionOne", "interface": "public", "id": "8823b9edba354bb6bdc944a6b3bb5404" }, { "url": "http://controller:9696", "region": "regionOne", "interface": "internal", "id": "c2a522538037492dbec2173f271ecb32" } ], "type": "network", "id": "9bd61e09d372427f81eca9328f33c510" }, { "endpoints": [ { "url": "http://controller:5000/v2.0", "region": "regionOne", "interface": "internal", "id": "802622da0a874cac8fe2ec7a02d87c44" }, { "url": "http://192.168.18.100:35357/v2.0", "region": "regionOne", "interface": "admin", "id": "8a4eed85ddc748b18cc2b92e64291eb5" }, { "url": "http://192.168.18.100:5000/v2.0", "region": "regionOne", "interface": "public", "id": "9ef69c1600a944b9904f34efb6dc67eb" } ], "type": "identity", "id": "aef833a14f4240d0bbb699f0154add8e" }, { "endpoints": [ { "url": "http://192.168.18.100:9292", "region": "regionOne", "interface": "public", "id": "1aa84aebd3e2467e898e3c18428e3feb" }, { "url": "http://192.168.18.100:9292", "region": "regionOne", "interface": "admin", "id": "3f6aa4ffd0ec47d2862eee1648993bef" }, { "url": "http://192.168.200.1:9292", "region": "regionOne", "interface": "internal", "id": "9f66f90af36949479a6365680afabe12" } ], "type": "image", "id": "c0be10ea61e240f99567f328b9adf3d6" }, { 
"endpoints": [ { "url": "http://192.168.18.100:8776/v2/9c4693dce56b493b9b83197d900f7fba", "region": "regionOne", "interface": "public", "id": "6c6b0990ccf84f1890e404fddad7b6e5" }, { "url": "http://192.168.18.100:8776/v2/9c4693dce56b493b9b83197d900f7fba", "region": "regionOne", "interface": "admin", "id": "ab0f0bd770494d4399036867935c52ea" }, { "url": "http://controller:8776/v2/9c4693dce56b493b9b83197d900f7fba", "region": "regionOne", "interface": "internal", "id": "fc7c82deda034e52a544da7d00cd28de" } ], "type": "volumev2", "id": "e097b64d701e4ce29f2a69eed4e69856" } ], "extras": {}, "user": { "domain": { "id": "default", "name": "Default" }, "id": "55fba80f022b4855acfc700ae13b2b24", "name": "admin" }, "issued_at": "2014-08-10T18:15:57.096107Z" } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_images_detail.xml0000664000175000017500000000333312701023453031456 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml0000664000175000017500000000035412701023453032041 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/_v1_1__auth.json0000664000175000017500000000270212701023453027464 0ustar kamikami00000000000000{ "auth": { "token": { "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", "expires": "2031-11-23T21:00:14-06:00" }, "serviceCatalog": { "cloudFilesCDN": [ { "region": "ORD", "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS", "v1Default": true }, { "region": "LON", "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS", "v1Default": false } ], "cloudFiles": [ { "region": "ORD", "publicURL": "https://storage101.ord1.clouddrive.com/v1/MossoCloudFS", "v1Default": true, "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS" }, { "region": "LON", "publicURL": "https://storage101.lon1.clouddrive.com/v1/MossoCloudFS", "v1Default": false, "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS" } ], 
"cloudServers": [ { "publicURL": "https://servers.api.rackspacecloud.com/v1.0/slug", "v1Default": true } ] } } } apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml0000664000175000017500000000040312701023453032217 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/fixtures/openstack/v1_slug_images_post.xml0000664000175000017500000000033212701023453031175 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/compute/test_gce.py0000664000175000017500000050366013153541406023031 0ustar kamikami00000000000000# License to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Tests for Google Compute Engine Driver """ import datetime import mock import sys import unittest from libcloud.utils.py3 import httplib from libcloud.compute.drivers.gce import ( GCENodeDriver, API_VERSION, timestamp_to_datetime, GCEAddress, GCEBackend, GCEBackendService, GCEFirewall, GCEForwardingRule, GCEHealthCheck, GCENetwork, GCENodeImage, GCERoute, GCERegion, GCETargetHttpProxy, GCEUrlMap, GCEZone, GCESubnetwork) from libcloud.common.google import (GoogleBaseAuthConnection, ResourceNotFoundError, ResourceExistsError, GoogleBaseError) from libcloud.test.common.test_google import GoogleAuthMockHttp, GoogleTestCase from libcloud.compute.base import Node, StorageVolume from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import GCE_PARAMS, GCE_KEYWORD_PARAMS class GCENodeDriverTest(GoogleTestCase, TestCaseMixin): """ Google Compute Engine Test Class. """ # Mock out a few specific calls that interact with the user, system or # environment. 
GCEZone._now = lambda x: datetime.datetime(2013, 6, 26, 19, 0, 0) datacenter = 'us-central1-a' def setUp(self): GCEMockHttp.test = self GCENodeDriver.connectionCls.conn_class = GCEMockHttp GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp GCEMockHttp.type = None kwargs = GCE_KEYWORD_PARAMS.copy() kwargs['auth_type'] = 'IA' kwargs['datacenter'] = self.datacenter self.driver = GCENodeDriver(*GCE_PARAMS, **kwargs) def test_default_scopes(self): self.assertEqual(self.driver.scopes, None) def test_timestamp_to_datetime(self): timestamp1 = '2013-06-26T10:05:19.340-07:00' datetime1 = datetime.datetime(2013, 6, 26, 17, 5, 19) self.assertEqual(timestamp_to_datetime(timestamp1), datetime1) timestamp2 = '2013-06-26T17:43:15.000-00:00' datetime2 = datetime.datetime(2013, 6, 26, 17, 43, 15) self.assertEqual(timestamp_to_datetime(timestamp2), datetime2) def test_get_object_by_kind(self): obj = self.driver._get_object_by_kind(None) self.assertIsNone(obj) obj = self.driver._get_object_by_kind('') self.assertIsNone(obj) obj = self.driver._get_object_by_kind( 'https://www.googleapis.com/compute/v1/projects/project_name/' 'global/targetHttpProxies/web-proxy') self.assertEqual(obj.name, 'web-proxy') def test_get_region_from_zone(self): zone1 = self.driver.ex_get_zone('us-central1-a') expected_region1 = 'us-central1' region1 = self.driver._get_region_from_zone(zone1) self.assertEqual(region1.name, expected_region1) zone2 = self.driver.ex_get_zone('europe-west1-b') expected_region2 = 'europe-west1' region2 = self.driver._get_region_from_zone(zone2) self.assertEqual(region2.name, expected_region2) def test_get_volume(self): volume_name = 'lcdisk' volume = self.driver.ex_get_volume(volume_name) self.assertTrue(isinstance(volume, StorageVolume)) self.assertEqual(volume.name, volume_name) def test_get_volume_location(self): volume_name = 'lcdisk' location = self.driver.zone volume = self.driver.ex_get_volume(volume_name, zone=location) self.assertTrue(isinstance(volume, 
StorageVolume)) self.assertEqual(volume.name, volume_name) def test_get_volume_location_name(self): volume_name = 'lcdisk' location = self.driver.zone volume = self.driver.ex_get_volume(volume_name, zone=location.name) self.assertTrue(isinstance(volume, StorageVolume)) self.assertEqual(volume.name, volume_name) def test_find_zone_or_region(self): zone1 = self.driver._find_zone_or_region('libcloud-demo-np-node', 'instances') self.assertEqual(zone1.name, 'us-central2-a') zone2 = self.driver._find_zone_or_region( 'libcloud-demo-europe-np-node', 'instances') self.assertEqual(zone2.name, 'europe-west1-a') region = self.driver._find_zone_or_region('libcloud-demo-address', 'addresses', region=True) self.assertEqual(region.name, 'us-central1') def test_match_images(self): project = 'debian-cloud' image = self.driver._match_images(project, 'debian-7') self.assertEqual(image.name, 'debian-7-wheezy-v20131120') image = self.driver._match_images(project, 'backports') self.assertEqual(image.name, 'backports-debian-7-wheezy-v20131127') def test_build_disk_gce_struct(self): device_name = 'disk_name' disk_name = None source = self.driver.ex_get_volume('lcdisk') is_boot = True # source as input d = self.driver._build_disk_gce_struct( device_name=device_name, source=source, disk_name=disk_name, is_boot=is_boot) self.assertEqual(source.extra['selfLink'], d['source']) self.assertTrue(d['boot']) self.assertTrue(d['autoDelete']) self.assertEqual('READ_WRITE', d['mode']) self.assertFalse('initializeParams' in d) # image as input device_name = 'disk_name' disk_type = self.driver.ex_get_disktype('pd-ssd', 'us-central1-a') image = self.driver.ex_get_image('debian-7') source = None is_boot = True d = self.driver._build_disk_gce_struct(device_name=device_name, disk_type=disk_type, image=image, is_boot=is_boot) self.assertEqual('READ_WRITE', d['mode']) self.assertEqual('PERSISTENT', d['type']) self.assertTrue('initializeParams' in d and isinstance(d['initializeParams'], dict)) self.assertTrue( 
all(k in d['initializeParams'] for k in ['sourceImage', 'diskType', 'diskName'])) self.assertTrue(d['initializeParams']['sourceImage'].startswith( 'https://')) self.assertTrue(d['autoDelete']) self.assertTrue(d['boot']) def test_build_network_gce_struct(self): network = self.driver.ex_get_network('lcnetwork') address = self.driver.ex_get_address('lcaddress') subnetwork_name = 'cf-972cf02e6ad49112' subnetwork = self.driver.ex_get_subnetwork(subnetwork_name) d = self.driver._build_network_gce_struct(network, subnetwork, address) self.assertTrue('network' in d) self.assertTrue('subnetwork' in d) self.assertTrue('kind' in d and d['kind'] == 'compute#instanceNetworkInterface') network = self.driver.ex_get_network('default') d = self.driver._build_network_gce_struct(network) self.assertTrue('network' in d) self.assertFalse('subnetwork' in d) self.assertTrue('kind' in d and d['kind'] == 'compute#instanceNetworkInterface') def test_build_scheduling_gce_struct(self): self.assertFalse( self.driver._build_scheduling_gce_struct(None, None, None)) # on_host_maintenance bad value should raise a Valueerror self.assertRaises(ValueError, self.driver._build_service_account_gce_struct, 'on_host_maintenance="foobar"') # on_host_maintenance is 'MIGRATE' and prempt is True self.assertRaises(ValueError, self.driver._build_service_account_gce_struct, 'on_host_maintenance="MIGRATE"', 'preemptible=True') # automatic_restart is True and prempt is True self.assertRaises(ValueError, self.driver._build_service_account_gce_struct, 'automatic_restart="True"', 'preemptible=True') actual = self.driver._build_scheduling_gce_struct('TERMINATE', True, False) self.assertTrue('automaticRestart' in actual and actual['automaticRestart'] is True) self.assertTrue('onHostMaintenance' in actual and actual['onHostMaintenance'] == 'TERMINATE') self.assertTrue('preemptible' in actual) self.assertFalse(actual['preemptible']) def test_build_service_account_gce_struct(self): self.assertRaises(ValueError, 
self.driver._build_service_account_gce_struct, None) input = {'scopes': ['compute-ro']} actual = self.driver._build_service_account_gce_struct(input) self.assertTrue('email' in actual) self.assertTrue('scopes' in actual) def test_build_service_account_gce_list(self): # ensure we have a list self.assertRaises(ValueError, self.driver._build_service_accounts_gce_list, 'foo') # no input actual = self.driver._build_service_accounts_gce_list() self.assertTrue(len(actual) == 1) self.assertTrue('email' in actual[0]) self.assertTrue('scopes' in actual[0]) def test_get_selflink_or_name(self): network = self.driver.ex_get_network('lcnetwork') # object as input actual = self.driver._get_selflink_or_name(network, False, 'network') self.assertEqual('lcnetwork', actual) actual = self.driver._get_selflink_or_name(network, True, 'network') self.assertTrue(actual.startswith('https://')) # name-only as input actual = self.driver._get_selflink_or_name('lcnetwork', True, 'network') self.assertTrue(actual.startswith('https://')) actual = self.driver._get_selflink_or_name('lcnetwork', False, 'network') self.assertTrue('lcnetwork', actual) # if selflinks is true, we need objname self.assertRaises(ValueError, self.driver._get_selflink_or_name, 'lcnetwork', True) def test_ex_get_serial_output(self): self.assertRaises(ValueError, self.driver.ex_get_serial_output, 'foo') node = self.driver.ex_get_node('node-name', 'us-central1-a') self.assertTrue( self.driver.ex_get_serial_output(node), 'This is some serial\r\noutput for you.') def test_ex_list(self): d = self.driver # Test the default case for all list methods # (except list_volume_snapshots, which requires an arg) for list_fn in (d.ex_list_addresses, d.ex_list_backendservices, d.ex_list_disktypes, d.ex_list_firewalls, d.ex_list_forwarding_rules, d.ex_list_healthchecks, d.ex_list_networks, d.ex_list_subnetworks, d.ex_list_project_images, d.ex_list_regions, d.ex_list_routes, d.ex_list_snapshots, d.ex_list_targethttpproxies, 
d.ex_list_targetinstances, d.ex_list_targetpools, d.ex_list_urlmaps, d.ex_list_zones, d.list_images, d.list_locations, d.list_nodes, d.list_sizes, d.list_volumes): full_list = [item.name for item in list_fn()] li = d.ex_list(list_fn) iter_list = [item.name for sublist in li for item in sublist] self.assertEqual(full_list, iter_list) # Test paging & filtering with a single list function as they require # additional test fixtures list_fn = d.ex_list_regions for count, sublist in zip((2, 1), d.ex_list(list_fn).page(2)): self.assertTrue(len(sublist) == count) for sublist in d.ex_list(list_fn).filter('name eq us-central1'): self.assertTrue(len(sublist) == 1) self.assertEqual(sublist[0].name, 'us-central1') def test_ex_list_addresses(self): address_list = self.driver.ex_list_addresses() address_list_all = self.driver.ex_list_addresses('all') address_list_uc1 = self.driver.ex_list_addresses('us-central1') address_list_global = self.driver.ex_list_addresses('global') self.assertEqual(len(address_list), 2) self.assertEqual(len(address_list_all), 5) self.assertEqual(len(address_list_global), 1) self.assertEqual(address_list[0].name, 'libcloud-demo-address') self.assertEqual(address_list_uc1[0].name, 'libcloud-demo-address') self.assertEqual(address_list_global[0].name, 'lcaddressglobal') names = [a.name for a in address_list_all] self.assertTrue('libcloud-demo-address' in names) def test_ex_list_backendservices(self): self.backendservices_mock = 'empty' backendservices_list = self.driver.ex_list_backendservices() self.assertListEqual(backendservices_list, []) self.backendservices_mock = 'web-service' backendservices_list = self.driver.ex_list_backendservices() web_service = backendservices_list[0] self.assertEqual(web_service.name, 'web-service') self.assertEqual(len(web_service.healthchecks), 1) self.assertEqual(len(web_service.backends), 2) def test_ex_list_healthchecks(self): healthchecks = self.driver.ex_list_healthchecks() self.assertEqual(len(healthchecks), 3) 
self.assertEqual(healthchecks[0].name, 'basic-check') def test_ex_list_firewalls(self): firewalls = self.driver.ex_list_firewalls() self.assertEqual(len(firewalls), 5) self.assertEqual(firewalls[0].name, 'default-allow-internal') def test_ex_list_forwarding_rules(self): forwarding_rules = self.driver.ex_list_forwarding_rules() forwarding_rules_all = self.driver.ex_list_forwarding_rules('all') forwarding_rules_uc1 = self.driver.ex_list_forwarding_rules( 'us-central1') self.assertEqual(len(forwarding_rules), 2) self.assertEqual(len(forwarding_rules_all), 2) self.assertEqual(forwarding_rules[0].name, 'lcforwardingrule') self.assertEqual(forwarding_rules_uc1[0].name, 'lcforwardingrule') names = [f.name for f in forwarding_rules_all] self.assertTrue('lcforwardingrule' in names) def test_ex_list_forwarding_rules_global(self): forwarding_rules = self.driver.ex_list_forwarding_rules( global_rules=True) self.assertEqual(len(forwarding_rules), 2) self.assertEqual(forwarding_rules[0].name, 'http-rule') names = [f.name for f in forwarding_rules] self.assertListEqual(names, ['http-rule', 'http-rule2']) def test_list_images(self): local_images = self.driver.list_images() all_deprecated_images = self.driver.list_images( ex_include_deprecated=True) debian_images = self.driver.list_images(ex_project='debian-cloud') local_plus_deb = self.driver.list_images( ['debian-cloud', 'project_name']) self.assertEqual(len(local_images), 23) self.assertEqual(len(all_deprecated_images), 156) self.assertEqual(len(debian_images), 2) self.assertEqual(len(local_plus_deb), 3) self.assertEqual(local_images[0].name, 'aws-ubuntu') self.assertEqual(debian_images[1].name, 'debian-7-wheezy-v20131120') def test_ex_destroy_instancegroup(self): name = 'myname' zone = 'us-central1-a' uig = self.driver.ex_get_instancegroup(name, zone) self.assertTrue(self.driver.ex_destroy_instancegroup(uig)) def test_ex_get_instancegroup(self): name = 'myname' loc = 'us-central1-a' actual = 
self.driver.ex_get_instancegroup(name, loc) self.assertEqual(actual.name, name) self.assertEqual(actual.zone.name, loc) def test_ex_create_instancegroup(self): name = 'myname' loc = 'us-central1-a' actual = self.driver.ex_create_instancegroup(name, loc) self.assertEqual(actual.name, name) self.assertEqual(actual.zone.name, loc) def test_ex_list_instancegroups(self): loc = 'us-central1-a' actual = self.driver.ex_list_instancegroups(loc) self.assertTrue(len(actual) == 2) self.assertEqual(actual[0].name, 'myname') self.assertEqual(actual[1].name, 'myname2') def test_ex_instancegroup_list_instances(self): name = 'myname' loc = 'us-central1-a' gceobj = self.driver.ex_get_instancegroup(name, loc) actual = self.driver.ex_instancegroup_list_instances(gceobj) self.assertTrue(len(actual) == 2) for node in actual: self.assertTrue(isinstance(node, Node)) self.assertEqual(loc, node.extra['zone'].name) def test_ex_instancegroup_add_instances(self): name = 'myname' loc = 'us-central1-a' gceobj = self.driver.ex_get_instancegroup(name, loc) node_name = self.driver.ex_get_node('node-name', loc) lcnode = self.driver.ex_get_node('lcnode-001', loc) node_list = [node_name, lcnode] self.assertTrue( self.driver.ex_instancegroup_add_instances(gceobj, node_list)) def test_ex_instancegroup_remove_instances(self): name = 'myname' loc = 'us-central1-a' gceobj = self.driver.ex_get_instancegroup(name, loc) node_name = self.driver.ex_get_node('node-name', loc) lcnode = self.driver.ex_get_node('lcnode-001', loc) node_list = [node_name, lcnode] self.assertTrue( self.driver.ex_instancegroup_remove_instances(gceobj, node_list)) def test_ex_instancegroup_set_named_ports(self): name = 'myname' loc = 'us-central1-a' gceobj = self.driver.ex_get_instancegroup(name, loc) named_ports = [{'name': 'foo', 'port': 4444}] # base case self.assertTrue( self.driver.ex_instancegroup_set_named_ports(gceobj, named_ports)) # specify nothing, default is empty list 
self.assertTrue(self.driver.ex_instancegroup_set_named_ports(gceobj)) # specify empty list self.assertTrue( self.driver.ex_instancegroup_set_named_ports(gceobj, [])) # raise valueerror if string is passed in self.assertRaises(ValueError, self.driver.ex_instancegroup_set_named_ports, gceobj, 'foobar') # raise valueerror if dictionary is passed in self.assertRaises(ValueError, self.driver.ex_instancegroup_set_named_ports, gceobj, {'name': 'foo', 'port': 4444}) def test_ex_create_instancegroupmanager(self): name = 'myinstancegroup' zone = 'us-central1-a' size = 4 template_name = 'my-instance-template1' template = self.driver.ex_get_instancetemplate(template_name) mig = self.driver.ex_create_instancegroupmanager( name, zone, template, size, base_instance_name='base-foo') self.assertEqual(mig.name, name) self.assertEqual(mig.size, size) self.assertEqual(mig.zone.name, zone) def test_ex_create_instancetemplate(self): name = 'my-instance-template1' actual = self.driver.ex_create_instancetemplate( name, size='n1-standard-1', image='debian-7', network='default') self.assertEqual(actual.name, name) self.assertEqual(actual.extra['properties']['machineType'], 'n1-standard-1') def test_list_locations(self): locations = self.driver.list_locations() self.assertEqual(len(locations), 6) self.assertEqual(locations[0].name, 'asia-east1-a') def test_ex_list_routes(self): routes = self.driver.ex_list_routes() self.assertEqual(len(routes), 3) self.assertTrue('lcdemoroute' in [route.name for route in routes]) def test_ex_list_sslcertificate(self): ssl_name = 'example' certs = self.driver.ex_list_sslcertificates() self.assertEqual(certs[0].name, ssl_name) self.assertTrue(len(certs) == 1) def test_ex_list_subnetworks(self): subnetworks = self.driver.ex_list_subnetworks() self.assertEqual(len(subnetworks), 1) self.assertEqual(subnetworks[0].name, 'cf-972cf02e6ad49112') self.assertEqual(subnetworks[0].cidr, '10.128.0.0/20') subnetworks = self.driver.ex_list_subnetworks('all') 
self.assertEqual(len(subnetworks), 4) def test_ex_create_sslcertificate(self): ssl_name = 'example' private_key = '-----BEGIN RSA PRIVATE KEY-----\nfoobar==\n-----END RSA PRIVATE KEY-----\n' certificate = '-----BEGIN CERTIFICATE-----\nfoobar==\n-----END CERTIFICATE-----\n' ssl = self.driver.ex_create_sslcertificate( ssl_name, certificate=certificate, private_key=private_key) self.assertEqual(ssl_name, ssl.name) self.assertEqual(certificate, ssl.certificate) def test_ex_create_subnetwork(self): name = 'cf-972cf02e6ad49112' cidr = '10.128.0.0/20' network_name = 'cf' network = self.driver.ex_get_network(network_name) region_name = 'us-central1' region = self.driver.ex_get_region(region_name) # test by network/region name subnet = self.driver.ex_create_subnetwork(name, cidr, network_name, region_name) self.assertTrue(isinstance(subnet, GCESubnetwork)) self.assertTrue(isinstance(subnet.region, GCERegion)) self.assertTrue(isinstance(subnet.network, GCENetwork)) self.assertEqual(subnet.name, name) self.assertEqual(subnet.cidr, cidr) # test by network/region object subnet = self.driver.ex_create_subnetwork(name, cidr, network, region) self.assertTrue(isinstance(subnet, GCESubnetwork)) self.assertTrue(isinstance(subnet.region, GCERegion)) self.assertTrue(isinstance(subnet.network, GCENetwork)) self.assertEqual(subnet.name, name) self.assertEqual(subnet.cidr, cidr) def test_ex_destroy_subnetwork(self): name = 'cf-972cf02e6ad49112' region_name = 'us-central1' region = self.driver.ex_get_region(region_name) # delete with no region self.assertTrue(self.driver.ex_destroy_subnetwork(name)) # delete with region name self.assertTrue(self.driver.ex_destroy_subnetwork(name, region_name)) # delete with region object self.assertTrue(self.driver.ex_destroy_subnetwork(name, region)) def test_ex_get_sslcertificate(self): ssl_name = 'example' ssl = self.driver.ex_get_sslcertificate(ssl_name) self.assertEqual(ssl.name, ssl_name) self.assertTrue(hasattr(ssl, 'certificate')) 
self.assertTrue(len(ssl.certificate)) def test_ex_get_subnetwork(self): name = 'cf-972cf02e6ad49112' region_name = 'us-central1' region = self.driver.ex_get_region(region_name) # fetch by no region subnetwork = self.driver.ex_get_subnetwork(name) self.assertEqual(subnetwork.name, name) # fetch by region name subnetwork = self.driver.ex_get_subnetwork(name, region_name) self.assertEqual(subnetwork.name, name) # fetch by region object subnetwork = self.driver.ex_get_subnetwork(name, region) self.assertEqual(subnetwork.name, name) def test_ex_list_networks(self): networks = self.driver.ex_list_networks() self.assertEqual(len(networks), 3) self.assertEqual(networks[0].name, 'cf') self.assertEqual(networks[0].mode, 'auto') self.assertEqual(len(networks[0].subnetworks), 4) self.assertEqual(networks[1].name, 'custom') self.assertEqual(networks[1].mode, 'custom') self.assertEqual(len(networks[1].subnetworks), 1) self.assertEqual(networks[2].name, 'default') self.assertEqual(networks[2].mode, 'legacy') def test_list_nodes(self): nodes = self.driver.list_nodes() nodes_all = self.driver.list_nodes(ex_zone='all') nodes_uc1a = self.driver.list_nodes(ex_zone='us-central1-a') self.assertEqual(len(nodes), 1) self.assertEqual(len(nodes_all), 8) self.assertEqual(len(nodes_uc1a), 1) self.assertEqual(nodes[0].name, 'node-name') self.assertEqual(nodes_uc1a[0].name, 'node-name') names = [n.name for n in nodes_all] self.assertTrue('node-name' in names) def test_ex_list_regions(self): regions = self.driver.ex_list_regions() self.assertEqual(len(regions), 3) self.assertEqual(regions[0].name, 'europe-west1') def test_ex_list_snapshots(self): snapshots = self.driver.ex_list_snapshots() self.assertEqual(len(snapshots), 2) self.assertEqual(snapshots[0].name, 'lcsnapshot') def test_ex_list_targethttpproxies(self): target_proxies = self.driver.ex_list_targethttpproxies() self.assertEqual(len(target_proxies), 2) self.assertEqual(target_proxies[0].name, 'web-proxy') names = [t.name for t in 
target_proxies] self.assertListEqual(names, ['web-proxy', 'web-proxy2']) def test_ex_list_targetinstances(self): target_instances = self.driver.ex_list_targetinstances() target_instances_all = self.driver.ex_list_targetinstances('all') target_instances_uc1 = self.driver.ex_list_targetinstances( 'us-central1-a') self.assertEqual(len(target_instances), 2) self.assertEqual(len(target_instances_all), 2) self.assertEqual(len(target_instances_uc1), 2) self.assertEqual(target_instances[0].name, 'hello') self.assertEqual(target_instances_uc1[0].name, 'hello') names = [t.name for t in target_instances_all] self.assertTrue('lctargetinstance' in names) def test_ex_list_targetpools(self): target_pools = self.driver.ex_list_targetpools() target_pools_all = self.driver.ex_list_targetpools('all') target_pools_uc1 = self.driver.ex_list_targetpools('us-central1') self.assertEqual(len(target_pools), 4) self.assertEqual(len(target_pools_all), 5) self.assertEqual(len(target_pools_uc1), 4) self.assertEqual(target_pools[0].name, 'lctargetpool') self.assertEqual(target_pools_uc1[0].name, 'lctargetpool') names = [t.name for t in target_pools_all] self.assertTrue('www-pool' in names) def test_list_sizes(self): sizes = self.driver.list_sizes() sizes_all = self.driver.list_sizes('all') self.assertEqual(len(sizes), 22) self.assertEqual(len(sizes_all), 100) self.assertEqual(sizes[0].name, 'f1-micro') self.assertEqual(sizes[0].extra['zone'].name, 'us-central1-a') names = [s.name for s in sizes_all] self.assertEqual(names.count('n1-standard-1'), 5) def test_ex_get_license(self): license = self.driver.ex_get_license('suse-cloud', 'sles-12') self.assertEqual(license.name, 'sles-12') self.assertTrue(license.charges_use_fee) def test_list_disktypes(self): disktypes = self.driver.ex_list_disktypes() disktypes_all = self.driver.ex_list_disktypes('all') disktypes_uc1a = self.driver.ex_list_disktypes('us-central1-a') self.assertEqual(len(disktypes), 2) self.assertEqual(len(disktypes_all), 9) 
self.assertEqual(len(disktypes_uc1a), 2) self.assertEqual(disktypes[0].name, 'pd-ssd') self.assertEqual(disktypes_uc1a[0].name, 'pd-ssd') names = [v.name for v in disktypes_all] self.assertTrue('pd-standard' in names) self.assertTrue('local-ssd' in names) def test_ex_list_instancegroupmanagers(self): instancegroupmanagers = self.driver.ex_list_instancegroupmanagers() instancegroupmanagers_all = self.driver.ex_list_instancegroupmanagers( 'all') instancegroupmanagers_ue1b = self.driver.ex_list_instancegroupmanagers( 'us-east1-b') self.assertEqual(len(instancegroupmanagers), 1) self.assertEqual(len(instancegroupmanagers_all), 2) self.assertEqual(len(instancegroupmanagers_ue1b), 1) def test_ex_instancegroupmanager_list_managed_instances(self): ig_name = 'myinstancegroup' ig_zone = 'us-central1-a' mig = self.driver.ex_get_instancegroupmanager(ig_name, ig_zone) instances = mig.list_managed_instances() self.assertTrue(all([x['currentAction'] == 'NONE' for x in instances])) self.assertTrue('base-foo-2vld' in [x['name'] for x in instances]) self.assertEqual(len(instances), 4) def test_ex_list_instancetemplates(self): instancetemplates = self.driver.ex_list_instancetemplates() self.assertEqual(len(instancetemplates), 1) self.assertEqual(instancetemplates[0].name, 'my-instance-template1') def test_ex_list_autoscalers(self): autoscalers = self.driver.ex_list_autoscalers('all') self.assertEqual(len(autoscalers), 1) self.assertEqual(autoscalers[0].name, 'my-autoscaler') def test_ex_list_urlmaps(self): urlmaps_list = self.driver.ex_list_urlmaps() web_map = urlmaps_list[0] self.assertEqual(web_map.name, 'web-map') self.assertEqual(len(web_map.host_rules), 0) self.assertEqual(len(web_map.path_matchers), 0) self.assertEqual(len(web_map.tests), 0) def test_list_volumes(self): volumes = self.driver.list_volumes() volumes_all = self.driver.list_volumes('all') volumes_uc1a = self.driver.list_volumes('us-central1-a') self.assertEqual(len(volumes), 2) self.assertEqual(len(volumes_all), 
17) self.assertEqual(len(volumes_uc1a), 2) self.assertEqual(volumes[0].name, 'lcdisk') self.assertEqual(volumes_uc1a[0].name, 'lcdisk') names = [v.name for v in volumes_all] self.assertTrue('libcloud-demo-europe-boot-disk' in names) def test_ex_list_zones(self): zones = self.driver.ex_list_zones() self.assertEqual(len(zones), 6) self.assertEqual(zones[0].name, 'asia-east1-a') def test_ex_create_address_global(self): address_name = 'lcaddressglobal' address = self.driver.ex_create_address(address_name, 'global') self.assertTrue(isinstance(address, GCEAddress)) self.assertEqual(address.name, address_name) self.assertEqual(address.region, 'global') def test_ex_create_address(self): address_name = 'lcaddress' address = self.driver.ex_create_address(address_name) self.assertTrue(isinstance(address, GCEAddress)) self.assertEqual(address.name, address_name) def test_ex_create_backend(self): # Note: this is an internal object, no API call is made # and no fixture is needed specifically for GCEBackend, however # it does rely on an InstanceGroup object. 
ig = self.driver.ex_get_instancegroup('myinstancegroup', 'us-central1-a') backend = self.driver.ex_create_backend(ig) self.assertTrue(isinstance(backend, GCEBackend)) self.assertEqual(backend.name, '%s/instanceGroups/%s' % (ig.zone.name, ig.name)) self.assertEqual(backend.instance_group.name, ig.name) self.assertEqual(backend.balancing_mode, 'UTILIZATION') def test_ex_create_backendservice(self): backendservice_name = 'web-service' ig1 = self.driver.ex_get_instancegroup('myinstancegroup', 'us-central1-a') backend1 = self.driver.ex_create_backend(ig1) ig2 = self.driver.ex_get_instancegroup('myinstancegroup2', 'us-central1-a') backend2 = self.driver.ex_create_backend(ig2) backendservice = self.driver.ex_create_backendservice( name=backendservice_name, healthchecks=['lchealthcheck'], backends=[backend1, backend2]) self.assertTrue(isinstance(backendservice, GCEBackendService)) self.assertEqual(backendservice.name, backendservice_name) self.assertEqual(len(backendservice.backends), 2) ig_links = [ig1.extra['selfLink'], ig2.extra['selfLink']] for be in backendservice.backends: self.assertTrue(be['group'] in ig_links) def test_ex_create_healthcheck(self): healthcheck_name = 'lchealthcheck' kwargs = {'host': 'lchost', 'path': '/lc', 'port': 8000, 'interval': 10, 'timeout': 10, 'unhealthy_threshold': 4, 'healthy_threshold': 3, 'description': 'test healthcheck'} hc = self.driver.ex_create_healthcheck(healthcheck_name, **kwargs) self.assertTrue(isinstance(hc, GCEHealthCheck)) self.assertEqual(hc.name, healthcheck_name) self.assertEqual(hc.path, '/lc') self.assertEqual(hc.port, 8000) self.assertEqual(hc.interval, 10) self.assertEqual(hc.extra['host'], 'lchost') self.assertEqual(hc.extra['description'], 'test healthcheck') def test_ex_create_image(self): volume = self.driver.ex_get_volume('lcdisk') description = 'CoreOS beta 522.3.0' name = 'coreos' family = 'coreos' guest_os_features = ['VIRTIO_SCSI_MULTIQUEUE'] expected_features = [{'type': 'VIRTIO_SCSI_MULTIQUEUE'}] 
mock_request = mock.Mock() mock_request.side_effect = self.driver.connection.async_request self.driver.connection.async_request = mock_request image = self.driver.ex_create_image( name, volume, description=description, family='coreos', guest_os_features=guest_os_features) self.assertTrue(isinstance(image, GCENodeImage)) self.assertTrue(image.name.startswith(name)) self.assertEqual(image.extra['description'], description) self.assertEqual(image.extra['family'], family) self.assertEqual(image.extra['guestOsFeatures'], expected_features) expected_data = {'description': description, 'family': family, 'guestOsFeatures': expected_features, 'name': name, 'sourceDisk': volume.extra['selfLink'], 'zone': volume.extra['zone'].name} mock_request.assert_called_once_with('/global/images', data=expected_data, method='POST') def test_ex_copy_image(self): name = 'coreos' url = 'gs://storage.core-os.net/coreos/amd64-generic/247.0.0/coreos_production_gce.tar.gz' description = 'CoreOS beta 522.3.0' family = 'coreos' guest_os_features = ['VIRTIO_SCSI_MULTIQUEUE'] expected_features = [{'type': 'VIRTIO_SCSI_MULTIQUEUE'}] image = self.driver.ex_copy_image(name, url, description=description, family=family, guest_os_features=guest_os_features) self.assertTrue(image.name.startswith(name)) self.assertEqual(image.extra['description'], description) self.assertEqual(image.extra['family'], family) self.assertEqual(image.extra['guestOsFeatures'], expected_features) def test_ex_create_firewall(self): firewall_name = 'lcfirewall' allowed = [{'IPProtocol': 'tcp', 'ports': ['4567']}] source_tags = ['libcloud'] firewall = self.driver.ex_create_firewall(firewall_name, allowed, source_tags=source_tags) self.assertTrue(isinstance(firewall, GCEFirewall)) self.assertEqual(firewall.name, firewall_name) def test_ex_create_forwarding_rule(self): fwr_name = 'lcforwardingrule' targetpool = 'lctargetpool' region = 'us-central1' address = 'lcaddress' port_range = '8000-8500' description = 'test forwarding rule' 
fwr = self.driver.ex_create_forwarding_rule( fwr_name, targetpool, region=region, address=address, port_range=port_range, description=description) self.assertTrue(isinstance(fwr, GCEForwardingRule)) self.assertEqual(fwr.name, fwr_name) self.assertEqual(fwr.region.name, region) self.assertEqual(fwr.protocol, 'TCP') self.assertEqual(fwr.extra['portRange'], port_range) self.assertEqual(fwr.extra['description'], description) def test_ex_create_forwarding_rule_global(self): fwr_name = 'http-rule' target_name = 'web-proxy' address = 'lcaddressglobal' port_range = '80-80' description = 'global forwarding rule' for target in (target_name, self.driver.ex_get_targethttpproxy(target_name)): fwr = self.driver.ex_create_forwarding_rule( fwr_name, target, global_rule=True, address=address, port_range=port_range, description=description) self.assertTrue(isinstance(fwr, GCEForwardingRule)) self.assertEqual(fwr.name, fwr_name) self.assertEqual(fwr.extra['portRange'], port_range) self.assertEqual(fwr.extra['description'], description) def test_ex_create_forwarding_rule_targetpool_keyword(self): """Test backwards compatibility with the targetpool kwarg.""" fwr_name = 'lcforwardingrule' targetpool = 'lctargetpool' region = 'us-central1' address = self.driver.ex_get_address('lcaddress') port_range = '8000-8500' description = 'test forwarding rule' fwr = self.driver.ex_create_forwarding_rule( fwr_name, targetpool=targetpool, region=region, address=address, port_range=port_range, description=description) self.assertTrue(isinstance(fwr, GCEForwardingRule)) self.assertEqual(fwr.name, fwr_name) self.assertEqual(fwr.region.name, region) self.assertEqual(fwr.protocol, 'TCP') self.assertEqual(fwr.extra['portRange'], port_range) self.assertEqual(fwr.extra['description'], description) def test_ex_create_route(self): route_name = 'lcdemoroute' dest_range = '192.168.25.0/24' priority = 1000 route = self.driver.ex_create_route(route_name, dest_range) self.assertTrue(isinstance(route, GCERoute)) 
        # NOTE(review): these assertions finish the route-creation test whose
        # `def` line lies above this chunk.
        self.assertEqual(route.name, route_name)
        self.assertEqual(route.priority, priority)
        self.assertTrue("tag1" in route.tags)
        self.assertTrue(route.extra['nextHopInstance'].endswith(
            'libcloud-100'))
        self.assertEqual(route.dest_range, dest_range)

    def test_ex_create_network(self):
        """Create a legacy-mode network and verify name/CIDR round-trip."""
        network_name = 'lcnetwork'
        cidr = '10.11.0.0/16'
        network = self.driver.ex_create_network(network_name, cidr)
        self.assertTrue(isinstance(network, GCENetwork))
        self.assertEqual(network.name, network_name)
        self.assertEqual(network.cidr, cidr)

    def test_ex_create_network_bad_options(self):
        """Invalid mode/CIDR combinations must raise ValueError."""
        network_name = 'lcnetwork'
        cidr = '10.11.0.0/16'
        # mode='auto' rejects an explicit CIDR.
        self.assertRaises(ValueError, self.driver.ex_create_network,
                          network_name, cidr, mode='auto')
        # Unknown mode string.
        self.assertRaises(ValueError, self.driver.ex_create_network,
                          network_name, cidr, mode='foobar')
        # mode='legacy' requires a CIDR.
        self.assertRaises(ValueError, self.driver.ex_create_network,
                          network_name, None, mode='legacy')

    def test_ex_set_machine_type_notstopped(self):
        # get running node, change machine type — must fail while RUNNING
        zone = 'us-central1-a'
        node = self.driver.ex_get_node('node-name', zone)
        self.assertRaises(GoogleBaseError, self.driver.ex_set_machine_type,
                          node, 'custom-4-61440')

    def test_ex_set_machine_type(self):
        # get stopped node, change machine type — allowed once TERMINATED
        zone = 'us-central1-a'
        node = self.driver.ex_get_node('stopped-node', zone)
        self.assertEqual(node.size, 'n1-standard-1')
        self.assertEqual(node.extra['status'], 'TERMINATED')
        self.assertTrue(
            self.driver.ex_set_machine_type(node, 'custom-4-11264'))

    def test_ex_node_start(self):
        """A stopped node can be started."""
        zone = 'us-central1-a'
        node = self.driver.ex_get_node('stopped-node', zone)
        self.assertTrue(self.driver.ex_start_node(node))

    def test_ex_node_stop(self):
        """Stopping works for a running node and is idempotent."""
        zone = 'us-central1-a'
        node = self.driver.ex_get_node('node-name', zone)
        self.assertTrue(self.driver.ex_stop_node(node))
        # try and stop a stopped node (should work)
        zone = 'us-central1-a'
        node = self.driver.ex_get_node('stopped-node', zone)
        self.assertTrue(self.driver.ex_stop_node(node))

    def test_create_node_req(self):
        """Verify the request path and body built by _create_node_req."""
        image = self.driver.ex_get_image('debian-7')
        size = self.driver.ex_get_size('n1-standard-1')
        location = self.driver.zone
        network = self.driver.ex_get_network('default')
        tags = ['libcloud']
        metadata = [{'key': 'test_key', 'value': 'test_value'}]
        boot_disk = self.driver.ex_get_volume('lcdisk')
        node_request, node_data = self.driver._create_node_req(
            'lcnode', size, image, location, network, tags, metadata,
            boot_disk)
        self.assertEqual(node_request, '/zones/%s/instances' % location.name)
        self.assertEqual(node_data['metadata']['items'][0]['key'], 'test_key')
        self.assertEqual(node_data['tags']['items'][0], 'libcloud')
        self.assertEqual(node_data['name'], 'lcnode')
        self.assertTrue(node_data['disks'][0]['boot'])
        # Default service account with a single scope is attached implicitly.
        self.assertIsInstance(node_data['serviceAccounts'], list)
        self.assertIsInstance(node_data['serviceAccounts'][0], dict)
        self.assertEqual(node_data['serviceAccounts'][0]['email'], 'default')
        self.assertIsInstance(node_data['serviceAccounts'][0]['scopes'], list)
        self.assertEqual(len(node_data['serviceAccounts'][0]['scopes']), 1)
        self.assertEqual(len(node_data['networkInterfaces']), 1)
        self.assertTrue(node_data['networkInterfaces'][0][
            'network'].startswith('https://'))

    def test_create_node_network_opts(self):
        """Exercise the network-related keyword options of create_node."""
        node_name = 'node-name'
        size = self.driver.ex_get_size('n1-standard-1')
        image = self.driver.ex_get_image('debian-7')
        zone = self.driver.ex_get_zone('us-central1-a')
        network = self.driver.ex_get_network('lcnetwork')
        address = self.driver.ex_get_address('lcaddress')
        ex_nic_gce_struct = [
            {
                "network": "global/networks/lcnetwork",
                "accessConfigs": [
                    {
                        "name": "lcnetwork-test",
                        "type": "ONE_TO_ONE_NAT"
                    }
                ]
            }
        ]
        # Test using default
        node = self.driver.create_node(node_name, size, image)
        self.assertEqual(node.extra['networkInterfaces'][0]["name"], 'nic0')
        # Test using just the network
        node = self.driver.create_node(node_name, size, image, location=zone,
                                       ex_network=network)
        self.assertEqual(node.extra['networkInterfaces'][0]["name"], 'nic0')
        # Test using just the struct
        node = self.driver.create_node(node_name, size, image, location=zone,
                                       ex_nic_gce_struct=ex_nic_gce_struct)
        self.assertEqual(node.extra['networkInterfaces'][0]["name"], 'nic0')
        # Test both address and struct, should fail
        self.assertRaises(ValueError, self.driver.create_node, node_name,
                          size, image, location=zone, external_ip=address,
                          ex_nic_gce_struct=ex_nic_gce_struct)
        # Test both ex_network and struct, should fail
        self.assertRaises(ValueError, self.driver.create_node, node_name,
                          size, image, location=zone, ex_network=network,
                          ex_nic_gce_struct=ex_nic_gce_struct)

    def test_create_node_subnetwork_opts(self):
        """Exercise subnetwork selection by object, struct and selfLink."""
        node_name = 'sn-node-name'
        size = self.driver.ex_get_size('n1-standard-1')
        image = self.driver.ex_get_image('debian-7')
        zone = self.driver.ex_get_zone('us-central1-a')
        network = self.driver.ex_get_network('custom-network')
        subnetwork = self.driver.ex_get_subnetwork('cf-972cf02e6ad49112')
        ex_nic_gce_struct = [
            {
                "network": "global/networks/custom-network",
                "subnetwork": "projects/project_name/regions/us-central1/subnetworks/cf-972cf02e6ad49112",
                "accessConfigs": [
                    {
                        "name": "External NAT",
                        "type": "ONE_TO_ONE_NAT"
                    }
                ]
            }
        ]
        # Test using just the network and subnetwork
        node = self.driver.create_node(node_name, size, image, location=zone,
                                       ex_network=network,
                                       ex_subnetwork=subnetwork)
        self.assertEqual(node.extra['networkInterfaces'][0]["name"], 'nic0')
        self.assertEqual(
            node.extra['networkInterfaces'][0]["subnetwork"].split('/')[-1],
            'cf-972cf02e6ad49112')
        # Test using just the struct
        node = self.driver.create_node(node_name, size, image, location=zone,
                                       ex_nic_gce_struct=ex_nic_gce_struct)
        self.assertEqual(node.extra['networkInterfaces'][0]["name"], 'nic0')
        self.assertEqual(
            node.extra['networkInterfaces'][0]["subnetwork"].split('/')[-1],
            'cf-972cf02e6ad49112')
        # Test using subnetwork selfLink
        node = self.driver.create_node(
            node_name, size, image, location=zone, ex_network=network,
            ex_subnetwork=subnetwork.extra['selfLink'])
        # NOTE(review): these assertions finish the subnetwork-selfLink case
        # of the subnetwork test started above this chunk.
        self.assertEqual(node.extra['networkInterfaces'][0]["name"], 'nic0')
        self.assertEqual(
            node.extra['networkInterfaces'][0]["subnetwork"].split('/')[-1],
            'cf-972cf02e6ad49112')

    def test_create_node_disk_opts(self):
        """Exercise boot-disk and ex_disks_gce_struct options of create_node."""
        node_name = 'node-name'
        size = self.driver.ex_get_size('n1-standard-1')
        image = self.driver.ex_get_image('debian-7')
        boot_disk = self.driver.ex_get_volume('lcdisk')
        disk_type = self.driver.ex_get_disktype('pd-ssd', 'us-central1-a')
        DEMO_BASE_NAME = "lc-test"
        gce_disk_struct = [
            {
                "type": "PERSISTENT",
                "deviceName": '%s-gstruct' % DEMO_BASE_NAME,
                "initializeParams": {
                    "diskName": '%s-gstruct' % DEMO_BASE_NAME,
                    "sourceImage": image.extra['selfLink']
                },
                "boot": True,
                "autoDelete": True
            },
            {
                "type": "SCRATCH",
                "deviceName": '%s-gstruct-lssd' % DEMO_BASE_NAME,
                "initializeParams": {
                    "diskType": disk_type.extra['selfLink']
                },
                "autoDelete": True
            }
        ]
        # No image and no boot disk at all -> error.
        self.assertRaises(ValueError, self.driver.create_node, node_name,
                          size, None)
        node = self.driver.create_node(node_name, size, image)
        self.assertTrue(isinstance(node, Node))
        node = self.driver.create_node(node_name, size, None,
                                       ex_boot_disk=boot_disk)
        self.assertTrue(isinstance(node, Node))
        node = self.driver.create_node(node_name, size, None,
                                       ex_disks_gce_struct=gce_disk_struct)
        self.assertTrue(isinstance(node, Node))
        # Boot disk and disk struct are mutually exclusive.
        self.assertRaises(ValueError, self.driver.create_node, node_name,
                          size, None, ex_boot_disk=boot_disk,
                          ex_disks_gce_struct=gce_disk_struct)

    def test_create_node(self):
        """Basic create_node happy path."""
        node_name = 'node-name'
        image = self.driver.ex_get_image('debian-7')
        size = self.driver.ex_get_size('n1-standard-1')
        node = self.driver.create_node(node_name, size, image)
        self.assertTrue(isinstance(node, Node))
        self.assertEqual(node.name, node_name)

    def test_create_node_image_family(self):
        """ex_image_family works alone but conflicts with an explicit image."""
        node_name = 'node-name'
        size = self.driver.ex_get_size('n1-standard-1')
        node = self.driver.create_node(node_name, size, image=None,
                                       ex_image_family='coreos')
        self.assertTrue(isinstance(node, Node))
        self.assertEqual(node.name, node_name)
        image = self.driver.ex_get_image('debian-7')
        self.assertRaises(ValueError, self.driver.create_node, node_name,
                          size, image, ex_image_family='coreos')

    def test_create_node_req_with_serviceaccounts(self):
        """Scope aliases in ex_service_accounts expand to full scope URLs."""
        image = self.driver.ex_get_image('debian-7')
        size = self.driver.ex_get_size('n1-standard-1')
        location = self.driver.zone
        network = self.driver.ex_get_network('default')
        # ex_service_accounts with specific scopes, default 'email'
        ex_sa = [{'scopes': ['compute-ro', 'pubsub', 'storage-ro']}]
        node_request, node_data = self.driver._create_node_req(
            'lcnode', size, image, location, network,
            ex_service_accounts=ex_sa)
        self.assertIsInstance(node_data['serviceAccounts'], list)
        self.assertIsInstance(node_data['serviceAccounts'][0], dict)
        self.assertEqual(node_data['serviceAccounts'][0]['email'], 'default')
        self.assertIsInstance(node_data['serviceAccounts'][0]['scopes'], list)
        self.assertEqual(len(node_data['serviceAccounts'][0]['scopes']), 3)
        self.assertTrue('https://www.googleapis.com/auth/devstorage.read_only'
                        in node_data['serviceAccounts'][0]['scopes'])
        self.assertTrue('https://www.googleapis.com/auth/compute.readonly'
                        in node_data['serviceAccounts'][0]['scopes'])

    def test_format_metadata(self):
        """_format_metadata accepts several input shapes and rejects others."""
        # list of {'key': ..., 'value': ...} dicts
        in_md = [{'key': 'k0', 'value': 'v0'}, {'key': 'k1', 'value': 'v1'}]
        out_md = self.driver._format_metadata('fp', in_md)
        self.assertTrue('fingerprint' in out_md)
        self.assertEqual(out_md['fingerprint'], 'fp')
        self.assertTrue('items' in out_md)
        self.assertEqual(len(out_md['items']), 2)
        self.assertTrue(out_md['items'][0]['key'] in ['k0', 'k1'])
        self.assertTrue(out_md['items'][0]['value'] in ['v0', 'v1'])
        # list of single-entry {name: value} dicts
        in_md = [{'k0': 'v0'}, {'k1': 'v1'}]
        out_md = self.driver._format_metadata('fp', in_md)
        self.assertTrue('fingerprint' in out_md)
        self.assertEqual(out_md['fingerprint'], 'fp')
        self.assertTrue('items' in out_md)
        self.assertEqual(len(out_md['items']), 2)
        self.assertTrue(out_md['items'][0]['key'] in ['k0', 'k1'])
        self.assertTrue(out_md['items'][0]['value'] in ['v0', 'v1'])
        # single {'key': ..., 'value': ...} dict
        in_md = {'key': 'k0', 'value': 'v0'}
        out_md = self.driver._format_metadata('fp', in_md)
        self.assertTrue('fingerprint' in out_md)
        self.assertEqual(out_md['fingerprint'], 'fp')
        self.assertTrue('items' in out_md)
        self.assertEqual(len(out_md['items']), 1, out_md)
        self.assertEqual(out_md['items'][0]['key'], 'k0')
        self.assertEqual(out_md['items'][0]['value'], 'v0')
        # single {name: value} dict
        in_md = {'k0': 'v0'}
        out_md = self.driver._format_metadata('fp', in_md)
        self.assertTrue('fingerprint' in out_md)
        self.assertEqual(out_md['fingerprint'], 'fp')
        self.assertTrue('items' in out_md)
        self.assertEqual(len(out_md['items']), 1)
        self.assertEqual(out_md['items'][0]['key'], 'k0')
        self.assertEqual(out_md['items'][0]['value'], 'v0')
        # multi-entry plain dict
        in_md = {'k0': 'v0', 'k1': 'v1', 'k2': 'v2'}
        out_md = self.driver._format_metadata('fp', in_md)
        self.assertTrue('fingerprint' in out_md)
        self.assertEqual(out_md['fingerprint'], 'fp')
        self.assertTrue('items' in out_md)
        self.assertEqual(len(out_md['items']), 3)
        keys = [x['key'] for x in out_md['items']]
        vals = [x['value'] for x in out_md['items']]
        keys.sort()
        vals.sort()
        self.assertEqual(keys, ['k0', 'k1', 'k2'])
        self.assertEqual(vals, ['v0', 'v1', 'v2'])
        # already GCE-formatted {'items': [...]} dict
        in_md = {'items': [{'key': 'k0', 'value': 'v0'},
                           {'key': 'k1', 'value': 'v1'}]}
        out_md = self.driver._format_metadata('fp', in_md)
        self.assertTrue('fingerprint' in out_md)
        self.assertEqual(out_md['fingerprint'], 'fp')
        self.assertTrue('items' in out_md)
        self.assertEqual(len(out_md['items']), 2)
        self.assertTrue(out_md['items'][0]['key'] in ['k0', 'k1'])
        self.assertTrue(out_md['items'][0]['value'] in ['v0', 'v1'])
        # malformed inputs raise ValueError
        in_md = {'items': 'foo'}
        self.assertRaises(ValueError, self.driver._format_metadata, 'fp',
                          in_md)
        in_md = {'items': {'key': 'k1', 'value': 'v0'}}
        self.assertRaises(ValueError, self.driver._format_metadata, 'fp',
                          in_md)
        in_md = ['k0', 'v1']
        self.assertRaises(ValueError, self.driver._format_metadata, 'fp',
                          in_md)

    def test_create_node_with_metadata(self):
        """Metadata variants are normalized into the request body."""
        node_name = 'node-name'
        image = self.driver.ex_get_image('debian-7')
        size = self.driver.ex_get_size('n1-standard-1')
        zone = self.driver.ex_get_zone('us-central1-a')
        # md is a list of dicts, each with 'key' and 'value' for
        # backwards compatibility
        md = [{'key': 'k0', 'value': 'v0'}, {'key': 'k1', 'value': 'v1'}]
        request, data = self.driver._create_node_req(node_name, size, image,
                                                     zone, metadata=md)
        self.assertTrue('items' in data['metadata'])
        self.assertEqual(len(data['metadata']['items']), 2)
        # md doesn't contain "items" key
        md = {'key': 'key1', 'value': 'value1'}
        request, data = self.driver._create_node_req(node_name, size, image,
                                                     zone, metadata=md)
        self.assertTrue('items' in data['metadata'])
        self.assertEqual(len(data['metadata']['items']), 1)
        # md contains "items" key
        md = {'items': [{'key': 'k0', 'value': 'v0'}]}
        request, data = self.driver._create_node_req(node_name, size, image,
                                                     zone, metadata=md)
        self.assertTrue('items' in data['metadata'])
        self.assertEqual(len(data['metadata']['items']), 1)
        self.assertEqual(data['metadata']['items'][0]['key'], 'k0')
        self.assertEqual(data['metadata']['items'][0]['value'], 'v0')

    def test_create_node_existing(self):
        """Creating a node whose name already exists raises."""
        node_name = 'libcloud-demo-europe-np-node'
        image = self.driver.ex_get_image('debian-7')
        size = self.driver.ex_get_size('n1-standard-1', zone='europe-west1-a')
        self.assertRaises(ResourceExistsError, self.driver.create_node,
                          node_name, size, image, location='europe-west1-a')

    def test_ex_create_multiple_nodes(self):
        """Batch creation names nodes base_name-000, -001, ..."""
        base_name = 'lcnode'
        image = self.driver.ex_get_image('debian-7')
        size = self.driver.ex_get_size('n1-standard-1')
        number = 2
        nodes = self.driver.ex_create_multiple_nodes(base_name, size, image,
                                                     number)
        self.assertEqual(len(nodes), 2)
        self.assertTrue(isinstance(nodes[0], Node))
        self.assertTrue(isinstance(nodes[1], Node))
        self.assertEqual(nodes[0].name, '%s-000' % base_name)
        self.assertEqual(nodes[1].name, '%s-001' % base_name)

    def test_ex_create_multiple_nodes_image_family(self):
        """Batch creation via ex_image_family; conflicts with explicit image."""
        base_name = 'lcnode'
        image = None
        size = self.driver.ex_get_size('n1-standard-1')
        number = 2
        nodes = self.driver.ex_create_multiple_nodes(
            base_name, size, image, number, ex_image_family='coreos')
        self.assertEqual(len(nodes), 2)
        self.assertTrue(isinstance(nodes[0], Node))
        self.assertTrue(isinstance(nodes[1], Node))
        self.assertEqual(nodes[0].name, '%s-000' % base_name)
        self.assertEqual(nodes[1].name, '%s-001' % base_name)
        image = self.driver.ex_get_image('debian-7')
        self.assertRaises(ValueError, self.driver.ex_create_multiple_nodes,
                          base_name, size, image, number,
                          ex_image_family='coreos')

    def test_ex_create_targethttpproxy(self):
        """Create an HTTP proxy with urlmap given by name or object."""
        proxy_name = 'web-proxy'
        urlmap_name = 'web-map'
        for urlmap in (urlmap_name, self.driver.ex_get_urlmap(urlmap_name)):
            proxy = self.driver.ex_create_targethttpproxy(proxy_name, urlmap)
            self.assertTrue(isinstance(proxy, GCETargetHttpProxy))
            self.assertEqual(proxy_name, proxy.name)

    def test_ex_create_targetinstance(self):
        """Create a target instance bound to an existing node."""
        targetinstance_name = 'lctargetinstance'
        zone = 'us-central1-a'
        node = self.driver.ex_get_node('node-name', zone)
        targetinstance = self.driver.ex_create_targetinstance(
            targetinstance_name, zone=zone, node=node)
        self.assertEqual(targetinstance.name, targetinstance_name)
        self.assertEqual(targetinstance.zone.name, zone)

    def test_ex_create_targetpool(self):
        """Create a target pool with healthchecks and member nodes."""
        targetpool_name = 'lctargetpool'
        region = 'us-central1'
        healthchecks = ['libcloud-lb-demo-healthcheck']
        node1 = self.driver.ex_get_node('libcloud-lb-demo-www-000',
                                        'us-central1-b')
        node2 = self.driver.ex_get_node('libcloud-lb-demo-www-001',
                                        'us-central1-b')
        nodes = [node1, node2]
        targetpool = self.driver.ex_create_targetpool(
            targetpool_name, region=region, healthchecks=healthchecks,
            nodes=nodes)
        self.assertEqual(targetpool.name, targetpool_name)
        self.assertEqual(len(targetpool.nodes), len(nodes))
        self.assertEqual(targetpool.region.name, region)

    def test_ex_create_targetpool_session_affinity(self):
        """The session_affinity option is passed through to the pool."""
        targetpool_name = 'lctargetpool-sticky'
        region = 'us-central1'
        session_affinity = 'CLIENT_IP_PROTO'
        targetpool = self.driver.ex_create_targetpool(
            targetpool_name, region=region,
            session_affinity=session_affinity)
        self.assertEqual(targetpool.name, targetpool_name)
        self.assertEqual(
            targetpool.extra.get('sessionAffinity'), session_affinity)

    def test_ex_create_urlmap(self):
        """Create a urlmap with default service given by name or object."""
        urlmap_name = 'web-map'
        for service in ('web-service',
                        self.driver.ex_get_backendservice('web-service')):
            urlmap = self.driver.ex_create_urlmap(urlmap_name, service)
            self.assertTrue(isinstance(urlmap, GCEUrlMap))
            self.assertEqual(urlmap_name, urlmap.name)

    def test_create_volume_image_family(self):
        """create_volume via ex_image_family; conflicts with explicit image."""
        volume_name = 'lcdisk'
        size = 10
        volume = self.driver.create_volume(size, volume_name,
                                           ex_image_family='coreos')
        self.assertTrue(isinstance(volume, StorageVolume))
        self.assertEqual(volume.name, volume_name)
        image = self.driver.ex_get_image('debian-7')
        self.assertRaises(ValueError, self.driver.create_volume, size,
                          volume_name, image=image, ex_image_family='coreos')

    def test_create_volume_location(self):
        """create_volume honors an explicit zone location."""
        volume_name = 'lcdisk'
        size = 10
        zone = self.driver.zone
        volume = self.driver.create_volume(size, volume_name, location=zone)
        self.assertTrue(isinstance(volume, StorageVolume))
        self.assertEqual(volume.name, volume_name)

    def test_ex_create_volume_snapshot(self):
        """Snapshot a volume via the StorageVolume helper."""
        snapshot_name = 'lcsnapshot'
        volume = self.driver.ex_get_volume('lcdisk')
        snapshot = volume.snapshot(snapshot_name)
        self.assertEqual(snapshot.name, snapshot_name)
        self.assertEqual(snapshot.size, '10')

    def test_create_volume_ssd(self):
        """create_volume with the pd-ssd disk type."""
        volume_name = 'lcdisk'
        size = 10
        volume = self.driver.create_volume(size, volume_name,
                                           ex_disk_type='pd-ssd')
        self.assertTrue(isinstance(volume, StorageVolume))
        self.assertEqual(volume.extra['type'], 'pd-ssd')

    def test_create_volume(self):
        """Basic create_volume happy path."""
        volume_name = 'lcdisk'
        size = 10
        volume = self.driver.create_volume(size, volume_name)
        self.assertTrue(isinstance(volume, StorageVolume))
        self.assertEqual(volume.name, volume_name)

    def test_ex_update_healthcheck(self):
        """Updating a fetched healthcheck returns a GCEHealthCheck."""
        healthcheck_name = 'lchealthcheck'
        healthcheck = self.driver.ex_get_healthcheck(healthcheck_name)
        healthcheck.port = 9000
        healthcheck2 = self.driver.ex_update_healthcheck(healthcheck)
        self.assertTrue(isinstance(healthcheck2, GCEHealthCheck))

    def test_ex_update_firewall(self):
        """Updating a fetched firewall returns a GCEFirewall."""
        firewall_name = 'lcfirewall'
        firewall = self.driver.ex_get_firewall(firewall_name)
        firewall.source_ranges = ['10.0.0.0/16']
        firewall.source_tags = ['libcloud', 'test']
        firewall2 = self.driver.ex_update_firewall(firewall)
        self.assertTrue(isinstance(firewall2, GCEFirewall))

    def test_ex_targetpool_gethealth(self):
        """get_health returns per-node health entries."""
        targetpool = self.driver.ex_get_targetpool('lb-pool')
        health = targetpool.get_health('libcloud-lb-demo-www-000')
        self.assertEqual(len(health), 1)
        self.assertTrue('node' in health[0])
        self.assertTrue('health' in health[0])
        self.assertEqual(health[0]['health'], 'UNHEALTHY')

    def test_ex_targetpool_with_backup_pool(self):
        """Backup pool info is exposed in the pool's extra dict."""
        targetpool = self.driver.ex_get_targetpool('lb-pool')
        self.assertTrue('backupPool' in targetpool.extra)
        self.assertTrue('failoverRatio' in targetpool.extra)

    def test_ex_targetpool_setbackup(self):
        """set_backup_targetpool accepts a pool and failover ratio."""
        targetpool = self.driver.ex_get_targetpool('lb-pool')
        backup_targetpool = self.driver.ex_get_targetpool('backup-pool')
        self.assertTrue(
            targetpool.set_backup_targetpool(backup_targetpool, 0.1))

    def test_ex_targetpool_remove_add_node(self):
        """Nodes can be removed/added by object or selfLink; dupes filtered."""
        targetpool = self.driver.ex_get_targetpool('lctargetpool')
        node = self.driver.ex_get_node('libcloud-lb-demo-www-001',
                                       'us-central1-b')
        remove_node = self.driver.ex_targetpool_remove_node(targetpool, node)
        self.assertTrue(remove_node)
        self.assertEqual(len(targetpool.nodes), 1)
        add_node = self.driver.ex_targetpool_add_node(targetpool,
                                                      node.extra['selfLink'])
        self.assertTrue(add_node)
        self.assertEqual(len(targetpool.nodes), 2)
        remove_node = self.driver.ex_targetpool_remove_node(
            targetpool, node.extra['selfLink'])
        self.assertTrue(remove_node)
        self.assertEqual(len(targetpool.nodes), 1)
        add_node = self.driver.ex_targetpool_add_node(targetpool, node)
        self.assertTrue(add_node)
        self.assertEqual(len(targetpool.nodes), 2)
        # check that duplicates are filtered
        add_node = self.driver.ex_targetpool_add_node(targetpool,
                                                      node.extra['selfLink'])
        self.assertTrue(add_node)
        self.assertEqual(len(targetpool.nodes), 2)

    def test_ex_targetpool_remove_add_healthcheck(self):
        """Healthchecks can be removed from and re-added to a pool."""
        targetpool = self.driver.ex_get_targetpool('lctargetpool')
        healthcheck = self.driver.ex_get_healthcheck(
            'libcloud-lb-demo-healthcheck')
        remove_healthcheck = self.driver.ex_targetpool_remove_healthcheck(
            targetpool, healthcheck)
        self.assertTrue(remove_healthcheck)
        self.assertEqual(len(targetpool.healthchecks), 0)
        add_healthcheck = self.driver.ex_targetpool_add_healthcheck(
            targetpool, healthcheck)
        self.assertTrue(add_healthcheck)
        self.assertEqual(len(targetpool.healthchecks), 1)

    def test_reboot_node(self):
        node = self.driver.ex_get_node('node-name')
        reboot = self.driver.reboot_node(node)
        self.assertTrue(reboot)

    def test_ex_set_node_tags(self):
        new_tags = ['libcloud']
        node = self.driver.ex_get_node('node-name')
        set_tags = self.driver.ex_set_node_tags(node, new_tags)
        self.assertTrue(set_tags)

    def test_attach_volume_invalid_usecase(self):
        """attach_volume rejects missing volume/device combinations."""
        node = self.driver.ex_get_node('node-name')
        self.assertRaises(ValueError, self.driver.attach_volume, node, None)
        self.assertRaises(ValueError, self.driver.attach_volume, node, None,
                          ex_source='foo/bar', device=None)

    def test_attach_volume(self):
        volume = self.driver.ex_get_volume('lcdisk')
        node = self.driver.ex_get_node('node-name')
        attach = volume.attach(node)
        self.assertTrue(attach)

    def test_detach_volume(self):
        volume = self.driver.ex_get_volume('lcdisk')
        node = self.driver.ex_get_node('node-name')
        # This fails since the node is required
        detach = volume.detach()
        self.assertFalse(detach)
        # This should pass
        detach = self.driver.detach_volume(volume, node)
        self.assertTrue(detach)

    def test_ex_destroy_address_global(self):
        """Destroy a global (region == 'global') address."""
        address = self.driver.ex_get_address('lcaddressglobal', 'global')
        self.assertEqual(address.name, 'lcaddressglobal')
        self.assertEqual(address.region, 'global')
        destroyed = address.destroy()
        self.assertTrue(destroyed)

    def test_ex_destroy_address(self):
        address = self.driver.ex_get_address('lcaddress')
        destroyed = address.destroy()
        self.assertTrue(destroyed)

    def test_ex_destroy_backendservice(self):
        backendservice = self.driver.ex_get_backendservice('web-service')
        destroyed = backendservice.destroy()
        self.assertTrue(destroyed)

    def test_ex_destroy_healthcheck(self):
        hc = self.driver.ex_get_healthcheck('lchealthcheck')
        destroyed = hc.destroy()
        self.assertTrue(destroyed)

    def test_ex_delete_image(self):
        """Deleting a missing image raises; deleting an existing one works."""
        self.assertRaises(ResourceNotFoundError, self.driver.ex_get_image,
                          'missing-image')
        self.assertRaises(ResourceNotFoundError, self.driver.ex_delete_image,
                          'missing-image')
        image = self.driver.ex_get_image('debian-7')
        deleted = self.driver.ex_delete_image(image)
        self.assertTrue(deleted)

    def test_ex_deprecate_image(self):
        """Deprecating an image records the deprecation timestamps."""
        dep_ts = '2064-03-11T20:18:36.194-07:00'
        obs_ts = '2074-03-11T20:18:36.194-07:00'
        del_ts = '2084-03-11T20:18:36.194-07:00'
        image = self.driver.ex_get_image('debian-7-wheezy-v20131014')
        deprecated = image.deprecate('debian-7', 'DEPRECATED',
                                     deprecated=dep_ts, obsolete=obs_ts,
                                     deleted=del_ts)
        self.assertTrue(deprecated)
        self.assertEqual(image.extra['deprecated']['deprecated'], dep_ts)
        self.assertEqual(image.extra['deprecated']['obsolete'], obs_ts)
        self.assertEqual(image.extra['deprecated']['deleted'], del_ts)

    def test_ex_destroy_firewall(self):
        firewall = self.driver.ex_get_firewall('lcfirewall')
        destroyed = firewall.destroy()
        self.assertTrue(destroyed)

    def test_ex_destroy_forwarding_rule(self):
        fwr = self.driver.ex_get_forwarding_rule('lcforwardingrule')
        destroyed = fwr.destroy()
        self.assertTrue(destroyed)

    def test_ex_destroy_forwarding_rule_global(self):
        fwr = self.driver.ex_get_forwarding_rule('http-rule', global_rule=True)
        destroyed = fwr.destroy()
        self.assertTrue(destroyed)

    def test_ex_destroy_route(self):
        route = self.driver.ex_get_route('lcdemoroute')
        destroyed = route.destroy()
        self.assertTrue(destroyed)

    def test_ex_destroy_network(self):
        network = self.driver.ex_get_network('lcnetwork')
        destroyed = network.destroy()
        self.assertTrue(destroyed)

    def test_destroy_node(self):
        node = self.driver.ex_get_node('node-name')
        destroyed = node.destroy()
        self.assertTrue(destroyed)

    def test_ex_destroy_multiple_nodes(self):
        """Batch destroy returns one success flag per node."""
        nodes = []
        nodes.append(self.driver.ex_get_node('lcnode-000'))
        nodes.append(self.driver.ex_get_node('lcnode-001'))
        destroyed = self.driver.ex_destroy_multiple_nodes(nodes)
        for d in destroyed:
            self.assertTrue(d)

    def test_destroy_targethttpproxy(self):
        proxy = self.driver.ex_get_targethttpproxy('web-proxy')
        destroyed = proxy.destroy()
        self.assertTrue(destroyed)

    def test_destroy_targetinstance(self):
        targetinstance = self.driver.ex_get_targetinstance('lctargetinstance')
        self.assertEqual(targetinstance.name, 'lctargetinstance')
        destroyed = targetinstance.destroy()
        self.assertTrue(destroyed)

    def test_destroy_targetpool(self):
        targetpool = self.driver.ex_get_targetpool('lctargetpool')
        destroyed = targetpool.destroy()
        self.assertTrue(destroyed)

    def test_destroy_urlmap(self):
        urlmap = self.driver.ex_get_urlmap('web-map')
        destroyed = urlmap.destroy()
        self.assertTrue(destroyed)

    def test_destroy_volume(self):
        disk = self.driver.ex_get_volume('lcdisk')
        destroyed = disk.destroy()
        self.assertTrue(destroyed)

    def test_ex_set_volume_auto_delete(self):
        node = self.driver.ex_get_node('node-name')
        volume = node.extra['boot_disk']
        auto_delete = self.driver.ex_set_volume_auto_delete(volume, node)
        self.assertTrue(auto_delete)

    def test_destroy_volume_snapshot(self):
        snapshot = self.driver.ex_get_snapshot('lcsnapshot')
        destroyed = snapshot.destroy()
        self.assertTrue(destroyed)

    def test_ex_get_address_global(self):
        """A global address reports region == 'global' (a string)."""
        address_name = 'lcaddressglobal'
        address = self.driver.ex_get_address(address_name, 'global')
        self.assertEqual(address.name, address_name)
        self.assertEqual(address.address, '173.99.99.99')
        self.assertEqual(address.region, 'global')
        self.assertEqual(address.extra['status'], 'RESERVED')

    def test_ex_get_address(self):
        # NOTE(review): body of test_ex_get_address — its `def` line ends the
        # previous chunk.
        address_name = 'lcaddress'
        address = self.driver.ex_get_address(address_name)
        self.assertEqual(address.name, address_name)
        self.assertEqual(address.address, '173.255.113.20')
        self.assertEqual(address.region.name, 'us-central1')
        self.assertEqual(address.extra['status'], 'RESERVED')

    def test_ex_get_backendservice(self):
        """Fetch a backend service with and without attached backends."""
        web_service = self.driver.ex_get_backendservice('web-service')
        self.assertEqual(web_service.name, 'web-service')
        self.assertEqual(web_service.protocol, 'HTTP')
        self.assertEqual(web_service.port, 80)
        self.assertEqual(web_service.timeout, 30)
        self.assertEqual(web_service.healthchecks[0].name, 'basic-check')
        self.assertEqual(len(web_service.healthchecks), 1)
        backends = web_service.backends
        self.assertEqual(len(backends), 2)
        self.assertEqual(backends[0]['balancingMode'], 'RATE')
        self.assertEqual(backends[0]['maxRate'], 100)
        self.assertEqual(backends[0]['capacityScaler'], 1.0)
        # Fixture service with no backends still reports its healthchecks.
        web_service = self.driver.ex_get_backendservice('no-backends')
        self.assertEqual(web_service.name, 'web-service')
        self.assertEqual(web_service.healthchecks[0].name, 'basic-check')
        self.assertEqual(len(web_service.healthchecks), 1)
        self.assertEqual(len(web_service.backends), 0)

    def test_ex_get_healthcheck(self):
        healthcheck_name = 'lchealthcheck'
        healthcheck = self.driver.ex_get_healthcheck(healthcheck_name)
        self.assertEqual(healthcheck.name, healthcheck_name)
        self.assertEqual(healthcheck.port, 8000)
        self.assertEqual(healthcheck.path, '/lc')

    def test_ex_get_firewall(self):
        firewall_name = 'lcfirewall'
        firewall = self.driver.ex_get_firewall(firewall_name)
        self.assertEqual(firewall.name, firewall_name)
        self.assertEqual(firewall.network.name, 'default')
        self.assertEqual(firewall.source_tags, ['libcloud'])

    def test_ex_get_forwarding_rule(self):
        """Fetch a regional forwarding rule (continues in next chunk)."""
        fwr_name = 'lcforwardingrule'
        fwr = self.driver.ex_get_forwarding_rule(fwr_name)
        self.assertEqual(fwr.name, fwr_name)
        self.assertEqual(fwr.extra['portRange'], '8000-8500')
        self.assertEqual(fwr.targetpool.name, 'lctargetpool')
        # NOTE(review): final assertion of test_ex_get_forwarding_rule above.
        self.assertEqual(fwr.protocol, 'TCP')

    def test_ex_get_forwarding_rule_global(self):
        """Fetch a global forwarding rule and its target proxy."""
        fwr_name = 'http-rule'
        fwr = self.driver.ex_get_forwarding_rule(fwr_name, global_rule=True)
        self.assertEqual(fwr.name, fwr_name)
        self.assertEqual(fwr.extra['portRange'], '80-80')
        self.assertEqual(fwr.targetpool.name, 'web-proxy')
        self.assertEqual(fwr.protocol, 'TCP')
        self.assertEqual(fwr.address, '192.0.2.1')
        self.assertEqual(fwr.targetpool.name, 'web-proxy')

    def test_ex_get_image_license(self):
        """Image license info is surfaced via extra['licenses']."""
        image = self.driver.ex_get_image('sles-12-v20141023')
        self.assertTrue('licenses' in image.extra)
        self.assertEqual(image.extra['licenses'][0].name, 'sles-12')
        self.assertTrue(image.extra['licenses'][0].charges_use_fee)

    def test_ex_get_image(self):
        """Partial image names resolve against local and standard projects."""
        partial_name = 'debian-7'
        image = self.driver.ex_get_image(partial_name)
        self.assertEqual(image.name, 'debian-7-wheezy-v20131120')
        # A 'debian-7' image exists in the local project
        self.assertTrue(image.extra['description'].startswith('Debian'))
        partial_name = 'debian-6'
        image = self.driver.ex_get_image(partial_name)
        self.assertEqual(image.name, 'debian-6-squeeze-v20130926')
        self.assertTrue(image.extra['description'].startswith('Debian'))
        # Explicit project list is honored.
        partial_name = 'debian-7'
        image = self.driver.ex_get_image(partial_name, ['debian-cloud'])
        self.assertEqual(image.name, 'debian-7-wheezy-v20131120')
        # Wrong project with standard-project fallback disabled -> not found.
        partial_name = 'debian-7'
        self.assertRaises(ResourceNotFoundError, self.driver.ex_get_image,
                          partial_name, 'suse-cloud',
                          ex_standard_projects=False)

    def test_ex_get_image_from_family(self):
        """Resolve an image from a family name or full family URL."""
        family = 'coreos'
        description = 'CoreOS beta 522.3.0'
        image = self.driver.ex_get_image_from_family(family)
        self.assertEqual(image.name, 'coreos-beta-522-3-0-v20141226')
        self.assertEqual(image.extra['description'], description)
        self.assertEqual(image.extra['family'], family)
        url = ('https://www.googleapis.com/compute/v1/projects/coreos-cloud/'
               'global/images/family/coreos')
        image = self.driver.ex_get_image_from_family(url)
        self.assertEqual(image.name, 'coreos-beta-522-3-0-v20141226')
        # NOTE(review): continues test_ex_get_image_from_family from the
        # previous chunk — URL-based lookup assertions, then project list.
        self.assertEqual(image.extra['description'], description)
        self.assertEqual(image.extra['family'], family)
        # Lookup restricted to an explicit project list.
        project_list = ['coreos-cloud']
        image = self.driver.ex_get_image_from_family(
            family, ex_project_list=project_list, ex_standard_projects=False)
        self.assertEqual(image.name, 'coreos-beta-522-3-0-v20141226')
        self.assertEqual(image.extra['description'], description)
        self.assertEqual(image.extra['family'], family)
        self.assertRaises(ResourceNotFoundError,
                          self.driver.ex_get_image_from_family, 'nofamily')

    def test_ex_get_route(self):
        route_name = 'lcdemoroute'
        route = self.driver.ex_get_route(route_name)
        self.assertEqual(route.name, route_name)
        self.assertEqual(route.dest_range, '192.168.25.0/24')
        self.assertEqual(route.priority, 1000)

    def test_ex_get_network(self):
        network_name = 'lcnetwork'
        network = self.driver.ex_get_network(network_name)
        self.assertEqual(network.name, network_name)
        self.assertEqual(network.cidr, '10.11.0.0/16')
        self.assertEqual(network.extra['gatewayIPv4'], '10.11.0.1')
        self.assertEqual(network.extra['description'], 'A custom network')

    def test_ex_get_node(self):
        """Fetch a node by name/zone; missing nodes raise."""
        node_name = 'node-name'
        zone = 'us-central1-a'
        node = self.driver.ex_get_node(node_name, zone)
        self.assertEqual(node.name, node_name)
        self.assertEqual(node.size, 'n1-standard-1')
        removed_node = 'libcloud-lb-demo-www-002'
        self.assertRaises(ResourceNotFoundError, self.driver.ex_get_node,
                          removed_node, 'us-central1-b')
        missing_node = 'dummy-node'
        self.assertRaises(ResourceNotFoundError, self.driver.ex_get_node,
                          missing_node, 'all')

    def test_ex_get_project(self):
        """Project info exposes quotas and common instance metadata."""
        project = self.driver.ex_get_project()
        self.assertEqual(project.name, 'project_name')
        networks_quota = project.quotas[1]
        self.assertEqual(networks_quota['usage'], 3)
        self.assertEqual(networks_quota['limit'], 5)
        self.assertEqual(networks_quota['metric'], 'NETWORKS')
        self.assertTrue(
            'fingerprint' in project.extra['commonInstanceMetadata'])
        self.assertTrue('items' in project.extra['commonInstanceMetadata'])
        # NOTE(review): finishes test_ex_get_project from the previous chunk.
        self.assertTrue('usageExportLocation' in project.extra)
        self.assertTrue('bucketName' in project.extra['usageExportLocation'])
        self.assertEqual(project.extra['usageExportLocation']['bucketName'],
                         'gs://graphite-usage-reports')

    def test_ex_add_access_config(self):
        """Non-Node first argument raises; valid node succeeds."""
        self.assertRaises(ValueError, self.driver.ex_add_access_config,
                          'node', 'name', 'nic')
        node = self.driver.ex_get_node('node-name', 'us-central1-a')
        self.assertTrue(self.driver.ex_add_access_config(node, 'foo', 'bar'))

    def test_ex_delete_access_config(self):
        """Non-Node first argument raises; valid node succeeds."""
        self.assertRaises(ValueError, self.driver.ex_add_access_config,
                          'node', 'name', 'nic')
        node = self.driver.ex_get_node('node-name', 'us-central1-a')
        self.assertTrue(
            self.driver.ex_delete_access_config(node, 'foo', 'bar'))

    def test_ex_set_usage_export_bucket(self):
        """Bucket must be a gs:// or googleapis URL; bare names raise."""
        self.assertRaises(ValueError,
                          self.driver.ex_set_usage_export_bucket, 'foo')
        bucket_name = 'gs://foo'
        self.driver.ex_set_usage_export_bucket(bucket_name)
        bucket_name = 'https://www.googleapis.com/foo'
        self.driver.ex_set_usage_export_bucket(bucket_name)

    def test__set_project_metadata(self):
        """Exercise sshKeys merge behavior of _set_project_metadata."""
        self.assertEqual(
            len(self.driver._set_project_metadata(None, False, "")), 0)
        # 'delete' metadata, but retain current sshKeys
        md = self.driver._set_project_metadata(None, False, "this is a test")
        self.assertEqual(len(md), 1)
        self.assertEqual(md[0]['key'], 'sshKeys')
        self.assertEqual(md[0]['value'], 'this is a test')
        # 'delete' metadata *and* any existing sshKeys
        md = self.driver._set_project_metadata(None, True, "this is a test")
        self.assertEqual(len(md), 0)
        # add new metadata, keep existing sshKeys, since the new value also
        # has 'sshKeys', we want the final struct to only have one ke/value
        # of sshKeys and it should be the "current_keys"
        gce_md = {'items': [{'key': 'foo', 'value': 'one'},
                            {'key': 'sshKeys', 'value': 'another test'}]}
        md = self.driver._set_project_metadata(gce_md, False, "this is a test")
        self.assertEqual(len(md), 2, str(md))
        sshKeys = ""
        count = 0
        for d in md:
            if d['key'] == 'sshKeys':
                # NOTE(review): loop body of the sshKeys scan started in the
                # previous chunk — collect the surviving sshKeys value.
                count += 1
                sshKeys = d['value']
        self.assertEqual(sshKeys, 'this is a test')
        self.assertEqual(count, 1)
        # add new metadata, overwrite existing sshKeys, in this case, the
        # existing 'sshKeys' value should be replaced
        gce_md = {'items': [{'key': 'foo', 'value': 'one'},
                            {'key': 'sshKeys', 'value': 'another test'}]}
        md = self.driver._set_project_metadata(gce_md, True, "this is a test")
        self.assertEqual(len(md), 2, str(md))
        sshKeys = ""
        count = 0
        for d in md:
            if d['key'] == 'sshKeys':
                count += 1
                sshKeys = d['value']
        self.assertEqual(sshKeys, 'another test')
        self.assertEqual(count, 1)
        # add new metadata, remove existing sshKeys. in this case, we had an
        # 'sshKeys' entry, but it will be removed entirely
        gce_md = {'items': [{'key': 'foo', 'value': 'one'},
                            {'key': 'nokeys', 'value': 'two'}]}
        md = self.driver._set_project_metadata(gce_md, True, "this is a test")
        self.assertEqual(len(md), 2, str(md))
        sshKeys = ""
        count = 0
        for d in md:
            if d['key'] == 'sshKeys':
                count += 1
                sshKeys = d['value']
        self.assertEqual(sshKeys, '')
        self.assertEqual(count, 0)

    def test_ex_set_common_instance_metadata(self):
        """Validate accepted/rejected metadata shapes for the project."""
        # test non-dict
        self.assertRaises(ValueError,
                          self.driver.ex_set_common_instance_metadata,
                          ['bad', 'type'])
        # test standard python dict
        pydict = {'key': 'pydict', 'value': 1}
        self.driver.ex_set_common_instance_metadata(pydict)
        # test GCE badly formatted dict
        bad_gcedict = {'items': 'foo'}
        self.assertRaises(ValueError,
                          self.driver.ex_set_common_instance_metadata,
                          bad_gcedict)
        # test gce formatted dict
        gcedict = {'items': [{'key': 'gcedict1', 'value': 'v1'},
                             {'key': 'gcedict2', 'value': 'v2'}]}
        self.driver.ex_set_common_instance_metadata(gcedict)

    def test_ex_set_node_metadata(self):
        """Validate accepted/rejected metadata shapes for a node."""
        node = self.driver.ex_get_node('node-name', 'us-central1-a')
        # test non-dict
        self.assertRaises(ValueError, self.driver.ex_set_node_metadata,
                          node, ['bad', 'type'])
        # test standard python dict
        pydict = {'key': 'pydict', 'value': 1}
        self.driver.ex_set_node_metadata(node, pydict)
        # test GCE badly formatted dict
bad_gcedict = {'items': 'foo'} self.assertRaises(ValueError, self.driver.ex_set_node_metadata, node, bad_gcedict) # test gce formatted dict gcedict = {'items': [{'key': 'gcedict1', 'value': 'v1'}, {'key': 'gcedict2', 'value': 'v2'}]} self.driver.ex_set_node_metadata(node, gcedict) def test_ex_get_region(self): region_name = 'us-central1' region = self.driver.ex_get_region(region_name) self.assertEqual(region.name, region_name) self.assertEqual(region.status, 'UP') self.assertEqual(region.zones[0].name, 'us-central1-a') def test_ex_get_size(self): size_name = 'n1-standard-1' size = self.driver.ex_get_size(size_name) self.assertEqual(size.name, size_name) self.assertEqual(size.extra['zone'].name, 'us-central1-a') self.assertEqual(size.disk, 10) self.assertEqual(size.ram, 3840) self.assertEqual(size.extra['guestCpus'], 1) def test_ex_get_targethttpproxy(self): targethttpproxy_name = 'web-proxy' targethttpproxy = self.driver.ex_get_targethttpproxy( targethttpproxy_name) self.assertEqual(targethttpproxy.name, targethttpproxy_name) self.assertEqual(targethttpproxy.urlmap.name, 'web-map') def test_ex_get_targetinstance(self): targetinstance_name = 'lctargetinstance' targetinstance = self.driver.ex_get_targetinstance(targetinstance_name) self.assertEqual(targetinstance.name, targetinstance_name) self.assertEqual(targetinstance.zone.name, 'us-central1-a') def test_ex_get_targetpool(self): targetpool_name = 'lctargetpool' targetpool = self.driver.ex_get_targetpool(targetpool_name) self.assertEqual(targetpool.name, targetpool_name) self.assertEqual(len(targetpool.nodes), 2) self.assertEqual(targetpool.region.name, 'us-central1') def test_ex_get_instancegroupmanager(self): igmgr_name = 'myinstancegroup' igmgr = self.driver.ex_get_instancegroupmanager(igmgr_name, 'us-central1-b') self.assertEqual(igmgr.name, igmgr_name) self.assertEqual(igmgr.size, 4) self.assertEqual(igmgr.zone.name, 'us-central1-b') # search all zones igmgr = 
self.driver.ex_get_instancegroupmanager(igmgr_name) self.assertEqual(igmgr.name, igmgr_name) self.assertEqual(igmgr.size, 4) self.assertEqual(igmgr.zone.name, 'us-central1-a') def test_ex_get_instancetemplate(self): instancetemplate_name = 'my-instance-template1' instancetemplate = self.driver.ex_get_instancetemplate( instancetemplate_name) self.assertEqual(instancetemplate.name, instancetemplate_name) self.assertEqual(instancetemplate.extra['properties']['machineType'], 'n1-standard-1') def test_ex_get_snapshot(self): snapshot_name = 'lcsnapshot' snapshot = self.driver.ex_get_snapshot(snapshot_name) self.assertEqual(snapshot.name, snapshot_name) self.assertEqual(snapshot.size, '10') self.assertEqual(snapshot.status, 'READY') def test_ex_get_urlmap(self): urlmap_name = 'web-map' urlmap = self.driver.ex_get_urlmap(urlmap_name) self.assertEqual(urlmap.name, urlmap_name) self.assertEqual(urlmap.default_service.name, 'web-service') def test_ex_get_volume(self): volume_name = 'lcdisk' volume = self.driver.ex_get_volume(volume_name) self.assertEqual(volume.name, volume_name) self.assertEqual(volume.size, '10') self.assertEqual(volume.extra['status'], 'READY') self.assertEqual(volume.extra['type'], 'pd-ssd') def test_ex_get_disktype(self): disktype_name = 'pd-ssd' disktype_zone = 'us-central1-a' disktype = self.driver.ex_get_disktype(disktype_name, disktype_zone) self.assertEqual(disktype.name, disktype_name) self.assertEqual(disktype.zone.name, disktype_zone) self.assertEqual(disktype.extra['description'], 'SSD Persistent Disk') self.assertEqual(disktype.extra['valid_disk_size'], '10GB-10240GB') self.assertEqual(disktype.extra['default_disk_size_gb'], '100') def test_ex_get_zone(self): zone_name = 'us-central1-b' zone = self.driver.ex_get_zone(zone_name) self.assertEqual(zone.name, zone_name) self.assertFalse(zone.time_until_mw) self.assertFalse(zone.next_mw_duration) zone_no_mw = self.driver.ex_get_zone('us-central1-a') self.assertEqual(zone_no_mw.time_until_mw, None) 
class GCEMockHttp(MockHttp):
    """Mock HTTP layer for the GCE driver tests.

    Each request path is translated into a method name by
    ``_get_method_name`` and dispatched to the matching ``_...`` handler
    below; handlers serve canned JSON responses from the ``gce`` fixtures
    directory.  NOTE: the class definition continues beyond this chunk.
    """
    fixtures = ComputeFileFixtures('gce')
    # Default response headers for every handler.
    json_hdr = {'content-type': 'application/json; charset=UTF-8'}

    def _get_method_name(self, type, use_param, qs, path):
        # Strip the versioned API prefix and the test project prefix so the
        # remaining request path maps directly onto a handler method name.
        api_path = '/compute/%s' % API_VERSION
        project_path = '/projects/%s' % GCE_KEYWORD_PARAMS['project']
        path = path.replace(api_path, '')
        # This replace is separate, since there is a call with a different
        # project name
        path = path.replace(project_path, '')
        # The path to get project information is the base path, so use a fake
        # '/project' path instead
        if not path:
            path = '/project'
        method_name = super(GCEMockHttp, self)._get_method_name(
            type, use_param, qs, path)
        return method_name

    def _setUsageExportBucket(self, method, url, body, headers):
        # Only POST replaces the body; other verbs echo the request body.
        if method == 'POST':
            body = self.fixtures.load('setUsageExportBucket_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_custom_node(
            self, method, url, body, header):
        body = self.fixtures.load(
            'zones_us_central1_a_instances_custom_node.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_node_name_setMachineType(
            self, method, url, body, header):
        body = self.fixtures.load(
            'zones_us_central1_a_instances_node_name_setMachineType.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_setMachineType_notstopped(
            self, method, url, body, header):
        body = self.fixtures.load(
            'zones_us_central1_a_operations_operation_setMachineType_notstopped.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_custom_node_setMachineType(
            self, method, url, body, header):
        # Simulated API error for an invalid machine type.
        # NOTE(review): unlike the sibling handlers this returns a Python
        # dict rather than a JSON string -- presumably fine because the test
        # only needs the 400 status to raise; confirm before reusing.
        body = {
            "error": {
                "errors": [
                    {
                        "domain": "global",
                        "reason": "invalid",
                        "message": "Invalid value for field 'resource.machineTypes': "
                                   "'projects/project_name/zones/us-central1-a/machineTypes/custom-1-61440'. "
                                   "Resource was not found.",
                    }
                ],
                "code": 400,
                "message": "Invalid value for field 'resource.machineTypes': "
                           "'projects/project_name/zones/us-central1-a/machineTypes/custom-1-61440'. Resource was not found."
            }
        }
        return (httplib.BAD_REQUEST, body, self.json_hdr,
                httplib.responses[httplib.BAD_REQUEST])

    def _zones_us_central1_a_instances_stopped_node_setMachineType(
            self, method, url, body, header):
        body = self.fixtures.load(
            'zones_us_central1_a_instances_stopped_node_setMachineType.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_setMachineType(
            self, method, url, body, header):
        body = self.fixtures.load(
            'zones_us_central1_a_operations_operation_setMachineType.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_startnode(
            self, method, url, body, header):
        body = self.fixtures.load(
            'zones_us_central1_a_operations_operation_startnode.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_stopped_node_start(
            self, method, url, body, header):
        body = self.fixtures.load(
            'zones_us_central1_a_instances_stopped_node_start.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_stopped_node_stop(
            self, method, url, body, header):
        body = self.fixtures.load(
            'zones_us_central1_a_instances_stopped_node_stop.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_stopped_node(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'zones_us_central1_a_instances_stopped_node.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_stopnode(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'zones_us_central1_a_operations_operation_stopnode.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_node_name_stop(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'zones_us_central1_a_instances_node_name_stop.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_node_name_setMetadata(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'zones_us_central1_a_instances_node_name_setMetadata_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _setCommonInstanceMetadata(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('setCommonInstanceMetadata_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    # --- aggregated list endpoints -------------------------------------

    def _aggregated_subnetworks(self, method, url, body, headers):
        body = self.fixtures.load('aggregated_subnetworks.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _aggregated_addresses(self, method, url, body, headers):
        body = self.fixtures.load('aggregated_addresses.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _aggregated_diskTypes(self, method, url, body, headers):
        body = self.fixtures.load('aggregated_disktypes.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _aggregated_disks(self, method, url, body, headers):
        body = self.fixtures.load('aggregated_disks.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _aggregated_forwardingRules(self, method, url, body, headers):
        body = self.fixtures.load('aggregated_forwardingRules.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _aggregated_instances(self, method, url, body, headers):
        body = self.fixtures.load('aggregated_instances.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _aggregated_instanceGroupManagers(self, method, url, body, headers):
        body = self.fixtures.load('aggregated_instanceGroupManagers.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _aggregated_machineTypes(self, method, url, body, headers):
        body = self.fixtures.load('aggregated_machineTypes.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _aggregated_targetInstances(self, method, url, body, headers):
        body = self.fixtures.load('aggregated_targetInstances.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _aggregated_targetPools(self, method, url, body, headers):
        body = self.fixtures.load('aggregated_targetPools.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    # --- global resources ----------------------------------------------

    def _global_backendServices(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('global_backendServices_post.json')
        else:
            # Tests can pick an alternate fixture by setting
            # 'backendservices_mock' on the test case.
            backend_name = getattr(self.test, 'backendservices_mock',
                                   'web-service')
            body = self.fixtures.load('global_backendServices-%s.json' %
                                      backend_name)
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_backendServices_no_backends(self, method, url, body, headers):
        body = self.fixtures.load('global_backendServices_no_backends.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_backendServices_web_service(self, method, url, body, headers):
        if method == 'DELETE':
            body = self.fixtures.load(
                'global_backendServices_web_service_delete.json')
        else:
            body = self.fixtures.load(
                'global_backendServices_web_service.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_forwardingRules(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('global_forwardingRules_post.json')
        else:
            body = self.fixtures.load('global_forwardingRules.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_forwardingRules_http_rule(self, method, url, body, headers):
        if method == 'DELETE':
            body = self.fixtures.load(
                'global_forwardingRules_http_rule_delete.json')
        else:
            body = self.fixtures.load('global_forwardingRules_http_rule.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_httpHealthChecks(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('global_httpHealthChecks_post.json')
        else:
            body = self.fixtures.load('global_httpHealthChecks.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_httpHealthChecks_default_health_check(self, method, url, body,
                                                      headers):
        body = self.fixtures.load('global_httpHealthChecks_basic-check.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_httpHealthChecks_basic_check(self, method, url, body, headers):
        body = self.fixtures.load('global_httpHealthChecks_basic-check.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_httpHealthChecks_libcloud_lb_demo_healthcheck(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'global_httpHealthChecks_libcloud-lb-demo-healthcheck.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_httpHealthChecks_lchealthcheck(self, method, url, body,
                                               headers):
        if method == 'DELETE':
            body = self.fixtures.load(
                'global_httpHealthChecks_lchealthcheck_delete.json')
        elif method == 'PUT':
            body = self.fixtures.load(
                'global_httpHealthChecks_lchealthcheck_put.json')
        else:
            body = self.fixtures.load(
                'global_httpHealthChecks_lchealthcheck.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_firewalls(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('global_firewalls_post.json')
        else:
            body = self.fixtures.load('global_firewalls.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_firewalls_lcfirewall(self, method, url, body, headers):
        if method == 'DELETE':
            body = self.fixtures.load(
                'global_firewalls_lcfirewall_delete.json')
        elif method == 'PUT':
            body = self.fixtures.load('global_firewalls_lcfirewall_put.json')
        else:
            body = self.fixtures.load('global_firewalls_lcfirewall.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_images(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('global_images_post.json')
        else:
            body = self.fixtures.load('global_images.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_images_debian_7_wheezy_v20131120(self, method, url, body,
                                                 headers):
        # NOTE(review): serves the *_delete fixture for every verb --
        # presumably only DELETE is exercised by the tests; confirm.
        body = self.fixtures.load(
            'global_images_debian_7_wheezy_v20131120_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_images_debian_7_wheezy_v20131014_deprecate(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'global_images_debian_7_wheezy_v20131014_deprecate.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_images_family_coreos(self, method, url, body, headers):
        # Family lookups that should miss return 404 with a shared fixture.
        body = self.fixtures.load('global_images_family_notfound.json')
        return (httplib.NOT_FOUND, body, self.json_hdr,
                httplib.responses[httplib.NOT_FOUND])

    def _global_images_family_nofamily(self, method, url, body, headers):
        body = self.fixtures.load('global_images_family_notfound.json')
        return (httplib.NOT_FOUND, body, self.json_hdr,
                httplib.responses[httplib.NOT_FOUND])

    def _global_routes(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('global_routes_post.json')
        else:
            body = self.fixtures.load('global_routes.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_networks(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('global_networks_post.json')
        else:
            body = self.fixtures.load('global_networks.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_networks_custom_network(self, method, url, body, headers):
        body = self.fixtures.load('global_networks_custom_network.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_networks_cf(self, method, url, body, headers):
        body = self.fixtures.load('global_networks_cf.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_networks_default(self, method, url, body, headers):
        body = self.fixtures.load('global_networks_default.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_networks_libcloud_demo_network(self, method, url, body,
                                               headers):
        body = self.fixtures.load('global_networks_libcloud-demo-network.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_networks_libcloud_demo_europe_network(self, method, url, body,
                                                      headers):
        body = self.fixtures.load(
            'global_networks_libcloud-demo-europe-network.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_routes_lcdemoroute(self, method, url, body, headers):
        if method == 'DELETE':
            body = self.fixtures.load('global_routes_lcdemoroute_delete.json')
        else:
            body = self.fixtures.load('global_routes_lcdemoroute.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_networks_lcnetwork(self, method, url, body, headers):
        if method == 'DELETE':
            body = self.fixtures.load('global_networks_lcnetwork_delete.json')
        else:
            body = self.fixtures.load('global_networks_lcnetwork.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_snapshots(self, method, url, body, headers):
        body = self.fixtures.load('global_snapshots.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_snapshots_lcsnapshot(self, method, url, body, headers):
        if method == 'DELETE':
            body = self.fixtures.load(
                'global_snapshots_lcsnapshot_delete.json')
        else:
            body = self.fixtures.load('global_snapshots_lcsnapshot.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    # --- global operation-status endpoints -----------------------------

    def _global_operations_operation_setUsageExportBucket(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_setUsageExportBucket.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_setCommonInstanceMetadata(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_setCommonInstanceMetadata.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_backendServices_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_backendServices_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_backendServices_web_service_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_backendServices_web_service_delete'
            '.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_forwardingRules_http_rule_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_forwardingRules_http_rule_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_forwardingRules_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_forwardingRules_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_httpHealthChecks_lchealthcheck_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_httpHealthChecks_lchealthcheck_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_images_debian7_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_images_debian7_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_httpHealthChecks_lchealthcheck_put(
            self, method, url, body, headers):
        # NOTE(review): loads the *_delete fixture for a PUT operation --
        # looks intentional fixture reuse, but verify against the test.
        body = self.fixtures.load(
            'operations_operation_global_httpHealthChecks_lchealthcheck_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_httpHealthChecks_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_httpHealthChecks_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_firewalls_lcfirewall_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_firewalls_lcfirewall_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_firewalls_lcfirewall_put(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_firewalls_lcfirewall_put.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_firewalls_post(self, method, url,
                                                           body, headers):
        body = self.fixtures.load(
            'operations_operation_global_firewalls_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_routes_lcdemoroute_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_routes_lcdemoroute_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_networks_lcnetwork_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_networks_lcnetwork_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_routes_post(self, method, url,
                                                        body, headers):
        body = self.fixtures.load(
            'operations_operation_global_routes_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_networks_post(self, method, url,
                                                          body, headers):
        body = self.fixtures.load(
            'operations_operation_global_networks_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_snapshots_lcsnapshot_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_snapshots_lcsnapshot_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_image_post(self, method, url,
                                                       body, headers):
        body = self.fixtures.load(
            'operations_operation_global_image_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_addresses_lcaddressglobal_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_addresses_lcaddressglobal_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_targetHttpProxies_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_targetHttpProxies_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_targetHttpProxies_web_proxy_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_targetHttpProxies_web_proxy_delete'
            '.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_urlMaps_post(self, method, url,
                                                         body, headers):
        body = self.fixtures.load(
            'operations_operation_global_urlMaps_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_urlMaps_web_map_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_global_urlMaps_web_map_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_targetHttpProxies(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('global_targetHttpProxies_post.json')
        else:
            body = self.fixtures.load('global_targetHttpProxies.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_targetHttpProxies_web_proxy(self, method, url, body, headers):
        if method == 'DELETE':
            body = self.fixtures.load(
                'global_targetHttpProxies_web_proxy_delete.json')
        else:
            body = self.fixtures.load(
                'global_targetHttpProxies_web_proxy.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_urlMaps(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('global_urlMaps_post.json')
        else:
            body = self.fixtures.load('global_urlMaps.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_urlMaps_web_map(self, method, url, body, headers):
        if method == 'DELETE':
            body = self.fixtures.load('global_urlMaps_web_map_delete.json')
        else:
            body = self.fixtures.load('global_urlMaps_web_map.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    # --- region/zone scoped endpoints ----------------------------------

    def _regions_us_east1_subnetworks_cf_972cf02e6ad49113(self, method, url,
                                                          body, headers):
        body = self.fixtures.load(
            'regions_us-east1_subnetworks_cf_972cf02e6ad49113.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_subnetworks_cf_972cf02e6ad49112(self, method,
                                                             url, body,
                                                             headers):
        body = self.fixtures.load(
            'regions_us-central1_subnetworks_cf_972cf02e6ad49112.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_addresses_lcaddress_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_addresses_lcaddress_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _global_operations_operation_global_addresses_post(self, method, url,
                                                           body, headers):
        body = self.fixtures.load(
            'operations_operation_global_addresses_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_addresses_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_addresses_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_subnetworks_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_subnetworks_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_forwardingRules_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_forwardingRules_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_forwardingRules_lcforwardingrule_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_forwardingRules_lcforwardingrule_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_node_name_deleteAccessConfig(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_node_name_deleteAccessConfig_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_node_name_serialPort(self, method, url,
                                                            body, headers):
        body = self.fixtures.load(
            'zones_us-central1-a_instances_node_name_getSerialOutput.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_node_name_addAccessConfig(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_node_name_addAccessConfig_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_setMetadata_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us_central1_a_node_name_setMetadata_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_targetInstances_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_targetInstances_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_targetPools_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_targetPools_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_addAccessConfig_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_node_name_addAccessConfig_done.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_deleteAccessConfig_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_node_name_deleteAccessConfig_done.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_targetInstances_lctargetinstance_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_targetInstances_lctargetinstance_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lctargetpool_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_targetPools_lctargetpool_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lctargetpool_removeHealthCheck_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lctargetpool_addHealthCheck_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lctargetpool_removeInstance_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_targetPools_lctargetpool_removeInstance_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lb_pool_setBackup_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_targetPools_lb_pool_setBackup_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lctargetpool_addInstance_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_regions_us-central1_targetPools_lctargetpool_addInstance_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_disks_lcdisk_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_disks_lcdisk_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_instances_node_name_setDiskAutoDelete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'zones_us_central1_a_instances_node_name_setDiskAutoDelete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_volume_auto_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'zones_us_central1_a_operations_operation_volume_auto_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_disks_lcdisk_createSnapshot_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_disks_lcdisk_createSnapshot_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_disks_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_disks_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_lcnode_000_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_lcnode_001_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_delete(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_node-name_delete.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_attachDisk_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_detachDisk_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_setTags_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_reset_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_node-name_reset_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_europe_west1_a_operations_operation_zones_europe_west1_a_instances_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_europe-west1-a_instances_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_post(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'operations_operation_zones_us-central1-a_instances_post.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _project(self, method, url, body, headers):
        body = self.fixtures.load('project.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _projects_windows_cloud_global_licenses_windows_server_2008_r2_dc(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'projects_windows-cloud_global_licenses_windows_server_2008_r2_dc.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _projects_suse_cloud_global_licenses_sles_11(self, method, url, body,
                                                     headers):
        body = self.fixtures.load(
            'projects_suse-cloud_global_licenses_sles_11.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _projects_rhel_cloud_global_licenses_rhel_7_server(self, method, url,
                                                           body, headers):
        body = self.fixtures.load(
            'projects_rhel-cloud_global_licenses_rhel_server.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _projects_suse_cloud_global_licenses_sles_12(self, method, url, body,
                                                     headers):
        body = self.fixtures.load(
            'projects_suse-cloud_global_licenses_sles_12.json')
        return (httplib.OK, body, self.json_hdr,
                httplib.responses[httplib.OK])

    def _projects_windows_cloud_global_images(self, method, url, body, header):
        body =
self.fixtures.load('projects_windows-cloud_global_images.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _projects_rhel_cloud_global_images(self, method, url, body, header): body = self.fixtures.load('projects_rhel-cloud_global_images.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _projects_gce_nvme_global_images(self, method, url, body, header): body = self.fixtures.load('projects_gce-nvme_global_images.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _projects_coreos_cloud_global_images(self, method, url, body, header): body = self.fixtures.load('projects_coreos-cloud_global_images.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _projects_coreos_cloud_global_images_family_coreos(self, method, url, body, header): body = self.fixtures.load( 'projects_coreos-cloud_global_images_family_coreos.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _projects_opensuse_cloud_global_images(self, method, url, body, header): body = self.fixtures.load('projects_opensuse-cloud_global_images.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _projects_google_containers_global_images(self, method, url, body, header): body = self.fixtures.load( 'projects_google-containers_global_images.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _projects_ubuntu_os_cloud_global_images(self, method, url, body, header): body = self.fixtures.load( 'projects_ubuntu-os-cloud_global_images.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _projects_centos_cloud_global_images(self, method, url, body, header): body = self.fixtures.load('projects_centos-cloud_global_images.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _projects_suse_cloud_global_images(self, method, url, body, headers): body = 
self.fixtures.load('projects_suse-cloud_global_images.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _projects_debian_cloud_global_images(self, method, url, body, headers): body = self.fixtures.load('projects_debian-cloud_global_images.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions(self, method, url, body, headers): if 'pageToken' in url or 'filter' in url: body = self.fixtures.load('regions-paged-2.json') elif 'maxResults' in url: body = self.fixtures.load('regions-paged-1.json') else: body = self.fixtures.load('regions.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_addresses(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('global_addresses_post.json') else: body = self.fixtures.load('global_addresses.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_europe_west1(self, method, url, body, headers): body = self.fixtures.load('regions_europe-west1.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_asia_east1(self, method, url, body, headers): body = self.fixtures.load('regions_asia-east1.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1(self, method, url, body, headers): body = self.fixtures.load('regions_us-central1.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_east1(self, method, url, body, headers): body = self.fixtures.load('regions_us-east1.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_subnetworks(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load( 'regions_us-central1_subnetworks_post.json') else: body = self.fixtures.load('regions_us-central1_subnetworks.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) 
def _regions_us_central1_addresses(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load( 'regions_us-central1_addresses_post.json') else: body = self.fixtures.load('regions_us-central1_addresses.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_addresses_lcaddressglobal(self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'global_addresses_lcaddressglobal_delete.json') else: body = self.fixtures.load('global_addresses_lcaddressglobal.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_addresses_lcaddress(self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'regions_us-central1_addresses_lcaddress_delete.json') else: body = self.fixtures.load( 'regions_us-central1_addresses_lcaddress.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_forwardingRules(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load( 'regions_us-central1_forwardingRules_post.json') else: body = self.fixtures.load( 'regions_us-central1_forwardingRules.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_forwardingRules_libcloud_lb_demo_lb( self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_forwardingRules_libcloud-lb-demo-lb.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_forwardingRules_lcforwardingrule( self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'regions_us-central1_forwardingRules_lcforwardingrule_delete.json') else: body = self.fixtures.load( 'regions_us-central1_forwardingRules_lcforwardingrule.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_targetInstances(self, method, url, body, headers): if 
method == 'POST': body = self.fixtures.load( 'zones_us-central1-a_targetInstances_post.json') else: body = self.fixtures.load( 'zones_us-central1-a_targetInstances.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load( 'regions_us-central1_targetPools_post.json') else: body = self.fixtures.load('regions_us-central1_targetPools.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_targetInstances_lctargetinstance( self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'zones_us-central1-a_targetInstances_lctargetinstance_delete.json') else: body = self.fixtures.load( 'zones_us-central1-a_targetInstances_lctargetinstance.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_lb_pool_getHealth(self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_targetPools_lb_pool_getHealth.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_lb_pool(self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_targetPools_lb_pool.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_lctargetpool(self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'regions_us-central1_targetPools_lctargetpool_delete.json') else: body = self.fixtures.load( 'regions_us-central1_targetPools_lctargetpool.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_lctargetpool_sticky(self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_targetPools_lctargetpool_sticky.json') return (httplib.OK, body, self.json_hdr, 
httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_backup_pool(self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_targetPools_backup_pool.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_libcloud_lb_demo_lb_tp( self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_targetPools_libcloud-lb-demo-lb-tp.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_lctargetpool_removeHealthCheck( self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_lctargetpool_addHealthCheck( self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_lctargetpool_removeInstance( self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_targetPools_lctargetpool_removeInstance_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_lb_pool_setBackup(self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_targetPools_lb_pool_setBackup_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _regions_us_central1_targetPools_lctargetpool_addInstance( self, method, url, body, headers): body = self.fixtures.load( 'regions_us-central1_targetPools_lctargetpool_addInstance_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones(self, method, url, body, headers): body = self.fixtures.load('zones.json') return (httplib.OK, body, self.json_hdr, 
httplib.responses[httplib.OK]) def _zones_asia_east_1a(self, method, url, body, headers): body = self.fixtures.load('zones_asia-east1-a.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_asia_east1_b(self, method, url, body, headers): body = self.fixtures.load('zones_asia-east1-b.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_east1_b(self, method, url, body, headers): body = self.fixtures.load('zones_us-east1-b.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_diskTypes(self, method, url, body, headers): body = self.fixtures.load('zones_us-central1-a_diskTypes.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_diskTypes_pd_standard(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_diskTypes_pd_standard.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_diskTypes_pd_ssd(self, method, url, body, headers): body = self.fixtures.load('zones_us-central1-a_diskTypes_pd_ssd.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_disks(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('zones_us-central1-a_disks_post.json') else: body = self.fixtures.load('zones_us-central1-a_disks.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_disks_lcdisk(self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'zones_us-central1-a_disks_lcdisk_delete.json') else: body = self.fixtures.load('zones_us-central1-a_disks_lcdisk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_disks_lcdisk_createSnapshot(self, method, url, body, headers): body = self.fixtures.load( 
'zones_us-central1-a_disks_lcdisk_createSnapshot_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_disks_node_name(self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_disks_lcnode_000(self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_disks_lcnode_001(self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_b_disks_libcloud_lb_demo_www_000(self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_b_disks_libcloud_lb_demo_www_001(self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_b_disks_libcloud_lb_demo_www_002(self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central2_a_disks_libcloud_demo_boot_disk(self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central2_a_disks_libcloud_demo_np_node(self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central2_a_disks_libcloud_demo_multiple_nodes_000( self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def 
_zones_us_central2_a_disks_libcloud_demo_multiple_nodes_001( self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_europe_west1_a_disks(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('zones_us-central1-a_disks_post.json') else: body = self.fixtures.load('zones_us-central1-a_disks.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_europe_west1_a_disks_libcloud_demo_europe_np_node( self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_europe_west1_a_disks_libcloud_demo_europe_boot_disk( self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_europe_west1_a_disks_libcloud_demo_europe_multiple_nodes_000( self, method, url, body, headers): body = self.fixtures.load('generic_disk.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_europe_west1_a_instances(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load( 'zones_europe-west1-a_instances_post.json') else: body = self.fixtures.load('zones_europe-west1-a_instances.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_europe_west1_a_diskTypes_pd_standard(self, method, url, body, headers): body = self.fixtures.load( 'zones_europe-west1-a_diskTypes_pd_standard.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load( 'zones_us-central1-a_instances_post.json') else: body = self.fixtures.load('zones_us-central1-a_instances.json') return (httplib.OK, body, self.json_hdr, 
httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_sn_node_name(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_instances_sn-node-name.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_node_name(self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'zones_us-central1-a_instances_node-name_delete.json') else: body = self.fixtures.load( 'zones_us-central1-a_instances_node-name.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_node_name_attachDisk(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_instances_node-name_attachDisk_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_node_name_detachDisk(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_instances_node-name_detachDisk_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_node_name_setTags(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_instances_node-name_setTags_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_node_name_reset(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_instances_node-name_reset_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_lcnode_000(self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'zones_us-central1-a_instances_lcnode-000_delete.json') else: body = self.fixtures.load( 'zones_us-central1-a_instances_lcnode-000.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_lcnode_001(self, 
method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'zones_us-central1-a_instances_lcnode-001_delete.json') else: body = self.fixtures.load( 'zones_us-central1-a_instances_lcnode-001.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_b_instances_libcloud_lb_nopubip_001( self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-b_instances_libcloud-lb-nopubip-001.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_b_instances_libcloud_lb_demo_www_000( self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-b_instances_libcloud-lb-demo-www-000.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_b_instances_libcloud_lb_demo_www_001( self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-b_instances_libcloud-lb-demo-www-001.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_b_instances_libcloud_lb_demo_www_002( self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-b_instances_libcloud-lb-demo-www-002.json') return (httplib.NOT_FOUND, body, self.json_hdr, httplib.responses[httplib.NOT_FOUND]) def _zones_us_central1_a(self, method, url, body, headers): body = self.fixtures.load('zones_us-central1-a.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_machineTypes(self, method, url, body, headers): body = self.fixtures.load('zones_us-central1-a_machineTypes.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_europe_west1_a_machineTypes_n1_standard_1(self, method, url, body, headers): body = self.fixtures.load( 'zones_europe-west1-a_machineTypes_n1-standard-1.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def 
_zones_us_central1_a_machineTypes_n1_standard_1(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_machineTypes_n1-standard-1.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instanceGroups_myinstancegroup(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_instanceGroup_myinstancegroup.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instanceGroups_myinstancegroup2(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_instanceGroup_myinstancegroup2.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_b_instanceGroups_myinstancegroup(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-b_instanceGroup_myinstancegroup.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_east1_b_instanceGroups_myinstancegroup(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-east1-b_instanceGroup_myinstancegroup.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instanceGroupManagers_myinstancegroup( self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_instanceGroupManagers_myinstancegroup.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_b_instanceGroupManagers_myinstancegroup( self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-b_instanceGroupManagers_myinstancegroup.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instanceGroupManagers_myinstancegroup_listManagedInstances( self, method, url, body, headers): body = self.fixtures.load( '_zones_us_central1_a_instanceGroupManagers_myinstancegroup_listManagedInstances.json') return 
(httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_east1_b_instanceGroupManagers(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-east1-b_instanceGroupManagers.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instanceGroupManagers(self, method, url, body, headers): # do an insert. Returns an operations link, which then # returns the MIG URI. if method == 'POST': body = self.fixtures.load( 'zones_us-central1-a_instanceGroupManagers_insert.json') else: body = self.fixtures.load( 'zones_us-central1-a_instanceGroupManagers.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_operations_operation_zones_us_central1_a_instanceGroupManagers_insert_post( self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_operations_operation_zones_us-central1-a_instanceGroupManagers_insert_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_instanceTemplates(self, method, url, body, headers): if method == 'POST': # insert body = self.fixtures.load('global_instanceTemplates_insert.json') else: # get or list call body = self.fixtures.load('global_instanceTemplates.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_operations_operation_global_instanceTemplates_my_instance_template1_insert( self, method, url, body, headers): """ Redirects from _global_instanceTemplates """ body = self.fixtures.load( 'operations_operation_global_instanceTemplates_insert.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_instanceTemplates_my_instance_template1(self, method, url, body, headers): body = self.fixtures.load( 'global_instanceTemplates_my_instance_template1.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _aggregated_autoscalers(self, method, url, body, 
headers): body = self.fixtures.load('aggregated_autoscalers.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_sslCertificates(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('global_sslcertificates_post.json') else: body = self.fixtures.load('global_sslcertificates.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_sslCertificates_example(self, method, url, body, headers): body = self.fixtures.load('global_sslcertificates_example.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_operations_operation_global_sslcertificates_post( self, method, url, body, headers): body = self.fixtures.load( 'operations_operation_global_sslcertificates_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instanceGroups_myname(self, method, url, body, headers): if method == 'DELETE': # delete body = self.fixtures.load( 'zones_us_central1_a_instanceGroups_myname_delete.json') else: # get or list call body = self.fixtures.load( 'zones_us_central1_a_instanceGroups_myname.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_operations_operation_zones_us_central1_a_instanceGroups_myname_delete( self, method, url, body, headers): """ Redirects from _zones_us_central1_a_instanceGroups_myname """ body = self.fixtures.load( 'operations_operation_zones_us_central1_a_instanceGroups_myname_delete.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instanceGroups(self, method, url, body, headers): if method == 'POST': # insert body = self.fixtures.load( 'zones_us_central1_a_instanceGroups_insert.json') else: # get or list call body = self.fixtures.load( 'zones_us_central1_a_instanceGroups.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def 
_zones_us_central1_a_operations_operation_zones_us_central1_a_instanceGroups_myname_insert( self, method, url, body, headers): """ Redirects from _zones_us_central1_a_instanceGroups """ body = self.fixtures.load( 'operations_operation_zones_us_central1_a_instanceGroups_insert.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instanceGroups_myname_listInstances( self, method, url, body, headers): # POST body = self.fixtures.load( 'zones_us_central1_a_instanceGroups_myname_listInstances.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instanceGroups_myname_addInstances( self, method, url, body, headers): # POST body = self.fixtures.load( 'zones_us_central1_a_instanceGroups_myname_addInstances.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_operations_operation_zones_us_central1_a_instanceGroups_myname_addInstances( self, method, url, body, headers): """ Redirects from _zones_us_central1_a_instanceGroups_myname_addInstances """ body = self.fixtures.load( 'operations_operation_zones_us_central1_a_instanceGroups_myname_addInstances.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instanceGroups_myname_removeInstances( self, method, url, body, headers): # POST body = self.fixtures.load( 'zones_us_central1_a_instanceGroups_myname_removeInstances.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_operations_operation_zones_us_central1_a_instanceGroups_myname_removeInstances( self, method, url, body, headers): """ Redirects from _zones_us_central1_a_instanceGroups_myname_removeInstances """ body = self.fixtures.load( 'operations_operation_zones_us_central1_a_instanceGroups_myname_removeInstances.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def 
_zones_us_central1_a_instanceGroups_myname_setNamedPorts( self, method, url, body, headers): # POST body = self.fixtures.load( 'zones_us_central1_a_instanceGroups_myname_setNamedPorts.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_operations_operation_zones_us_central1_a_instanceGroups_myname_setNamedPorts( self, method, url, body, headers): """ Redirects from _zones_us_central1_a_instanceGroups_myname_setNamedPorts """ body = self.fixtures.load( 'operations_operation_zones_us_central1_a_instanceGroups_myname_setNamedPorts.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_medone.py0000664000175000017500000000241713153541406023534 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import unittest from libcloud.compute.drivers.medone import MedOneNodeDriver from libcloud.test.compute.test_dimensiondata_v2_3 import DimensionDataMockHttp, DimensionData_v2_3_Tests class MedOneTests(DimensionData_v2_3_Tests, unittest.TestCase): def setUp(self): MedOneNodeDriver.connectionCls.conn_class = DimensionDataMockHttp MedOneNodeDriver.connectionCls.active_api_version = '2.3' DimensionDataMockHttp.type = None self.driver = MedOneNodeDriver('user', 'password') apache-libcloud-2.2.1/libcloud/test/compute/test_packet.py0000664000175000017500000001733113153541406023535 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Maintainer: Aaron Welch # Based on code written by Jed Smith who based it on # code written by Alex Polvi # import sys import unittest from libcloud.utils.py3 import httplib from libcloud.compute.drivers.packet import PacketNodeDriver from libcloud.compute.base import Node, KeyPair from libcloud.compute.types import NodeState from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures class PacketTest(unittest.TestCase, TestCaseMixin): def setUp(self): PacketNodeDriver.connectionCls.conn_class = PacketMockHttp self.driver = PacketNodeDriver('foo') def test_list_nodes(self): nodes = self.driver.list_nodes('project-id') self.assertEqual(len(nodes), 1) node = nodes[0] self.assertEqual(node.id, '1e52437e-bbbb-cccc-dddd-74a9dfd3d3bb') self.assertEqual(node.name, 'test-node') self.assertEqual(node.state, NodeState.RUNNING) self.assertTrue('147.75.255.255' in node.public_ips) self.assertTrue('2604:EEEE::EE' in node.public_ips) self.assertTrue('10.0.0.255' in node.private_ips) self.assertEqual(node.extra['created_at'], '2015-05-03T15:50:49Z') self.assertEqual(node.extra['updated_at'], '2015-05-03T16:00:08Z') self.assertEqual(node.extra['billing_cycle'], 'hourly') self.assertEqual(node.extra['locked'], False) self.assertEqual(node.size.id, 'baremetal_1') self.assertEqual(node.size.name, 'Type 1') self.assertEqual(node.size.ram, 16384) self.assertEqual(node.size.disk, 240) self.assertEqual(node.size.price, 0.4) self.assertEqual(node.size.extra['line'], 'baremetal') self.assertEqual(node.image.id, 'ubuntu_14_04') self.assertEqual(node.image.name, 'Ubuntu 14.04 LTS') self.assertEqual(node.image.extra['distro'], 'ubuntu') self.assertEqual(node.image.extra['version'], '14.04') def test_list_nodes_response(self): nodes = self.driver.list_nodes('project-id') self.assertTrue(isinstance(nodes, list)) for node in nodes: self.assertTrue(isinstance(node, Node)) def test_list_locations(self): 
locations = self.driver.list_locations() self.assertEqual(len(locations), 1) def test_list_images(self): images = self.driver.list_images() self.assertEqual(len(images), 4) def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 1) def test_create_node(self): node = self.driver.create_node(ex_project_id="project-id", name="node-name", size=self.driver.list_sizes()[0], image=self.driver.list_images()[0], location=self.driver.list_locations()[ 0]) self.assertTrue(isinstance(node, Node)) def test_create_node_response(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] location = self.driver.list_locations()[0] node = self.driver.create_node(ex_project_id="project-id", name='node-name', image=image, size=size, location=location) self.assertTrue(isinstance(node, Node)) def test_reboot_node(self): node = self.driver.list_nodes('project-id')[0] self.driver.reboot_node(node) def test_reboot_node_response(self): node = self.driver.list_nodes('project-id')[0] self.driver.reboot_node(node) def test_destroy_node(self): node = self.driver.list_nodes('project-id')[0] self.driver.destroy_node(node) def test_destroy_node_response(self): node = self.driver.list_nodes('project-id')[0] self.driver.destroy_node(node) def test_list_key_pairs(self): keys = self.driver.list_key_pairs() self.assertEqual(len(keys), 3) def test_create_key_pair(self): key = self.driver.create_key_pair(name="sshkey-name", public_key="ssh-rsa AAAAB3NzaC1yc2EA\ AAADAQABAAABAQDI4pIqzpb5g3992h+yr527VRcaB68KE4vPjWPPoiQws49KIs2NMcOzS9QE4641uW\ 1u5ML2HgQdfYKMF/YFGnI1Y6xV637DjhDyZYV9LasUH49npSSJjsBcsk9JGfUpNAOdcgpFzK8V90ei\ OrOC5YncxdwwG8pwjFI9nNVPCl4hYEu1iXdyysHvkFfS2fklsNjLWrzfafPlaen+qcBxygCA0sFdW/\ 7er50aJeghdBHnE2WhIKLUkJxnKadznfAge7oEe+3LLAPfP+3yHyvp2+H0IzmVfYvAjnzliYetqQ8p\ g5ZW2BiJzvqz5PebGS70y/ySCNW1qQmJURK/Wc1bt9en root@libcloud") self.assertTrue(isinstance(key, KeyPair)) def test_delete_key_pair(self): key = self.driver.list_key_pairs()[0] 
self.driver.delete_key_pair(key) class PacketMockHttp(MockHttp): fixtures = ComputeFileFixtures('packet') def _facilities(self, method, url, body, headers): body = self.fixtures.load('facilities.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _plans(self, method, url, body, headers): body = self.fixtures.load('plans.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _operating_systems(self, method, url, body, headers): body = self.fixtures.load('operatingsystems.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ssh_keys(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('sshkeys.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('sshkey_create.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ssh_keys_2c1a7f23_1dc6_4a37_948e_d9857d9f607c(self, method, url, body, headers): if method == 'DELETE': return (httplib.OK, '', {}, httplib.responses[httplib.OK]) def _projects_project_id_devices(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('device_create.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) elif method == 'GET': body = self.fixtures.load('devices.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb(self, method, url, body, headers): if method == 'DELETE': return (httplib.OK, '', {}, httplib.responses[httplib.OK]) def _devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb_actions( self, method, url, body, headers): return (httplib.OK, '', {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_rackspace.py0000664000175000017500000001756013153541406024226 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import method_type from libcloud.utils.py3 import httplib from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.compute.drivers.rackspace import RackspaceFirstGenNodeDriver from libcloud.compute.drivers.rackspace import RackspaceNodeDriver from libcloud.test.compute.test_openstack import OpenStack_1_0_Tests from libcloud.test.compute.test_openstack import OpenStack_1_1_Tests, \ OpenStack_1_1_MockHttp from libcloud.pricing import clear_pricing_data from libcloud.test.secrets import RACKSPACE_NOVA_PARAMS from libcloud.test.secrets import RACKSPACE_PARAMS DEPRECATED_RACKSPACE_PROVIDERS = [Provider.RACKSPACE_UK, Provider.RACKSPACE_NOVA_BETA, Provider.RACKSPACE_NOVA_DFW, Provider.RACKSPACE_NOVA_LON, Provider.RACKSPACE_NOVA_ORD] class RackspaceusFirstGenUsTests(OpenStack_1_0_Tests): should_list_locations = True should_have_pricing = True driver_klass = RackspaceFirstGenNodeDriver driver_type = RackspaceFirstGenNodeDriver driver_args = RACKSPACE_PARAMS driver_kwargs = {'region': 'us'} def test_error_is_thrown_on_accessing_old_constant(self): for provider in DEPRECATED_RACKSPACE_PROVIDERS: try: get_driver(provider) except Exception: e = sys.exc_info()[1] self.assertTrue(str(e).find('has been removed') != -1) 
else: self.fail('Exception was not thrown') def test_list_sizes_pricing(self): sizes = self.driver.list_sizes() for size in sizes: self.assertTrue(size.price > 0) class RackspaceusFirstGenUkTests(OpenStack_1_0_Tests): should_list_locations = True should_have_pricing = True driver_klass = RackspaceFirstGenNodeDriver driver_type = RackspaceFirstGenNodeDriver driver_args = RACKSPACE_PARAMS driver_kwargs = {'region': 'uk'} def test_list_sizes_pricing(self): sizes = self.driver.list_sizes() for size in sizes: self.assertTrue(size.price > 0) class RackspaceNovaMockHttp(OpenStack_1_1_MockHttp): def __init__(self, *args, **kwargs): super(RackspaceNovaMockHttp, self).__init__(*args, **kwargs) methods1 = OpenStack_1_1_MockHttp.__dict__ names1 = [m for m in methods1 if m.find('_v1_1') == 0] for name in names1: method = methods1[name] new_name = name.replace('_v1_1_slug_', '_v2_1337_') setattr(self, new_name, method_type(method, self, RackspaceNovaMockHttp)) def _v2_1337_os_networksv2(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('_os_networks.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) elif method == 'POST': body = self.fixtures.load('_os_networks_POST.json') return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v2_1337_os_networksv2_f13e5051_feea_416b_827a_1a0acc2dad14(self, method, url, body, headers): if method == 'DELETE': body = '' return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() class RackspaceNovaLonMockHttp(RackspaceNovaMockHttp): def _v2_0_tokens(self, method, url, body, headers): body = self.auth_fixtures.load('_v2_0__auth_lon.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) # Does not derive from TestCase because it should not be used by setup.py test class BaseRackspaceNovaTestCase(object): conn_class = 
RackspaceNovaMockHttp auth_url = 'https://auth.api.example.com' def create_driver(self): return self.driver_type(*self.driver_args, **self.driver_kwargs) def setUp(self): self.driver_klass.connectionCls.conn_class = self.conn_class self.driver_klass.connectionCls.auth_url = self.auth_url self.conn_class.type = None self.driver = self.create_driver() # normally authentication happens lazily, but we force it here self.driver.connection._populate_hosts_and_request_paths() clear_pricing_data() self.node = self.driver.list_nodes()[1] def test_service_catalog_contais_right_endpoint(self): self.assertEqual(self.driver.connection.get_endpoint(), self.expected_endpoint) def test_list_sizes_pricing(self): sizes = self.driver.list_sizes() for size in sizes: if size.ram > 256: self.assertTrue(size.price > 0) class RackspaceNovaDfwTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): driver_klass = RackspaceNodeDriver driver_type = RackspaceNodeDriver driver_args = RACKSPACE_NOVA_PARAMS driver_kwargs = {'region': 'dfw'} expected_endpoint = 'https://dfw.servers.api.rackspacecloud.com/v2/1337' class RackspaceNovaOrdTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): driver_klass = RackspaceNodeDriver driver_type = RackspaceNodeDriver driver_args = RACKSPACE_NOVA_PARAMS driver_kwargs = {'region': 'ord'} expected_endpoint = 'https://ord.servers.api.rackspacecloud.com/v2/1337' class RackspaceNovaIadTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): driver_klass = RackspaceNodeDriver driver_type = RackspaceNodeDriver driver_args = RACKSPACE_NOVA_PARAMS driver_kwargs = {'region': 'iad'} expected_endpoint = 'https://iad.servers.api.rackspacecloud.com/v2/1337' class RackspaceNovaLonTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): driver_klass = RackspaceNodeDriver driver_type = RackspaceNodeDriver driver_args = RACKSPACE_NOVA_PARAMS driver_kwargs = {'region': 'lon'} conn_class = RackspaceNovaLonMockHttp auth_url = 'https://lon.auth.api.example.com' expected_endpoint = 
'https://lon.servers.api.rackspacecloud.com/v2/1337' class RackspaceNovaSydTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): driver_klass = RackspaceNodeDriver driver_type = RackspaceNodeDriver driver_args = RACKSPACE_NOVA_PARAMS driver_kwargs = {'region': 'syd'} expected_endpoint = 'https://syd.servers.api.rackspacecloud.com/v2/1337' class RackspaceNovaHkgTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): driver_klass = RackspaceNodeDriver driver_type = RackspaceNodeDriver driver_args = RACKSPACE_NOVA_PARAMS driver_kwargs = {'region': 'hkg'} expected_endpoint = 'https://hkg.servers.api.rackspacecloud.com/v2/1337' if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_ovh.py0000664000175000017500000002457013153541406023065 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys import unittest from mock import patch from libcloud.utils.py3 import httplib from libcloud.compute.drivers.ovh import OvhNodeDriver from libcloud.test.common.test_ovh import BaseOvhMockHttp from libcloud.test.secrets import OVH_PARAMS from libcloud.test.file_fixtures import ComputeFileFixtures class OvhMockHttp(BaseOvhMockHttp): """Fixtures needed for tests related to rating model""" fixtures = ComputeFileFixtures('ovh') def _json_1_0_auth_time_get(self, method, url, body, headers): body = self.fixtures.load('auth_time_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_region_get(self, method, url, body, headers): body = self.fixtures.load('region_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_flavor_get(self, method, url, body, headers): body = self.fixtures.load('flavor_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_flavor_region_SBG1_get(self, method, url, body, headers): body = self.fixtures.load('flavor_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_flavor_foo_id_get(self, method, url, body, headers): body = self.fixtures.load('flavor_get_detail.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_image_get(self, method, url, body, headers): body = self.fixtures.load('image_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_image_foo_id_get(self, method, url, body, headers): body = self.fixtures.load('image_get_detail.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_sshkey_region_SBG1_get(self, method, url, body, headers): body = self.fixtures.load('ssh_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_json_1_0_cloud_project_project_id_sshkey_post(self, method, url, body, headers): body = self.fixtures.load('ssh_get_detail.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_ssh_mykey_get(self, method, url, body, headers): body = self.fixtures.load('ssh_get_detail.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_instance_get(self, method, url, body, headers): body = self.fixtures.load('instance_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_instance_foo_get(self, method, url, body, headers): body = self.fixtures.load('instance_get_detail.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_instance_foo_delete(self, method, url, body, headers): return (httplib.OK, '', {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_instance_post(self, method, url, body, headers): body = self.fixtures.load('instance_get_detail.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_get(self, method, url, body, headers): body = self.fixtures.load('volume_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_post(self, method, url, body, headers): body = self.fixtures.load('volume_get_detail.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_foo_get(self, method, url, body, headers): body = self.fixtures.load('volume_get_detail.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_foo_delete(self, method, url, body, headers): return (httplib.OK, '', {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_foo_attach_post(self, method, url, body, headers): body = 
self.fixtures.load('volume_get_detail.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_foo_detach_post(self, method, url, body, headers): body = self.fixtures.load('volume_get_detail.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_snapshot_region_SBG_1_get(self, method, url, body, headers): body = self.fixtures.load('volume_snapshot_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_snapshot_get(self, method, url, body, headers): body = self.fixtures.load('volume_snapshot_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_snapshot_foo_get(self, method, url, body, headers): body = self.fixtures.load('volume_snapshot_get_details.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_snapshot_foo_snap_delete(self, method, url, body, headers): return (httplib.OK, None, {}, httplib.responses[httplib.OK]) def _json_1_0_cloud_project_project_id_volume_foo_snapshot__post(self, method, url, body, headers): body = self.fixtures.load('volume_snapshot_get_details.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) @patch('libcloud.common.ovh.OvhConnection._timedelta', 42) class OvhTests(unittest.TestCase): def setUp(self): OvhNodeDriver.connectionCls.conn_class = OvhMockHttp OvhMockHttp.type = None self.driver = OvhNodeDriver(*OVH_PARAMS) def test_list_locations(self): images = self.driver.list_locations() self.assertTrue(len(images) > 0) def test_list_images(self): images = self.driver.list_images() self.assertTrue(len(images) > 0) def test_get_image(self): image = self.driver.get_image('foo-id') self.assertEqual(image.id, 'foo-id') def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertTrue(len(sizes) > 0) def 
test_get_size(self): size = self.driver.ex_get_size('foo-id') self.assertEqual(size.id, 'foo-id') def test_list_key_pairs(self): keys = self.driver.list_sizes() self.assertTrue(len(keys) > 0) def test_get_key_pair(self): location = self.driver.list_locations()[0] key = self.driver.get_key_pair('mykey', location) self.assertEqual(key.name, 'mykey') def test_import_key_pair_from_string(self): location = self.driver.list_locations()[0] key = self.driver.import_key_pair_from_string('mykey', 'material', location) self.assertEqual(key.name, 'mykey') def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertTrue(len(nodes) > 0) def test_get_node(self): node = self.driver.ex_get_node('foo') self.assertEqual(node.name, 'test_vm') def test_create_node(self): location = self.driver.list_locations()[0] image = self.driver.list_sizes(location)[0] size = self.driver.list_sizes(location)[0] node = self.driver.create_node(name='test_vm', image=image, size=size, location=location) self.assertEqual(node.name, 'test_vm') def test_destroy_node(self): node = self.driver.list_nodes()[0] self.driver.destroy_node(node) def test_list_volumes(self): volumes = self.driver.list_volumes() self.assertTrue(len(volumes) > 0) def test_get_volume(self): volume = self.driver.ex_get_volume('foo') self.assertEqual(volume.name, 'testvol') def test_create_volume(self): location = self.driver.list_locations()[0] volume = self.driver.create_volume(size=10, name='testvol', location=location) self.assertEqual(volume.name, 'testvol') def test_destroy_volume(self): volume = self.driver.list_volumes()[0] self.driver.destroy_volume(volume) def test_attach_volume(self): node = self.driver.list_nodes()[0] volume = self.driver.ex_get_volume('foo') response = self.driver.attach_volume(node=node, volume=volume) self.assertTrue(response) def test_detach_volume(self): node = self.driver.list_nodes()[0] volume = self.driver.ex_get_volume('foo') response = self.driver.detach_volume(ex_node=node, 
volume=volume) self.assertTrue(response) def test_ex_list_snapshots(self): self.driver.ex_list_snapshots() def test_ex_get_volume_snapshot(self): self.driver.ex_get_volume_snapshot('foo') def test_list_volume_snapshots(self): volume = self.driver.ex_get_volume('foo') self.driver.list_volume_snapshots(volume) def test_create_volume_snapshot(self): volume = self.driver.ex_get_volume('foo') self.driver.create_volume_snapshot(volume) def test_destroy_volume_snapshot(self): snapshot = self.driver.ex_get_volume_snapshot('foo') result = self.driver.destroy_volume_snapshot(snapshot) self.assertTrue(result) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_ecp.py0000664000175000017500000001176713153541406023044 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys import unittest from libcloud.utils.py3 import httplib from libcloud.compute.drivers.ecp import ECPNodeDriver from libcloud.compute.types import NodeState from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import ECP_PARAMS class ECPTests(unittest.TestCase, TestCaseMixin): def setUp(self): ECPNodeDriver.connectionCls.conn_class = ECPMockHttp self.driver = ECPNodeDriver(*ECP_PARAMS, host='dummy') def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 2) node = nodes[0] self.assertEqual(node.id, '1') self.assertEqual(node.name, 'dummy-1') self.assertEqual(node.public_ips[0], "42.78.124.75") self.assertEqual(node.state, NodeState.RUNNING) def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 3) size = sizes[0] self.assertEqual(size.id, '1') self.assertEqual(size.ram, 512) self.assertEqual(size.disk, 0) self.assertEqual(size.bandwidth, 0) self.assertEqual(size.price, 0) def test_list_images(self): images = self.driver.list_images() self.assertEqual(len(images), 2) self.assertEqual( images[0].name, "centos54: AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2") self.assertEqual(images[0].id, "1") name = "centos54 two: AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2" self.assertEqual(images[1].name, name) self.assertEqual(images[1].id, "2") def test_reboot_node(self): # Raises exception on failure node = self.driver.list_nodes()[0] self.driver.reboot_node(node) def test_destroy_node(self): # Raises exception on failure node = self.driver.list_nodes()[0] self.driver.destroy_node(node) def test_create_node(self): # Raises exception on failure size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] node = self.driver.create_node( name="api.ivan.net.nz", image=image, size=size) 
self.assertEqual(node.name, "api.ivan.net.nz") self.assertEqual(node.id, "1234") class ECPMockHttp(MockHttp): fixtures = ComputeFileFixtures('ecp') def _modules_hosting(self, method, url, body, headers): headers = {} headers['set-cookie'] = 'vcloud-token=testtoken' body = 'Anything' return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _rest_hosting_vm_1(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('vm_1_get.json') if method == 'POST': if body.find('delete', 0): body = self.fixtures.load('vm_1_action_delete.json') if body.find('stop', 0): body = self.fixtures.load('vm_1_action_stop.json') if body.find('start', 0): body = self.fixtures.load('vm_1_action_start.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _rest_hosting_vm(self, method, url, body, headers): if method == 'PUT': body = self.fixtures.load('vm_put.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _rest_hosting_vm_list(self, method, url, body, headers): body = self.fixtures.load('vm_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _rest_hosting_htemplate_list(self, method, url, body, headers): body = self.fixtures.load('htemplate_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _rest_hosting_network_list(self, method, url, body, headers): body = self.fixtures.load('network_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _rest_hosting_ptemplate_list(self, method, url, body, headers): body = self.fixtures.load('ptemplate_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_ktucloud.py0000664000175000017500000001071213153541406024114 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import parse_qsl try: import simplejson as json except ImportError: import json from libcloud.compute.drivers.ktucloud import KTUCloudNodeDriver from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures class KTUCloudNodeDriverTest(unittest.TestCase, TestCaseMixin): def setUp(self): KTUCloudNodeDriver.connectionCls.conn_class = KTUCloudStackMockHttp self.driver = KTUCloudNodeDriver('apikey', 'secret', path='/test/path', host='api.dummy.com') self.driver.path = '/test/path' self.driver.type = -1 KTUCloudStackMockHttp.fixture_tag = 'default' self.driver.connection.poll_interval = 0.0 def test_create_node_immediate_failure(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] KTUCloudStackMockHttp.fixture_tag = 'deployfail' self.assertRaises( Exception, self.driver.create_node, name='node-name', image=image, size=size) def test_create_node_delayed_failure(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] KTUCloudStackMockHttp.fixture_tag = 'deployfail2' self.assertRaises( Exception, self.driver.create_node, name='node-name', image=image, 
size=size) def test_list_images_no_images_available(self): KTUCloudStackMockHttp.fixture_tag = 'notemplates' images = self.driver.list_images() self.assertEqual(0, len(images)) def test_list_images_available(self): images = self.driver.list_images() self.assertEqual(112, len(images)) def test_list_sizes_available(self): sizes = self.driver.list_sizes() self.assertEqual(112, len(sizes)) def test_list_sizes_nodisk(self): KTUCloudStackMockHttp.fixture_tag = 'nodisk' sizes = self.driver.list_sizes() self.assertEqual(2, len(sizes)) check = False size = sizes[1] if size.id == KTUCloudNodeDriver.EMPTY_DISKOFFERINGID: check = True self.assertTrue(check) class KTUCloudStackMockHttp(MockHttp, unittest.TestCase): fixtures = ComputeFileFixtures('ktucloud') fixture_tag = 'default' def _load_fixture(self, fixture): body = self.fixtures.load(fixture) return body, json.loads(body) def _test_path(self, method, url, body, headers): url = urlparse.urlparse(url) query = dict(parse_qsl(url.query)) self.assertTrue('apiKey' in query) self.assertTrue('command' in query) self.assertTrue('response' in query) self.assertTrue('signature' in query) self.assertTrue(query['response'] == 'json') del query['apiKey'] del query['response'] del query['signature'] command = query.pop('command') if hasattr(self, '_cmd_' + command): return getattr(self, '_cmd_' + command)(**query) else: fixture = command + '_' + self.fixture_tag + '.json' body, obj = self._load_fixture(fixture) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _cmd_queryAsyncJobResult(self, jobid): fixture = 'queryAsyncJobResult' + '_' + str(jobid) + '.json' body, obj = self._load_fixture(fixture) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_vultr.py0000664000175000017500000001466413153541406023450 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # 
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

try:
    import simplejson as json
except ImportError:
    import json  # NOQA

from libcloud.utils.py3 import httplib
from libcloud.common.types import ServiceUnavailableError
from libcloud.compute.drivers.vultr import VultrNodeDriver

from libcloud.test import LibcloudTestCase, MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import VULTR_PARAMS


# class VultrTests(unittest.TestCase, TestCaseMixin):
class VultrTests(LibcloudTestCase):
    """Tests for the Vultr v1 driver, served from JSON fixtures."""

    def setUp(self):
        VultrNodeDriver.connectionCls.conn_class = VultrMockHttp
        VultrMockHttp.type = None
        self.driver = VultrNodeDriver(*VULTR_PARAMS)

    def test_list_images_dont_require_api_key(self):
        # Image listing is a public endpoint; no API key must be required.
        self.driver.list_images()
        self.assertFalse(self.driver.connection.require_api_key())

    def test_list_images_success(self):
        images = self.driver.list_images()
        self.assertTrue(len(images) >= 1)

        image = images[0]
        self.assertTrue(image.id is not None)
        self.assertTrue(image.name is not None)

    def test_list_sizes_success(self):
        sizes = self.driver.list_sizes()
        self.assertTrue(len(sizes) == 17)

        size = sizes[0]
        self.assertTrue(size.id is not None)
        self.assertEqual(size.name, '8192 MB RAM,110 GB SSD,10.00 TB BW')
        self.assertEqual(size.ram, 8192)

        size = sizes[16]
        self.assertTrue(size.id is not None)
        self.assertEqual(size.name, '4096 MB RAM,1000 GB SATA,5.00 TB BW')
        self.assertEqual(size.ram, 4096)

    def test_list_locations_success(self):
        locations = self.driver.list_locations()
        self.assertTrue(len(locations) >= 1)

        location = locations[0]
        self.assertEqual(location.id, '1')
        self.assertEqual(location.name, 'New Jersey')

    def test_list_nodes_require_api_key(self):
        # Node listing is account-scoped and must send the API key.
        self.driver.list_nodes()
        self.assertTrue(self.driver.connection.require_api_key())

    def test_list_nodes_success(self):
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].id, '1')
        self.assertEqual(nodes[0].public_ips, ['108.61.206.153'])

    def test_reboot_node_success(self):
        node = self.driver.list_nodes()[0]
        result = self.driver.reboot_node(node)
        self.assertTrue(result)

    def test_create_node_success(self):
        test_size = self.driver.list_sizes()[0]
        test_image = self.driver.list_images()[0]
        test_location = self.driver.list_locations()[0]
        created_node = self.driver.create_node('test-node', test_size,
                                               test_image, test_location)
        self.assertEqual(created_node.id, "1")

    def test_destroy_node_success(self):
        node = self.driver.list_nodes()[0]
        result = self.driver.destroy_node(node)
        self.assertTrue(result)

    def test_list_key_pairs_success(self):
        key_pairs = self.driver.list_key_pairs()
        self.assertEqual(len(key_pairs), 1)

        key_pair = key_pairs[0]
        self.assertEqual(key_pair.id, '5806a8ef2a0c6')
        self.assertEqual(key_pair.name, 'test-key-pair')

    def test_create_key_pair_success(self):
        res = self.driver.create_key_pair('test-key-pair')
        self.assertTrue(res)

    def test_delete_key_pair_success(self):
        key_pairs = self.driver.list_key_pairs()
        key_pair = key_pairs[0]
        res = self.driver.delete_key_pair(key_pair)
        self.assertTrue(res)

    def test_rate_limit(self):
        # A 503 from the API must surface as ServiceUnavailableError.
        VultrMockHttp.type = 'SERVICE_UNAVAILABLE'
        self.assertRaises(ServiceUnavailableError, self.driver.list_nodes)


class VultrMockHttp(MockHttp):
    """Mock transport: one handler per Vultr v1 API path."""

    fixtures = ComputeFileFixtures('vultr')

    def _v1_regions_list(self, method, url, body, headers):
        body = self.fixtures.load('list_locations.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_os_list(self, method, url, body, headers):
        body = self.fixtures.load('list_images.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_plans_list(self, method, url, body, headers):
        body = self.fixtures.load('list_sizes.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_server_list(self, method, url, body, headers):
        body = self.fixtures.load('list_nodes.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_server_list_SERVICE_UNAVAILABLE(self, method, url, body, headers):
        # Simulates Vultr's rate limiting for test_rate_limit.
        body = self.fixtures.load('error_rate_limit.txt')
        return (httplib.SERVICE_UNAVAILABLE, body, {},
                httplib.responses[httplib.SERVICE_UNAVAILABLE])

    def _v1_server_create(self, method, url, body, headers):
        body = self.fixtures.load('create_node.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_server_destroy(self, method, url, body, headers):
        return (httplib.OK, "", {}, httplib.responses[httplib.OK])

    def _v1_server_reboot(self, method, url, body, headers):
        return (httplib.OK, "", {}, httplib.responses[httplib.OK])

    def _v1_sshkey_list(self, method, url, body, headers):
        body = self.fixtures.load('list_key_pairs.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_sshkey_create(self, method, url, body, headers):
        body = self.fixtures.load('create_key_pair.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_sshkey_destroy(self, method, url, body, headers):
        return (httplib.OK, '', {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-libcloud-2.2.1/libcloud/test/compute/test_voxel.py0000664000175000017500000001401313153541406023415 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.
See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import httplib from libcloud.compute.base import Node, NodeSize, NodeImage, NodeLocation from libcloud.compute.drivers.voxel import VoxelNodeDriver as Voxel from libcloud.compute.types import InvalidCredsError from libcloud.test import MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import VOXEL_PARAMS class VoxelTest(unittest.TestCase): def setUp(self): Voxel.connectionCls.conn_class = VoxelMockHttp VoxelMockHttp.type = None self.driver = Voxel(*VOXEL_PARAMS) def test_auth_failed(self): VoxelMockHttp.type = 'UNAUTHORIZED' try: self.driver.list_nodes() except Exception: e = sys.exc_info()[1] self.assertTrue(isinstance(e, InvalidCredsError)) else: self.fail('test should have thrown') def test_response_failure(self): VoxelMockHttp.type = 'FAILURE' try: self.driver.list_nodes() except Exception: pass else: self.fail('Invalid response, but exception was not thrown') def test_list_nodes(self): VoxelMockHttp.type = 'LIST_NODES' nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 1) self.assertEqual(nodes[0].name, 'www.voxel.net') def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 13) def test_list_images(self): VoxelMockHttp.type = 'LIST_IMAGES' images = 
self.driver.list_images() self.assertEqual(len(images), 1) def test_list_locations(self): VoxelMockHttp.type = 'LIST_LOCATIONS' locations = self.driver.list_locations() self.assertEqual(len(locations), 2) self.assertEqual(locations[0].name, 'Amsterdam') def test_create_node_invalid_disk_size(self): image = NodeImage( id=1, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize( 1, '256 slice', None, None, None, None, driver=self.driver) location = NodeLocation(id=1, name='Europe', country='England', driver=self.driver) try: self.driver.create_node(name='foo', image=image, size=size, location=location) except ValueError: pass else: self.fail('Invalid disk size provided but an exception was not' ' thrown') def test_create_node(self): VoxelMockHttp.type = 'CREATE_NODE' image = NodeImage( id=1, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize( 1, '256 slice', 1024, 500, None, None, driver=self.driver) location = NodeLocation(id=1, name='Europe', country='England', driver=self.driver) node = self.driver.create_node(name='foo', image=image, size=size, location=location) self.assertEqual(node.id, '1234') node = self.driver.create_node(name='foo', image=image, size=size, location=location, voxel_access=True) self.assertEqual(node.id, '1234') def test_reboot_node(self): VoxelMockHttp.type = 'REBOOT_NODE' node = Node( id=72258, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) self.assertTrue(node.reboot()) def test_destroy_node(self): VoxelMockHttp.type = 'DESTROY_NODE' node = Node( id=72258, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) self.assertTrue(node.destroy()) class VoxelMockHttp(MockHttp): fixtures = ComputeFileFixtures('voxel') def _UNAUTHORIZED(self, method, url, body, headers): body = self.fixtures.load('unauthorized.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _FAILURE(self, method, url, body, headers): body = self.fixtures.load('failure.xml') 
return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _LIST_NODES(self, method, url, body, headers): body = self.fixtures.load('nodes.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _LIST_IMAGES(self, method, url, body, headers): body = self.fixtures.load('images.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _LIST_LOCATIONS(self, method, url, body, headers): body = self.fixtures.load('locations.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CREATE_NODE(self, method, url, body, headers): body = self.fixtures.load('create_node.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _REBOOT_NODE(self, method, url, body, headers): body = self.fixtures.load('success.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DESTROY_NODE(self, method, url, body, headers): body = self.fixtures.load('success.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_brightbox.py0000664000175000017500000003662413153541406024264 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys
import unittest
import base64

try:
    import simplejson as json
except ImportError:
    import json

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b

from libcloud.common.types import InvalidCredsError
from libcloud.compute.drivers.brightbox import BrightboxNodeDriver
from libcloud.compute.types import NodeState

from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import BRIGHTBOX_PARAMS

# Script passed via ex_userdata; the mock checks it arrives base64-encoded.
USER_DATA = '#!/bin/sh\ntest_script.sh\n'


class BrightboxTest(unittest.TestCase, TestCaseMixin):
    """Tests for the Brightbox driver (OAuth token endpoint plus the
    1.0 JSON API), served from fixtures by the mock transport below."""

    def setUp(self):
        BrightboxNodeDriver.connectionCls.conn_class = BrightboxMockHttp
        BrightboxMockHttp.type = None
        # Pre-seed the OAuth token so API calls skip the token exchange.
        BrightboxNodeDriver.connectionCls.token = 'test'
        self.driver = BrightboxNodeDriver(*BRIGHTBOX_PARAMS)

    def test_authentication(self):
        # Both OAuth error variants must map to InvalidCredsError.
        BrightboxMockHttp.type = 'INVALID_CLIENT'
        self.assertRaises(InvalidCredsError, self.driver.list_nodes)

        BrightboxMockHttp.type = 'UNAUTHORIZED_CLIENT'
        self.assertRaises(InvalidCredsError, self.driver.list_nodes)

    def test_invalid_api_version(self):
        # The mock only implements /1.0; /2.0 answers with an error.
        kwargs = {'api_version': '2.0'}
        self.driver = BrightboxNodeDriver(*BRIGHTBOX_PARAMS, **kwargs)
        self.assertRaises(Exception, self.driver.list_locations)

    def test_other_host(self):
        # An alternate API host returns an empty zone list ("{}").
        kwargs = {'host': 'api.gbt.brightbox.com'}
        self.driver = BrightboxNodeDriver(*BRIGHTBOX_PARAMS, **kwargs)
        locations = self.driver.list_locations()
        self.assertEqual(len(locations), 0)

    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 2)
        self.assertEqual(len(nodes[0].public_ips), 1)
        self.assertEqual(len(nodes[1].public_ips), 1)
        self.assertEqual(len(nodes[0].private_ips), 1)
        self.assertEqual(len(nodes[1].private_ips), 1)
        self.assertTrue('109.107.35.16' in nodes[0].public_ips)
        self.assertTrue('10.74.210.210' in nodes[0].private_ips)
        self.assertTrue('10.240.228.234' in nodes[1].private_ips)
        # IPv6 addresses are reported as public IPs as well.
        self.assertTrue(
            '2a02:1348:14c:393a:24:19ff:fef0:e4ea' in nodes[1].public_ips)
        self.assertEqual(nodes[0].state, NodeState.RUNNING)
        self.assertEqual(nodes[1].state, NodeState.RUNNING)

    def test_list_node_extras(self):
        nodes = self.driver.list_nodes()
        self.assertFalse(nodes[0].size is None)
        self.assertFalse(nodes[1].size is None)
        self.assertFalse(nodes[0].image is None)
        self.assertFalse(nodes[1].image is None)
        self.assertEqual(nodes[0].image.id, 'img-arm8f')
        self.assertEqual(nodes[0].size.id, 'typ-urtky')
        self.assertEqual(nodes[1].image.id, 'img-j93gd')
        self.assertEqual(nodes[1].size.id, 'typ-qdiwq')
        self.assertEqual(nodes[0].extra['fqdn'], 'srv-xvpn7.gb1.brightbox.com')
        self.assertEqual(nodes[1].extra['fqdn'], 'srv-742vn.gb1.brightbox.com')
        self.assertEqual(nodes[0].extra['hostname'], 'srv-xvpn7')
        self.assertEqual(nodes[1].extra['hostname'], 'srv-742vn')
        self.assertEqual(nodes[0].extra['status'], 'active')
        self.assertEqual(nodes[1].extra['status'], 'active')
        self.assertTrue('interfaces' in nodes[0].extra)
        self.assertTrue('zone' in nodes[0].extra)
        self.assertTrue('snapshots' in nodes[0].extra)
        self.assertTrue('server_groups' in nodes[0].extra)
        self.assertTrue('started_at' in nodes[0].extra)
        self.assertTrue('created_at' in nodes[0].extra)
        self.assertFalse('deleted_at' in nodes[0].extra)

    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 7)
        self.assertEqual(sizes[0].id, 'typ-4nssg')
        self.assertEqual(sizes[0].name, 'Brightbox Nano Instance')
        self.assertEqual(sizes[0].ram, 512)
        self.assertEqual(sizes[0].disk, 20480)
        self.assertEqual(sizes[0].bandwidth, 0)
        self.assertEqual(sizes[0].price, 0)

    def test_list_images(self):
        images = self.driver.list_images()
        self.assertEqual(len(images), 3)
        self.assertEqual(images[0].id, 'img-99q79')
        self.assertEqual(images[0].name, 'CentOS 5.5 server')
        self.assertTrue('ancestor' in images[0].extra)
        self.assertFalse('licence_name' in images[0].extra)

    def test_list_images_extras(self):
        images = self.driver.list_images()
        extra = images[-1].extra
        self.assertEqual(extra['arch'], 'i686')
        self.assertFalse(extra['compatibility_mode'])
        self.assertEqual(extra['created_at'], '2012-01-22T05:36:24Z')
        self.assertTrue('description' in extra)
        self.assertEqual(extra['disk_size'], 671)
        self.assertFalse('min_ram' in extra)
        self.assertFalse(extra['official'])
        self.assertEqual(extra['owner'], 'acc-tqs4c')
        self.assertTrue(extra['public'])
        self.assertEqual(extra['source'], 'oneiric-i386-20178.gz')
        self.assertEqual(extra['source_type'], 'upload')
        self.assertEqual(extra['status'], 'deprecated')
        self.assertEqual(extra['username'], 'ubuntu')
        self.assertEqual(extra['virtual_size'], 1025)
        self.assertFalse('ancestor' in extra)
        self.assertFalse('licence_name' in extra)

    def test_list_locations(self):
        locations = self.driver.list_locations()
        self.assertEqual(locations[0].id, 'zon-6mxqw')
        self.assertEqual(locations[0].name, 'gb1-a')
        self.assertEqual(locations[1].id, 'zon-remk1')
        self.assertEqual(locations[1].name, 'gb1-b')

    def test_reboot_node_response(self):
        # Brightbox does not implement reboot.
        node = self.driver.list_nodes()[0]
        self.assertRaises(NotImplementedError, self.driver.reboot_node,
                          [node])

    def test_destroy_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.destroy_node(node))

    def test_create_node(self):
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        node = self.driver.create_node(
            name='Test Node', image=image, size=size)
        self.assertEqual('srv-p61uj', node.id)
        self.assertEqual('Test Node', node.name)
        # Default zone when no location is given.
        self.assertEqual('gb1-a', node.extra['zone'].name)

    def test_create_node_in_location(self):
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        location = self.driver.list_locations()[1]
        node = self.driver.create_node(
            name='Test Node', image=image, size=size, location=location)
        self.assertEqual('srv-nnumd', node.id)
        self.assertEqual('Test Node', node.name)
        self.assertEqual('gb1-b', node.extra['zone'].name)

    def test_create_node_with_user_data(self):
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        node = self.driver.create_node(
            name='Test Node', image=image, size=size, ex_userdata=USER_DATA)
        # The driver must transmit user data base64-encoded.
        decoded = base64.b64decode(b(node.extra['user_data'])).decode('ascii')
        self.assertEqual('gb1-a', node.extra['zone'].name)
        self.assertEqual(USER_DATA, decoded)

    def test_create_node_with_a_server_group(self):
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        node = self.driver.create_node(
            name='Test Node', image=image, size=size,
            ex_servergroup='grp-12345')
        self.assertEqual('gb1-a', node.extra['zone'].name)
        self.assertEqual(len(node.extra['server_groups']), 1)
        self.assertEqual(node.extra['server_groups'][0]['id'], 'grp-12345')

    def test_create_node_with_a_list_of_server_groups(self):
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        node = self.driver.create_node(
            name='Test Node', image=image, size=size,
            ex_servergroup=['grp-12345', 'grp-67890'])
        self.assertEqual('gb1-a', node.extra['zone'].name)
        self.assertEqual(len(node.extra['server_groups']), 2)
        self.assertEqual(node.extra['server_groups'][0]['id'], 'grp-12345')
        self.assertEqual(node.extra['server_groups'][1]['id'], 'grp-67890')

    def test_list_cloud_ips(self):
        cip_list = self.driver.ex_list_cloud_ips()
        self.assertEqual(len(cip_list), 4)
        self.assertEqual(cip_list[2]['status'], 'mapped')
        cip_check = cip_list[0]
        self.assertEqual(cip_check['id'], 'cip-tlrp3')
        self.assertEqual(cip_check['public_ip'], '109.107.35.16')
        self.assertEqual(
            cip_check['reverse_dns'], 'cip-109-107-35-16.gb1.brightbox.com')
        self.assertEqual(cip_check['status'], 'unmapped')
        self.assertTrue(cip_check['server'] is None)
        self.assertTrue(cip_check['server_group'] is None)
        self.assertTrue(cip_check['interface'] is None)
        self.assertTrue(cip_check['load_balancer'] is None)

    def test_create_cloud_ip(self):
        cip = self.driver.ex_create_cloud_ip()
        self.assertEqual(cip['id'], 'cip-jsjc5')
        self.assertEqual(
            cip['reverse_dns'], 'cip-109-107-37-234.gb1.brightbox.com')

    def test_create_cloud_ip_with_dns(self):
        cip = self.driver.ex_create_cloud_ip('fred.co.uk')
        self.assertEqual(cip['id'], 'cip-jsjc5')
        self.assertEqual(cip['reverse_dns'], 'fred.co.uk')

    def test_map_cloud_ip(self):
        self.assertTrue(self.driver.ex_map_cloud_ip('cip-jsjc5', 'int-ztqbx'))

    def test_unmap_cloud_ip(self):
        self.assertTrue(self.driver.ex_unmap_cloud_ip('cip-jsjc5'))

    def test_update_cloud_ip(self):
        self.assertTrue(
            self.driver.ex_update_cloud_ip('cip-jsjc5', 'fred.co.uk'))

    def test_destroy_cloud_ip(self):
        self.assertTrue(self.driver.ex_destroy_cloud_ip('cip-jsjc5'))


class BrightboxMockHttp(MockHttp):
    """Mock transport for the Brightbox OAuth and 1.0 API endpoints."""

    fixtures = ComputeFileFixtures('brightbox')

    def _token(self, method, url, body, headers):
        if method == 'POST':
            return self.test_response(httplib.OK,
                                      self.fixtures.load('token.json'))

    def _token_INVALID_CLIENT(self, method, url, body, headers):
        if method == 'POST':
            return self.test_response(httplib.BAD_REQUEST,
                                      '{"error":"invalid_client"}')

    def _token_UNAUTHORIZED_CLIENT(self, method, url, body, headers):
        if method == 'POST':
            return self.test_response(httplib.UNAUTHORIZED,
                                      '{"error":"unauthorized_client"}')

    def _1_0_servers_INVALID_CLIENT(self, method, url, body, headers):
        return self.test_response(httplib.BAD_REQUEST,
                                  '{"error":"invalid_client"}')

    def _1_0_servers_UNAUTHORIZED_CLIENT(self, method, url, body, headers):
        return self.test_response(httplib.UNAUTHORIZED,
                                  '{"error":"unauthorized_client"}')

    def _1_0_images(self, method, url, body, headers):
        if method == 'GET':
            return self.test_response(httplib.OK,
                                      self.fixtures.load('list_images.json'))

    def _1_0_servers(self, method, url, body, headers):
        if method == 'GET':
            return self.test_response(httplib.OK,
                                      self.fixtures.load('list_servers.json'))
        elif method == 'POST':
            body = json.loads(body)
            encoded = base64.b64encode(b(USER_DATA)).decode('ascii')

            # Reject user data that the driver failed to base64-encode.
            if 'user_data' in body and body['user_data'] != encoded:
                data = '{"error_name":"dodgy user data", "errors": ["User data not encoded properly"]}'
                return self.test_response(httplib.BAD_REQUEST, data)
            # Serve the zone-specific create fixture, then echo back the
            # request's name / server_groups / user_data fields.
            if body.get('zone', '') == 'zon-remk1':
                node = json.loads(
                    self.fixtures.load('create_server_gb1_b.json'))
            else:
                node = json.loads(
                    self.fixtures.load('create_server_gb1_a.json'))
            node['name'] = body['name']
            if 'server_groups' in body:
                node['server_groups'] = [{'id': x}
                                         for x in body['server_groups']]
            if 'user_data' in body:
                node['user_data'] = body['user_data']
            return self.test_response(httplib.ACCEPTED, json.dumps(node))

    def _1_0_servers_srv_xvpn7(self, method, url, body, headers):
        if method == 'DELETE':
            return self.test_response(httplib.ACCEPTED, '')

    def _1_0_server_types(self, method, url, body, headers):
        if method == 'GET':
            return self.test_response(
                httplib.OK, self.fixtures.load('list_server_types.json'))

    def _1_0_zones(self, method, url, body, headers):
        if method == 'GET':
            # The alternate host (test_other_host) has no zones.
            if headers['Host'] == 'api.gbt.brightbox.com':
                return self.test_response(httplib.OK, "{}")
            else:
                return self.test_response(
                    httplib.OK, self.fixtures.load('list_zones.json'))

    def _2_0_zones(self, method, url, body, headers):
        # Only API version 1.0 is implemented by this mock.
        data = '{"error_name":"unrecognised_endpoint", "errors": ["The request was for an unrecognised API endpoint"]}'
        return self.test_response(httplib.BAD_REQUEST, data)

    def _1_0_cloud_ips(self, method, url, body, headers):
        if method == 'GET':
            return self.test_response(
                httplib.OK, self.fixtures.load('list_cloud_ips.json'))
        elif method == 'POST':
            if body:
                body = json.loads(body)

            node = json.loads(self.fixtures.load('create_cloud_ip.json'))

            # Echo a requested reverse_dns back in the response.
            if 'reverse_dns' in body:
                node['reverse_dns'] = body['reverse_dns']
            return self.test_response(httplib.ACCEPTED, json.dumps(node))

    def _1_0_cloud_ips_cip_jsjc5(self, method, url, body, headers):
        if method == 'DELETE':
            return self.test_response(httplib.OK, '')
        elif method == 'PUT':
            body = json.loads(body)

            if body.get('reverse_dns', None) == 'fred.co.uk':
                return self.test_response(httplib.OK, '')
            else:
                return self.test_response(
                    httplib.BAD_REQUEST,
                    '{"error_name":"bad dns", "errors": ["Bad dns"]}')

    def _1_0_cloud_ips_cip_jsjc5_map(self, method, url, body, headers):
        if method == 'POST':
            body = json.loads(body)
            if 'destination' in body:
                return self.test_response(httplib.ACCEPTED, '')
            else:
                data = '{"error_name":"bad destination", "errors": ["Bad destination"]}'
                return self.test_response(httplib.BAD_REQUEST, data)

    def _1_0_cloud_ips_cip_jsjc5_unmap(self, method, url, body, headers):
        if method == 'POST':
            return self.test_response(httplib.ACCEPTED, '')

    def test_response(self, status, body):
        # Helper building the (status, body, headers, reason) tuple that
        # MockHttp handlers return, with a JSON content type.
        return (status, body, {'content-type': 'application/json'},
                httplib.responses[status])


if __name__ == '__main__':
    sys.exit(unittest.main())

# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4
# filetype=python
apache-libcloud-2.2.1/libcloud/test/compute/test_hostvirtual.py0000664000175000017500000002074613153541406024656 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest

from libcloud.utils.py3 import httplib

from libcloud.compute.drivers.hostvirtual import HostVirtualNodeDriver
from libcloud.compute.types import NodeState
from libcloud.compute.base import NodeAuthPassword

from libcloud.test import MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import HOSTVIRTUAL_PARAMS


class HostVirtualTest(unittest.TestCase):
    """Tests for the HostVirtual driver, served from JSON fixtures."""

    def setUp(self):
        HostVirtualNodeDriver.connectionCls.conn_class = HostVirtualMockHttp
        self.driver = HostVirtualNodeDriver(*HOSTVIRTUAL_PARAMS)

    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 4)
        self.assertEqual(len(nodes[0].public_ips), 1)
        self.assertEqual(len(nodes[1].public_ips), 1)
        self.assertEqual(len(nodes[0].private_ips), 0)
        self.assertEqual(len(nodes[1].private_ips), 0)
        self.assertTrue('208.111.39.118' in nodes[1].public_ips)
        self.assertTrue('208.111.45.250' in nodes[0].public_ips)
        self.assertEqual(nodes[3].state, NodeState.RUNNING)
        self.assertEqual(nodes[1].state, NodeState.TERMINATED)

    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 14)
        self.assertEqual(sizes[0].id, '31')
        self.assertEqual(sizes[4].id, '71')
        # NOTE: this API reports sizes as display strings, not numbers.
        self.assertEqual(sizes[2].ram, '512MB')
        self.assertEqual(sizes[2].disk, '20GB')
        self.assertEqual(sizes[3].bandwidth, '600GB')
        self.assertEqual(sizes[1].price, '15.00')

    def test_list_images(self):
        images = self.driver.list_images()
        self.assertEqual(len(images), 8)
        self.assertEqual(images[0].id, '1739')
        self.assertEqual(images[0].name, 'Gentoo 2012 (0619) i386')

    def test_list_locations(self):
        locations = self.driver.list_locations()
        self.assertEqual(locations[0].id, '3')
        self.assertEqual(locations[0].name, 'SJC - San Jose, CA')
        self.assertEqual(locations[1].id, '13')
        self.assertEqual(locations[1].name, 'IAD - Reston, VA')

    def test_reboot_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.reboot_node(node))

    def test_ex_get_node(self):
        node = self.driver.ex_get_node(node_id='62291')
        self.assertEqual(node.id, '62291')
        self.assertEqual(node.name, 'server1.vr-cluster.org')
        self.assertEqual(node.state, NodeState.TERMINATED)
        self.assertTrue('208.111.45.250' in node.public_ips)

    def test_ex_list_packages(self):
        pkgs = self.driver.ex_list_packages()
        self.assertEqual(len(pkgs), 3)
        self.assertEqual(pkgs[1]['mbpkgid'], '176018')
        self.assertEqual(pkgs[2]['package_status'], 'Suspended')

    def test_ex_order_package(self):
        sizes = self.driver.list_sizes()
        pkg = self.driver.ex_order_package(sizes[0])
        self.assertEqual(pkg['id'], '62291')

    def test_ex_cancel_package(self):
        node = self.driver.list_nodes()[0]
        result = self.driver.ex_cancel_package(node)
        self.assertEqual(result['status'], 'success')

    def test_ex_unlink_package(self):
        node = self.driver.list_nodes()[0]
        result = self.driver.ex_unlink_package(node)
        self.assertEqual(result['status'], 'success')

    def test_ex_stop_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.ex_stop_node(node))

    def test_ex_start_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.ex_start_node(node))

    def test_destroy_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.destroy_node(node))

    def test_ex_delete_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.ex_delete_node(node))

    def test_create_node(self):
        auth = NodeAuthPassword('vr!@#hosted#@!')
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        node = self.driver.create_node(
            name='test.com',
            image=image,
            size=size,
            auth=auth
        )
        self.assertEqual('62291', node.id)
        self.assertEqual('server1.vr-cluster.org', node.name)

    def test_ex_provision_node(self):
        node = self.driver.list_nodes()[0]
        auth = NodeAuthPassword('vr!@#hosted#@!')
        self.assertTrue(self.driver.ex_provision_node(
            node=node,
            auth=auth
        ))

    def test_create_node_in_location(self):
        auth = NodeAuthPassword('vr!@#hosted#@!')
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        location = self.driver.list_locations()[1]
        node = self.driver.create_node(
            name='test.com',
            image=image,
            size=size,
            auth=auth,
            location=location
        )
        self.assertEqual('62291', node.id)
        self.assertEqual('server1.vr-cluster.org', node.name)


class HostVirtualMockHttp(MockHttp):
    """Mock transport: one handler per HostVirtual /cloud/* API path."""

    fixtures = ComputeFileFixtures('hostvirtual')

    def _cloud_servers(self, method, url, body, headers):
        body = self.fixtures.load('list_nodes.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_server(self, method, url, body, headers):
        body = self.fixtures.load('get_node.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_packages(self, method, url, body, headers):
        body = self.fixtures.load('list_packages.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_sizes(self, method, url, body, headers):
        body = self.fixtures.load('list_sizes.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_images(self, method, url, body, headers):
        body = self.fixtures.load('list_images.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_locations(self, method, url, body, headers):
        body = self.fixtures.load('list_locations.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_server_delete(self, method, url, body, headers):
        body = self.fixtures.load('cancel_package.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_server_reboot(self, method, url, body, headers):
        body = self.fixtures.load('node_reboot.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_server_shutdown(self, method, url, body, headers):
        body = self.fixtures.load('node_stop.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_server_start(self, method, url, body, headers):
        body = self.fixtures.load('node_start.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_server_build(self, method, url, body, headers):
        body = self.fixtures.load('order_package.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_buy(self, method, url, body, headers):
        body = self.fixtures.load('order_package.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_cancel(self, method, url, body, headers):
        body = self.fixtures.load('cancel_package.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _cloud_unlink(self, method, url, body, headers):
        body = self.fixtures.load('unlink_package.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())

# vim:autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4 filetype=python
apache-libcloud-2.2.1/libcloud/test/compute/test_cloudstack.py0000664000175000017500000015612113153541406024423 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys import os from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import parse_qsl try: import simplejson as json except ImportError: import json from libcloud.compute.base import NodeLocation from libcloud.common.types import ProviderError from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver, \ CloudStackAffinityGroupType from libcloud.compute.types import LibcloudError, Provider, InvalidCredsError from libcloud.compute.types import KeyPairDoesNotExistError from libcloud.compute.types import NodeState from libcloud.compute.providers import get_driver from libcloud.test import unittest from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures class CloudStackCommonTestCase(TestCaseMixin): driver_klass = CloudStackNodeDriver def setUp(self): self.driver_klass.connectionCls.conn_class = CloudStackMockHttp self.driver = self.driver_klass('apikey', 'secret', path='/test/path', host='api.dummy.com') self.driver.path = '/test/path' self.driver.type = -1 CloudStackMockHttp.type = None CloudStackMockHttp.fixture_tag = 'default' self.driver.connection.poll_interval = 0.0 def test_invalid_credentials(self): CloudStackMockHttp.type = 'invalid_credentials' driver = self.driver_klass('invalid', 'invalid', path='/test/path', host='api.dummy.com') self.assertRaises(InvalidCredsError, driver.list_nodes) def test_import_keypair_from_string_api_error(self): CloudStackMockHttp.type = 'api_error' name = 'test-pair' key_material = '' expected_msg = 'Public key is invalid' self.assertRaisesRegexp(ProviderError, expected_msg, self.driver.import_key_pair_from_string, name=name, key_material=key_material) def test_create_node_immediate_failure(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] CloudStackMockHttp.fixture_tag = 'deployfail' self.assertRaises( Exception, self.driver.create_node, 
name='node-name', image=image, size=size) def test_create_node_delayed_failure(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] CloudStackMockHttp.fixture_tag = 'deployfail2' self.assertRaises( Exception, self.driver.create_node, name='node-name', image=image, size=size) def test_create_node_default_location_success(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] default_location = self.driver.list_locations()[0] node = self.driver.create_node(name='fred', image=image, size=size) self.assertEqual(node.name, 'fred') self.assertEqual(node.public_ips, []) self.assertEqual(node.private_ips, ['192.168.1.2']) self.assertEqual(node.extra['zone_id'], default_location.id) def test_create_node_ex_networks(self): CloudStackMockHttp.fixture_tag = 'deploynetworks' size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] location = self.driver.list_locations()[0] networks = [nw for nw in self.driver.ex_list_networks() if str(nw.zoneid) == str(location.id)] node = self.driver.create_node(name='deploynetworks', location=location, image=image, size=size, networks=networks) self.assertEqual(node.name, 'deploynetworks') self.assertEqual(node.extra['size_id'], size.id) self.assertEqual(node.extra['zone_id'], location.id) self.assertEqual(node.extra['image_id'], image.id) self.assertEqual(len(node.private_ips), 2) def test_create_node_ex_ipaddress(self): CloudStackMockHttp.fixture_tag = 'deployip' size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] location = self.driver.list_locations()[0] ipaddress = '10.1.0.128' networks = [nw for nw in self.driver.ex_list_networks() if str(nw.zoneid) == str(location.id)] node = self.driver.create_node(name='deployip', location=location, image=image, size=size, networks=networks, ex_ip_address=ipaddress) self.assertEqual(node.name, 'deployip') self.assertEqual(node.extra['size_id'], size.id) self.assertEqual(node.extra['zone_id'], location.id) 
self.assertEqual(node.extra['image_id'], image.id) self.assertEqual(node.private_ips[0], ipaddress) def test_create_node_ex_rootdisksize(self): CloudStackMockHttp.fixture_tag = 'rootdisksize' size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] location = self.driver.list_locations()[0] volumes = self.driver.list_volumes() rootdisksize = '50' networks = [nw for nw in self.driver.ex_list_networks() if str(nw.zoneid) == str(location.id)] node = self.driver.create_node(name='rootdisksize', location=location, image=image, size=size, networks=networks, ex_rootdisksize=rootdisksize) self.assertEqual(node.name, 'rootdisksize') self.assertEqual(node.extra['size_id'], size.id) self.assertEqual(node.extra['zone_id'], location.id) self.assertEqual(node.extra['image_id'], image.id) self.assertEqual(1, len(volumes)) self.assertEqual('ROOT-69941', volumes[0].name) self.assertEqual(53687091200, volumes[0].size) def test_create_node_ex_start_vm_false(self): CloudStackMockHttp.fixture_tag = 'stoppedvm' size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] location = self.driver.list_locations()[0] networks = [nw for nw in self.driver.ex_list_networks() if str(nw.zoneid) == str(location.id)] node = self.driver.create_node(name='stopped_vm', location=location, image=image, size=size, networks=networks, ex_start_vm=False) self.assertEqual(node.name, 'stopped_vm') self.assertEqual(node.extra['size_id'], size.id) self.assertEqual(node.extra['zone_id'], location.id) self.assertEqual(node.extra['image_id'], image.id) self.assertEqual(node.state, NodeState.STOPPED) def test_create_node_ex_security_groups(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] location = self.driver.list_locations()[0] sg = [sg['name'] for sg in self.driver.ex_list_security_groups()] CloudStackMockHttp.fixture_tag = 'deploysecuritygroup' node = self.driver.create_node(name='test', location=location, image=image, size=size, ex_security_groups=sg) 
self.assertEqual(node.name, 'test') self.assertEqual(node.extra['security_group'], sg) self.assertEqual(node.id, 'fc4fd31a-16d3-49db-814a-56b39b9ef986') def test_create_node_ex_keyname(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] location = self.driver.list_locations()[0] CloudStackMockHttp.fixture_tag = 'deploykeyname' node = self.driver.create_node(name='test', location=location, image=image, size=size, ex_keyname='foobar') self.assertEqual(node.name, 'test') self.assertEqual(node.extra['key_name'], 'foobar') def test_create_node_ex_userdata(self): self.driver.path = '/test/path/userdata' size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] location = self.driver.list_locations()[0] CloudStackMockHttp.fixture_tag = 'deploykeyname' node = self.driver.create_node(name='test', location=location, image=image, size=size, ex_userdata='foobar') self.assertEqual(node.name, 'test') def test_create_node_project(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] location = self.driver.list_locations()[0] project = self.driver.ex_list_projects()[0] CloudStackMockHttp.fixture_tag = 'deployproject' node = self.driver.create_node(name='test', location=location, image=image, size=size, project=project) self.assertEqual(node.name, 'TestNode') self.assertEqual(node.extra['project'], 'Test Project') def test_list_images_no_images_available(self): CloudStackMockHttp.fixture_tag = 'notemplates' images = self.driver.list_images() self.assertEqual(0, len(images)) def test_list_images(self): _, fixture = self.driver.connection.connection._load_fixture( 'listTemplates_default.json') templates = fixture['listtemplatesresponse']['template'] images = self.driver.list_images() for i, image in enumerate(images): # NodeImage expects id to be a string, # the CloudStack fixture has an int tid = str(templates[i]['id']) tname = templates[i]['name'] self.assertIsInstance(image.driver, CloudStackNodeDriver) 
self.assertEqual(image.id, tid) self.assertEqual(image.name, tname) def test_ex_list_disk_offerings(self): diskOfferings = self.driver.ex_list_disk_offerings() self.assertEqual(1, len(diskOfferings)) diskOffering, = diskOfferings self.assertEqual('Disk offer 1', diskOffering.name) self.assertEqual(10, diskOffering.size) def test_ex_list_networks(self): _, fixture = self.driver.connection.connection._load_fixture( 'listNetworks_default.json') fixture_networks = fixture['listnetworksresponse']['network'] networks = self.driver.ex_list_networks() for i, network in enumerate(networks): self.assertEqual(network.id, fixture_networks[i]['id']) self.assertEqual( network.displaytext, fixture_networks[i]['displaytext']) self.assertEqual(network.name, fixture_networks[i]['name']) self.assertEqual( network.networkofferingid, fixture_networks[i]['networkofferingid']) self.assertEqual(network.zoneid, fixture_networks[i]['zoneid']) def test_ex_list_network_offerings(self): _, fixture = self.driver.connection.connection._load_fixture( 'listNetworkOfferings_default.json') fixture_networkoffers = \ fixture['listnetworkofferingsresponse']['networkoffering'] networkoffers = self.driver.ex_list_network_offerings() for i, networkoffer in enumerate(networkoffers): self.assertEqual(networkoffer.id, fixture_networkoffers[i]['id']) self.assertEqual(networkoffer.name, fixture_networkoffers[i]['name']) self.assertEqual(networkoffer.display_text, fixture_networkoffers[i]['displaytext']) self.assertEqual(networkoffer.for_vpc, fixture_networkoffers[i]['forvpc']) self.assertEqual(networkoffer.guest_ip_type, fixture_networkoffers[i]['guestiptype']) self.assertEqual(networkoffer.service_offering_id, fixture_networkoffers[i]['serviceofferingid']) def test_ex_create_network(self): _, fixture = self.driver.connection.connection._load_fixture( 'createNetwork_default.json') fixture_network = fixture['createnetworkresponse']['network'] netoffer = self.driver.ex_list_network_offerings()[0] location = 
self.driver.list_locations()[0] network = self.driver.ex_create_network(display_text='test', name='test', network_offering=netoffer, location=location, gateway='10.1.1.1', netmask='255.255.255.0', network_domain='cloud.local', vpc_id="2", project_id="2") self.assertEqual(network.name, fixture_network['name']) self.assertEqual(network.displaytext, fixture_network['displaytext']) self.assertEqual(network.id, fixture_network['id']) self.assertEqual(network.extra['gateway'], fixture_network['gateway']) self.assertEqual(network.extra['netmask'], fixture_network['netmask']) self.assertEqual(network.networkofferingid, fixture_network['networkofferingid']) self.assertEqual(network.extra['vpc_id'], fixture_network['vpcid']) self.assertEqual(network.extra['project_id'], fixture_network['projectid']) def test_ex_delete_network(self): network = self.driver.ex_list_networks()[0] result = self.driver.ex_delete_network(network=network) self.assertTrue(result) def test_ex_list_nics(self): _, fixture = self.driver.connection.connection._load_fixture( 'listNics_default.json') fixture_nic = fixture['listnicsresponse']['nic'] vm = self.driver.list_nodes()[0] nics = self.driver.ex_list_nics(vm) for i, nic in enumerate(nics): self.assertEqual(nic.id, fixture_nic[i]['id']) self.assertEqual(nic.network_id, fixture_nic[i]['networkid']) self.assertEqual(nic.net_mask, fixture_nic[i]['netmask']) self.assertEqual(nic.gateway, fixture_nic[i]['gateway']) self.assertEqual(nic.ip_address, fixture_nic[i]['ipaddress']) self.assertEqual(nic.is_default, fixture_nic[i]['isdefault']) self.assertEqual(nic.mac_address, fixture_nic[i]['macaddress']) def test_ex_add_nic_to_node(self): vm = self.driver.list_nodes()[0] network = self.driver.ex_list_networks()[0] ip = "10.1.4.123" result = self.driver.ex_attach_nic_to_node(node=vm, network=network, ip_address=ip) self.assertTrue(result) def test_ex_remove_nic_from_node(self): vm = self.driver.list_nodes()[0] nic = self.driver.ex_list_nics(node=vm)[0] result = 
self.driver.ex_detach_nic_from_node(node=vm, nic=nic) self.assertTrue(result) def test_ex_list_vpc_offerings(self): _, fixture = self.driver.connection.connection._load_fixture( 'listVPCOfferings_default.json') fixture_vpcoffers = \ fixture['listvpcofferingsresponse']['vpcoffering'] vpcoffers = self.driver.ex_list_vpc_offerings() for i, vpcoffer in enumerate(vpcoffers): self.assertEqual(vpcoffer.id, fixture_vpcoffers[i]['id']) self.assertEqual(vpcoffer.name, fixture_vpcoffers[i]['name']) self.assertEqual(vpcoffer.display_text, fixture_vpcoffers[i]['displaytext']) def test_ex_list_vpcs(self): _, fixture = self.driver.connection.connection._load_fixture( 'listVPCs_default.json') fixture_vpcs = fixture['listvpcsresponse']['vpc'] vpcs = self.driver.ex_list_vpcs() for i, vpc in enumerate(vpcs): self.assertEqual(vpc.id, fixture_vpcs[i]['id']) self.assertEqual(vpc.display_text, fixture_vpcs[i]['displaytext']) self.assertEqual(vpc.name, fixture_vpcs[i]['name']) self.assertEqual(vpc.vpc_offering_id, fixture_vpcs[i]['vpcofferingid']) self.assertEqual(vpc.zone_id, fixture_vpcs[i]['zoneid']) def test_ex_list_routers(self): _, fixture = self.driver.connection.connection._load_fixture( 'listRouters_default.json') fixture_routers = fixture['listroutersresponse']['router'] routers = self.driver.ex_list_routers() for i, router in enumerate(routers): self.assertEqual(router.id, fixture_routers[i]['id']) self.assertEqual(router.name, fixture_routers[i]['name']) self.assertEqual(router.state, fixture_routers[i]['state']) self.assertEqual(router.public_ip, fixture_routers[i]['publicip']) self.assertEqual(router.vpc_id, fixture_routers[i]['vpcid']) def test_ex_create_vpc(self): _, fixture = self.driver.connection.connection._load_fixture( 'createVPC_default.json') fixture_vpc = fixture['createvpcresponse'] vpcoffer = self.driver.ex_list_vpc_offerings()[0] vpc = self.driver.ex_create_vpc(cidr='10.1.1.0/16', display_text='cloud.local', name='cloud.local', vpc_offering=vpcoffer, 
zone_id="2") self.assertEqual(vpc.id, fixture_vpc['id']) def test_ex_delete_vpc(self): vpc = self.driver.ex_list_vpcs()[0] result = self.driver.ex_delete_vpc(vpc=vpc) self.assertTrue(result) def test_ex_create_network_acllist(self): _, fixture = self.driver.connection.connection._load_fixture( 'createNetworkACLList_default.json') fixture_network_acllist = fixture['createnetworkacllistresponse'] vpc = self.driver.ex_list_vpcs()[0] network_acllist = self.driver.ex_create_network_acllist( name='test_acllist', vpc_id=vpc.id, description='test description') self.assertEqual(network_acllist.id, fixture_network_acllist['id']) def test_ex_list_network_acllist(self): _, fixture = self.driver.connection.connection._load_fixture( 'listNetworkACLLists_default.json') fixture_acllist = \ fixture['listnetworkacllistsresponse']['networkacllist'] acllist = self.driver.ex_list_network_acllists() for i, acllist in enumerate(acllist): self.assertEqual(acllist.id, fixture_acllist[i]['id']) self.assertEqual(acllist.name, fixture_acllist[i]['name']) self.assertEqual(acllist.description, fixture_acllist[i]['description']) def test_ex_create_network_acl(self): _, fixture = self.driver.connection.connection._load_fixture( 'createNetworkACL_default.json') fixture_network_acllist = fixture['createnetworkaclresponse'] acllist = self.driver.ex_list_network_acllists()[0] network_acl = self.driver.ex_create_network_acl( protocol='test_acllist', acl_id=acllist.id, cidr_list='', start_port='80', end_port='80') self.assertEqual(network_acl.id, fixture_network_acllist['id']) def test_ex_list_projects(self): _, fixture = self.driver.connection.connection._load_fixture( 'listProjects_default.json') fixture_projects = fixture['listprojectsresponse']['project'] projects = self.driver.ex_list_projects() for i, project in enumerate(projects): self.assertEqual(project.id, fixture_projects[i]['id']) self.assertEqual( project.display_text, fixture_projects[i]['displaytext']) self.assertEqual(project.name, 
fixture_projects[i]['name']) self.assertEqual( project.extra['domainid'], fixture_projects[i]['domainid']) self.assertEqual( project.extra['cpulimit'], fixture_projects[i]['cpulimit']) # Note -1 represents unlimited self.assertEqual(project.extra['networklimit'], -1) def test_create_volume(self): volumeName = 'vol-0' location = self.driver.list_locations()[0] volume = self.driver.create_volume(10, volumeName, location) self.assertEqual(volumeName, volume.name) self.assertEqual(10, volume.size) def test_create_volume_no_noncustomized_offering_with_size(self): """If the sizes of disk offerings are not configurable and there are no disk offerings with the requested size, an exception should be thrown.""" location = self.driver.list_locations()[0] self.assertRaises( LibcloudError, self.driver.create_volume, 'vol-0', location, 11) def test_create_volume_with_custom_disk_size_offering(self): CloudStackMockHttp.fixture_tag = 'withcustomdisksize' volumeName = 'vol-0' location = self.driver.list_locations()[0] volume = self.driver.create_volume(10, volumeName, location) self.assertEqual(volumeName, volume.name) def test_create_volume_no_matching_volume_type(self): """If the ex_disk_type does not exit, then an exception should be thrown.""" location = self.driver.list_locations()[0] self.assertRaises( LibcloudError, self.driver.create_volume, 'vol-0', location, 11, ex_volume_type='FooVolumeType') def test_create_volume_with_defined_volume_type(self): CloudStackMockHttp.fixture_tag = 'withvolumetype' volumeName = 'vol-0' volLocation = self.driver.list_locations()[0] diskOffering = self.driver.ex_list_disk_offerings()[0] volumeType = diskOffering.name volume = self.driver.create_volume(10, volumeName, location=volLocation, ex_volume_type=volumeType) self.assertEqual(volumeName, volume.name) def test_attach_volume(self): node = self.driver.list_nodes()[0] volumeName = 'vol-0' location = self.driver.list_locations()[0] volume = self.driver.create_volume(10, volumeName, location) 
attachReturnVal = self.driver.attach_volume(volume, node) self.assertTrue(attachReturnVal) def test_detach_volume(self): volumeName = 'gre-test-volume' location = self.driver.list_locations()[0] volume = self.driver.create_volume(10, volumeName, location) res = self.driver.detach_volume(volume) self.assertTrue(res) def test_destroy_volume(self): volumeName = 'gre-test-volume' location = self.driver.list_locations()[0] volume = self.driver.create_volume(10, volumeName, location) res = self.driver.destroy_volume(volume) self.assertTrue(res) def test_list_volumes(self): volumes = self.driver.list_volumes() self.assertEqual(1, len(volumes)) self.assertEqual('ROOT-69942', volumes[0].name) def test_ex_get_volume(self): volume = self.driver.ex_get_volume(2600) self.assertEqual('ROOT-69942', volume.name) def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertEqual(2, len(nodes)) self.assertEqual('test', nodes[0].name) self.assertEqual('2600', nodes[0].id) self.assertEqual(0, len(nodes[0].private_ips)) self.assertEqual([], nodes[0].extra['security_group']) self.assertEqual(None, nodes[0].extra['key_name']) self.assertEqual(1, len(nodes[0].public_ips)) self.assertEqual('1.1.1.116', nodes[0].public_ips[0]) self.assertEqual(1, len(nodes[0].extra['ip_addresses'])) self.assertEqual(34000, nodes[0].extra['ip_addresses'][0].id) self.assertEqual(1, len(nodes[0].extra['ip_forwarding_rules'])) self.assertEqual('772fd410-6649-43ed-befa-77be986b8906', nodes[0].extra['ip_forwarding_rules'][0].id) self.assertEqual(1, len(nodes[0].extra['port_forwarding_rules'])) self.assertEqual('bc7ea3ee-a2c3-4b86-a53f-01bdaa1b2e32', nodes[0].extra['port_forwarding_rules'][0].id) self.assertEqual({"testkey": "testvalue", "foo": "bar"}, nodes[0].extra['tags']) def test_list_nodes_location_filter(self): def list_nodes_mock(self, **kwargs): self.assertTrue('zoneid' in kwargs) self.assertEqual('1', kwargs.get('zoneid')) body, obj = self._load_fixture('listVirtualMachines_default.json') 
return (httplib.OK, body, {}, httplib.responses[httplib.OK]) CloudStackMockHttp._cmd_listVirtualMachines = list_nodes_mock try: location = NodeLocation(1, 'Sydney', 'Unknown', self.driver) self.driver.list_nodes(location=location) finally: del CloudStackMockHttp._cmd_listVirtualMachines def test_ex_get_node(self): node = self.driver.ex_get_node(2600) self.assertEqual('test', node.name) self.assertEqual('2600', node.id) self.assertEqual([], node.extra['security_group']) self.assertEqual(None, node.extra['key_name']) self.assertEqual(1, len(node.public_ips)) self.assertEqual('1.1.1.116', node.public_ips[0]) self.assertEqual(1, len(node.extra['ip_addresses'])) self.assertEqual(34000, node.extra['ip_addresses'][0].id) def test_ex_get_node_doesnt_exist(self): self.assertRaises(Exception, self.driver.ex_get_node(26), node_id=26) def test_list_locations(self): location = self.driver.list_locations()[0] self.assertEqual('1', location.id) self.assertEqual('Sydney', location.name) def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual('Compute Micro PRD', sizes[0].name) self.assertEqual('105', sizes[0].id) self.assertEqual(384, sizes[0].ram) self.assertEqual('Compute Large PRD', sizes[2].name) self.assertEqual('69', sizes[2].id) self.assertEqual(6964, sizes[2].ram) def test_ex_start_node(self): node = self.driver.list_nodes()[0] res = node.ex_start() self.assertEqual('Starting', res) def test_ex_stop_node(self): node = self.driver.list_nodes()[0] res = node.ex_stop() self.assertEqual('Stopped', res) def test_destroy_node(self): node = self.driver.list_nodes()[0] res = node.destroy() self.assertTrue(res) def test_expunge_node(self): node = self.driver.list_nodes()[0] res = self.driver.destroy_node(node, ex_expunge=True) self.assertTrue(res) def test_reboot_node(self): node = self.driver.list_nodes()[0] res = node.reboot() self.assertTrue(res) def test_list_key_pairs(self): keypairs = self.driver.list_key_pairs() fingerprint = 
'00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \ '00:00:00:00:00' self.assertEqual(keypairs[0].name, 'cs-keypair') self.assertEqual(keypairs[0].fingerprint, fingerprint) # Test old and deprecated way keypairs = self.driver.ex_list_keypairs() self.assertEqual(keypairs[0]['name'], 'cs-keypair') self.assertEqual(keypairs[0]['fingerprint'], fingerprint) def test_list_key_pairs_no_keypair_key(self): CloudStackMockHttp.fixture_tag = 'no_keys' keypairs = self.driver.list_key_pairs() self.assertEqual(keypairs, []) def test_get_key_pair(self): CloudStackMockHttp.fixture_tag = 'get_one' key_pair = self.driver.get_key_pair(name='cs-keypair') self.assertEqual(key_pair.name, 'cs-keypair') def test_get_key_pair_doesnt_exist(self): CloudStackMockHttp.fixture_tag = 'get_one_doesnt_exist' self.assertRaises(KeyPairDoesNotExistError, self.driver.get_key_pair, name='does-not-exist') def test_create_keypair(self): key_pair = self.driver.create_key_pair(name='test-keypair') self.assertEqual(key_pair.name, 'test-keypair') self.assertTrue(key_pair.fingerprint is not None) self.assertTrue(key_pair.private_key is not None) # Test old and deprecated way res = self.driver.ex_create_keypair(name='test-keypair') self.assertEqual(res['name'], 'test-keypair') self.assertTrue(res['fingerprint'] is not None) self.assertTrue(res['privateKey'] is not None) def test_import_keypair_from_file(self): fingerprint = 'c4:a1:e5:d4:50:84:a9:4c:6b:22:ee:d6:57:02:b8:15' path = os.path.join(os.path.dirname(__file__), 'fixtures', 'cloudstack', 'dummy_rsa.pub') key_pair = self.driver.import_key_pair_from_file('foobar', path) self.assertEqual(key_pair.name, 'foobar') self.assertEqual(key_pair.fingerprint, fingerprint) # Test old and deprecated way res = self.driver.ex_import_keypair('foobar', path) self.assertEqual(res['keyName'], 'foobar') self.assertEqual(res['keyFingerprint'], fingerprint) def test_ex_import_keypair_from_string(self): fingerprint = 'c4:a1:e5:d4:50:84:a9:4c:6b:22:ee:d6:57:02:b8:15' path = 
os.path.join(os.path.dirname(__file__), 'fixtures', 'cloudstack', 'dummy_rsa.pub') fh = open(path) key_material = fh.read() fh.close() key_pair = self.driver.import_key_pair_from_string('foobar', key_material=key_material) self.assertEqual(key_pair.name, 'foobar') self.assertEqual(key_pair.fingerprint, fingerprint) # Test old and deprecated way res = self.driver.ex_import_keypair_from_string('foobar', key_material=key_material) self.assertEqual(res['keyName'], 'foobar') self.assertEqual(res['keyFingerprint'], fingerprint) def test_delete_key_pair(self): key_pair = self.driver.list_key_pairs()[0] res = self.driver.delete_key_pair(key_pair=key_pair) self.assertTrue(res) # Test old and deprecated way res = self.driver.ex_delete_keypair(keypair='cs-keypair') self.assertTrue(res) def test_ex_list_security_groups(self): groups = self.driver.ex_list_security_groups() self.assertEqual(2, len(groups)) self.assertEqual(groups[0]['name'], 'default') self.assertEqual(groups[1]['name'], 'mongodb') def test_ex_list_security_groups_no_securitygroup_key(self): CloudStackMockHttp.fixture_tag = 'no_groups' groups = self.driver.ex_list_security_groups() self.assertEqual(groups, []) def test_ex_create_security_group(self): group = self.driver.ex_create_security_group(name='MySG') self.assertEqual(group['name'], 'MySG') def test_ex_delete_security_group(self): res = self.driver.ex_delete_security_group(name='MySG') self.assertTrue(res) def test_ex_authorize_security_group_ingress(self): res = self.driver.ex_authorize_security_group_ingress('test_sg', 'udp', '0.0.0.0/0', '0', '65535') self.assertEqual(res.get('name'), 'test_sg') self.assertTrue('ingressrule' in res) rules = res['ingressrule'] self.assertEqual(len(rules), 1) rule = rules[0] self.assertEqual(rule['cidr'], '0.0.0.0/0') self.assertEqual(rule['endport'], 65535) self.assertEqual(rule['protocol'], 'udp') self.assertEqual(rule['startport'], 0) def test_ex_create_affinity_group(self): res = 
self.driver.ex_create_affinity_group('MyAG2', CloudStackAffinityGroupType('MyAGType')) self.assertEqual(res.name, 'MyAG2') self.assertIsInstance(res.type, CloudStackAffinityGroupType) self.assertEqual(res.type.type, 'MyAGType') def test_ex_create_affinity_group_already_exists(self): self.assertRaises(LibcloudError, self.driver.ex_create_affinity_group, 'MyAG', CloudStackAffinityGroupType('MyAGType')) def test_delete_ex_affinity_group(self): afg = self.driver.ex_create_affinity_group('MyAG3', CloudStackAffinityGroupType('MyAGType')) res = self.driver.ex_delete_affinity_group(afg) self.assertTrue(res) def test_ex_update_node_affinity_group(self): affinity_group_list = self.driver.ex_list_affinity_groups() nodes = self.driver.list_nodes() node = self.driver.ex_update_node_affinity_group(nodes[0], affinity_group_list) self.assertEqual(node.extra['affinity_group'][0], affinity_group_list[0].id) def test_ex_list_affinity_groups(self): res = self.driver.ex_list_affinity_groups() self.assertEqual(len(res), 1) self.assertEqual(res[0].id, '11112') self.assertEqual(res[0].name, 'MyAG') self.assertIsInstance(res[0].type, CloudStackAffinityGroupType) self.assertEqual(res[0].type.type, 'MyAGType') def test_ex_list_affinity_group_types(self): res = self.driver.ex_list_affinity_group_types() self.assertEqual(len(res), 1) self.assertIsInstance(res[0], CloudStackAffinityGroupType) self.assertEqual(res[0].type, 'MyAGType') def test_ex_list_public_ips(self): ips = self.driver.ex_list_public_ips() self.assertEqual(ips[0].address, '1.1.1.116') self.assertEqual(ips[0].virtualmachine_id, '2600') def test_ex_allocate_public_ip(self): addr = self.driver.ex_allocate_public_ip() self.assertEqual(addr.address, '7.5.6.1') self.assertEqual(addr.id, '10987171-8cc9-4d0a-b98f-1698c09ddd2d') def test_ex_release_public_ip(self): addresses = self.driver.ex_list_public_ips() res = self.driver.ex_release_public_ip(addresses[0]) self.assertTrue(res) def test_ex_create_port_forwarding_rule(self): node = 
self.driver.list_nodes()[0] address = self.driver.ex_list_public_ips()[0] private_port = 33 private_end_port = 34 public_port = 33 public_end_port = 34 openfirewall = True protocol = 'TCP' rule = self.driver.ex_create_port_forwarding_rule(node, address, private_port, public_port, protocol, public_end_port, private_end_port, openfirewall) self.assertEqual(rule.address, address) self.assertEqual(rule.protocol, protocol) self.assertEqual(rule.public_port, public_port) self.assertEqual(rule.public_end_port, public_end_port) self.assertEqual(rule.private_port, private_port) self.assertEqual(rule.private_end_port, private_end_port) def test_ex_list_firewall_rules(self): rules = self.driver.ex_list_firewall_rules() self.assertEqual(len(rules), 1) rule = rules[0] self.assertEqual(rule.address.address, '1.1.1.116') self.assertEqual(rule.protocol, 'tcp') self.assertEqual(rule.cidr_list, '192.168.0.0/16') self.assertIsNone(rule.icmp_code) self.assertIsNone(rule.icmp_type) self.assertEqual(rule.start_port, '33') self.assertEqual(rule.end_port, '34') def test_ex_list_firewall_rules_icmp(self): CloudStackMockHttp.fixture_tag = 'firewallicmp' rules = self.driver.ex_list_firewall_rules() self.assertEqual(len(rules), 1) rule = rules[0] self.assertEqual(rule.address.address, '1.1.1.116') self.assertEqual(rule.protocol, 'icmp') self.assertEqual(rule.cidr_list, '192.168.0.0/16') self.assertEqual(rule.icmp_code, 0) self.assertEqual(rule.icmp_type, 8) self.assertIsNone(rule.start_port) self.assertIsNone(rule.end_port) def test_ex_delete_firewall_rule(self): rules = self.driver.ex_list_firewall_rules() res = self.driver.ex_delete_firewall_rule(rules[0]) self.assertTrue(res) def test_ex_create_firewall_rule(self): address = self.driver.ex_list_public_ips()[0] cidr_list = '192.168.0.0/16' protocol = 'TCP' start_port = 33 end_port = 34 rule = self.driver.ex_create_firewall_rule(address, cidr_list, protocol, start_port=start_port, end_port=end_port) self.assertEqual(rule.address, address) 
self.assertEqual(rule.protocol, protocol) self.assertIsNone(rule.icmp_code) self.assertIsNone(rule.icmp_type) self.assertEqual(rule.start_port, start_port) self.assertEqual(rule.end_port, end_port) def test_ex_create_firewall_rule_icmp(self): address = self.driver.ex_list_public_ips()[0] cidr_list = '192.168.0.0/16' protocol = 'icmp' icmp_code = 0 icmp_type = 8 rule = self.driver.ex_create_firewall_rule(address, cidr_list, protocol, icmp_code=icmp_code, icmp_type=icmp_type) self.assertEqual(rule.address, address) self.assertEqual(rule.protocol, protocol) self.assertEqual(rule.icmp_code, 0) self.assertEqual(rule.icmp_type, 8) self.assertIsNone(rule.start_port) self.assertIsNone(rule.end_port) def test_ex_list_egress_firewall_rules(self): rules = self.driver.ex_list_egress_firewall_rules() self.assertEqual(len(rules), 1) rule = rules[0] self.assertEqual(rule.network_id, '874be2ca-20a7-4360-80e9-7356c0018c0b') self.assertEqual(rule.cidr_list, '192.168.0.0/16') self.assertEqual(rule.protocol, 'tcp') self.assertIsNone(rule.icmp_code) self.assertIsNone(rule.icmp_type) self.assertEqual(rule.start_port, '80') self.assertEqual(rule.end_port, '80') def test_ex_delete_egress_firewall_rule(self): rules = self.driver.ex_list_egress_firewall_rules() res = self.driver.ex_delete_egress_firewall_rule(rules[0]) self.assertTrue(res) def test_ex_create_egress_firewall_rule(self): network_id = '874be2ca-20a7-4360-80e9-7356c0018c0b' cidr_list = '192.168.0.0/16' protocol = 'TCP' start_port = 33 end_port = 34 rule = self.driver.ex_create_egress_firewall_rule( network_id, cidr_list, protocol, start_port=start_port, end_port=end_port) self.assertEqual(rule.network_id, network_id) self.assertEqual(rule.cidr_list, cidr_list) self.assertEqual(rule.protocol, protocol) self.assertIsNone(rule.icmp_code) self.assertIsNone(rule.icmp_type) self.assertEqual(rule.start_port, start_port) self.assertEqual(rule.end_port, end_port) def test_ex_list_port_forwarding_rules(self): rules = 
self.driver.ex_list_port_forwarding_rules() self.assertEqual(len(rules), 1) rule = rules[0] self.assertTrue(rule.node) self.assertEqual(rule.protocol, 'tcp') self.assertEqual(rule.public_port, '33') self.assertEqual(rule.public_end_port, '34') self.assertEqual(rule.private_port, '33') self.assertEqual(rule.private_end_port, '34') self.assertEqual(rule.address.address, '1.1.1.116') def test_ex_delete_port_forwarding_rule(self): node = self.driver.list_nodes()[0] rule = self.driver.ex_list_port_forwarding_rules()[0] res = self.driver.ex_delete_port_forwarding_rule(node, rule) self.assertTrue(res) def test_node_ex_delete_port_forwarding_rule(self): node = self.driver.list_nodes()[0] self.assertEqual(len(node.extra['port_forwarding_rules']), 1) node.extra['port_forwarding_rules'][0].delete() self.assertEqual(len(node.extra['port_forwarding_rules']), 0) def test_node_ex_create_port_forwarding_rule(self): node = self.driver.list_nodes()[0] self.assertEqual(len(node.extra['port_forwarding_rules']), 1) address = self.driver.ex_list_public_ips()[0] private_port = 33 private_end_port = 34 public_port = 33 public_end_port = 34 openfirewall = True protocol = 'TCP' rule = node.ex_create_port_forwarding_rule(address, private_port, public_port, protocol, public_end_port, private_end_port, openfirewall) self.assertEqual(rule.address, address) self.assertEqual(rule.protocol, protocol) self.assertEqual(rule.public_port, public_port) self.assertEqual(rule.public_end_port, public_end_port) self.assertEqual(rule.private_port, private_port) self.assertEqual(rule.private_end_port, private_end_port) self.assertEqual(len(node.extra['port_forwarding_rules']), 2) def test_ex_list_ip_forwarding_rules(self): rules = self.driver.ex_list_ip_forwarding_rules() self.assertEqual(len(rules), 1) rule = rules[0] self.assertTrue(rule.node) self.assertEqual(rule.protocol, 'tcp') self.assertEqual(rule.start_port, 33) self.assertEqual(rule.end_port, 34) self.assertEqual(rule.address.address, '1.1.1.116') 
def test_ex_limits(self): limits = self.driver.ex_limits() self.assertEqual(limits['max_images'], 20) self.assertEqual(limits['max_networks'], 20) self.assertEqual(limits['max_public_ips'], -1) self.assertEqual(limits['max_vpc'], 20) self.assertEqual(limits['max_instances'], 20) self.assertEqual(limits['max_projects'], -1) self.assertEqual(limits['max_volumes'], 20) self.assertEqual(limits['max_snapshots'], 20) def test_ex_create_tags(self): node = self.driver.list_nodes()[0] tags = {'Region': 'Canada'} resp = self.driver.ex_create_tags([node.id], 'UserVm', tags) self.assertTrue(resp) def test_ex_delete_tags(self): node = self.driver.list_nodes()[0] tag_keys = ['Region'] resp = self.driver.ex_delete_tags([node.id], 'UserVm', tag_keys) self.assertTrue(resp) def test_list_snapshots(self): snapshots = self.driver.list_snapshots() self.assertEqual(len(snapshots), 3) snap = snapshots[0] self.assertEqual(snap.id, 188402) self.assertEqual(snap.extra['name'], "i-123-87654-VM_ROOT-12344_20140917105548") self.assertEqual(snap.extra['volume_id'], 89341) def test_create_volume_snapshot(self): volume = self.driver.list_volumes()[0] snapshot = self.driver.create_volume_snapshot(volume) self.assertEqual(snapshot.id, 190547) self.assertEqual(snapshot.extra['name'], "i-123-87654-VM_ROOT-23456_20140917105548") self.assertEqual(snapshot.extra['volume_id'], "fe1ada16-57a0-40ae-b577-01a153690fb4") def test_destroy_volume_snapshot(self): snapshot = self.driver.list_snapshots()[0] resp = self.driver.destroy_volume_snapshot(snapshot) self.assertTrue(resp) def test_ex_create_snapshot_template(self): snapshot = self.driver.list_snapshots()[0] template = self.driver.ex_create_snapshot_template(snapshot, "test-libcloud-template", 99) self.assertEqual(template.id, '10260') self.assertEqual(template.name, "test-libcloud-template") self.assertEqual(template.extra['displaytext'], "test-libcloud-template") self.assertEqual(template.extra['hypervisor'], "VMware") 
self.assertEqual(template.extra['os'], "Other Linux (64-bit)") def test_ex_list_os_types(self): os_types = self.driver.ex_list_os_types() self.assertEqual(len(os_types), 146) self.assertEqual(os_types[0]['id'], 69) self.assertEqual(os_types[0]['oscategoryid'], 7) self.assertEqual(os_types[0]['description'], "Asianux 3(32-bit)") def test_ex_list_vpn_gateways(self): vpn_gateways = self.driver.ex_list_vpn_gateways() self.assertEqual(len(vpn_gateways), 1) self.assertEqual(vpn_gateways[0].id, 'cffa0cab-d1da-42a7-92f6-41379267a29f') self.assertEqual(vpn_gateways[0].account, 'some_account') self.assertEqual(vpn_gateways[0].domain, 'some_domain') self.assertEqual(vpn_gateways[0].domain_id, '9b397dea-25ef-4c5d-b47d-627eaebe8ed8') self.assertEqual(vpn_gateways[0].public_ip, '1.2.3.4') self.assertEqual(vpn_gateways[0].vpc_id, '4d25e181-8850-4d52-8ecb-a6f35bbbabde') def test_ex_create_vpn_gateway(self): vpc = self.driver.ex_list_vpcs()[0] vpn_gateway = self.driver.ex_create_vpn_gateway(vpc) self.assertEqual(vpn_gateway.id, '5ef6794e-cec8-4018-9fef-c4dacbadee14') self.assertEqual(vpn_gateway.account, 'some_account') self.assertEqual(vpn_gateway.domain, 'some_domain') self.assertEqual(vpn_gateway.domain_id, '9b397dea-25ef-4c5d-b47d-627eaebe8ed8') self.assertEqual(vpn_gateway.public_ip, '2.3.4.5') self.assertEqual(vpn_gateway.vpc_id, vpc.id) def test_ex_delete_vpn_gateway(self): vpn_gateway = self.driver.ex_list_vpn_gateways()[0] self.assertTrue(vpn_gateway.delete()) def test_ex_list_vpn_customer_gateways(self): vpn_customer_gateways = self.driver.ex_list_vpn_customer_gateways() self.assertEqual(len(vpn_customer_gateways), 1) self.assertEqual(vpn_customer_gateways[0].id, 'ea67eaae-1c2a-4e65-b910-441e77f69bea') self.assertEqual(vpn_customer_gateways[0].cidr_list, '10.2.2.0/24') self.assertEqual(vpn_customer_gateways[0].esp_policy, '3des-md5') self.assertEqual(vpn_customer_gateways[0].gateway, '10.2.2.1') self.assertEqual(vpn_customer_gateways[0].ike_policy, '3des-md5') 
self.assertEqual(vpn_customer_gateways[0].ipsec_psk, 'some_psk') def test_ex_create_vpn_customer_gateway(self): vpn_customer_gateway = self.driver.ex_create_vpn_customer_gateway( cidr_list='10.0.0.0/24', esp_policy='3des-md5', gateway='10.0.0.1', ike_policy='3des-md5', ipsec_psk='ipsecpsk') self.assertEqual(vpn_customer_gateway.id, 'cef3c766-116a-4e83-9844-7d08ab7d3fd4') self.assertEqual(vpn_customer_gateway.esp_policy, '3des-md5') self.assertEqual(vpn_customer_gateway.gateway, '10.0.0.1') self.assertEqual(vpn_customer_gateway.ike_policy, '3des-md5') self.assertEqual(vpn_customer_gateway.ipsec_psk, 'ipsecpsk') def test_ex_ex_delete_vpn_customer_gateway(self): vpn_customer_gateway = self.driver.ex_list_vpn_customer_gateways()[0] self.assertTrue(vpn_customer_gateway.delete()) def test_ex_list_vpn_connections(self): vpn_connections = self.driver.ex_list_vpn_connections() self.assertEqual(len(vpn_connections), 1) self.assertEqual(vpn_connections[0].id, '8f482d9a-6cee-453b-9e78-b0e1338ffce9') self.assertEqual(vpn_connections[0].passive, False) self.assertEqual(vpn_connections[0].vpn_customer_gateway_id, 'ea67eaae-1c2a-4e65-b910-441e77f69bea') self.assertEqual(vpn_connections[0].vpn_gateway_id, 'cffa0cab-d1da-42a7-92f6-41379267a29f') self.assertEqual(vpn_connections[0].state, 'Connected') def test_ex_create_vpn_connection(self): vpn_customer_gateway = self.driver.ex_list_vpn_customer_gateways()[0] vpn_gateway = self.driver.ex_list_vpn_gateways()[0] vpn_connection = self.driver.ex_create_vpn_connection( vpn_customer_gateway, vpn_gateway) self.assertEqual(vpn_connection.id, 'f45c3af8-f909-4f16-9d40-ed4409c575f8') self.assertEqual(vpn_connection.passive, False) self.assertEqual(vpn_connection.vpn_customer_gateway_id, 'ea67eaae-1c2a-4e65-b910-441e77f69bea') self.assertEqual(vpn_connection.vpn_gateway_id, 'cffa0cab-d1da-42a7-92f6-41379267a29f') self.assertEqual(vpn_connection.state, 'Connected') def test_ex_delete_vpn_connection(self): vpn_connection = 
self.driver.ex_list_vpn_connections()[0] self.assertTrue(vpn_connection.delete()) class CloudStackTestCase(CloudStackCommonTestCase, unittest.TestCase): def test_driver_instantiation(self): urls = [ 'http://api.exoscale.ch/compute1', # http, default port 'https://api.exoscale.ch/compute2', # https, default port 'http://api.exoscale.ch:8888/compute3', # https, custom port 'https://api.exoscale.ch:8787/compute4', # https, custom port 'https://api.test.com/compute/endpoint' # https, default port ] expected_values = [ {'host': 'api.exoscale.ch', 'port': 80, 'path': '/compute1'}, {'host': 'api.exoscale.ch', 'port': 443, 'path': '/compute2'}, {'host': 'api.exoscale.ch', 'port': 8888, 'path': '/compute3'}, {'host': 'api.exoscale.ch', 'port': 8787, 'path': '/compute4'}, {'host': 'api.test.com', 'port': 443, 'path': '/compute/endpoint'} ] cls = get_driver(Provider.CLOUDSTACK) for url, expected in zip(urls, expected_values): driver = cls('key', 'secret', url=url) self.assertEqual(driver.host, expected['host']) self.assertEqual(driver.path, expected['path']) self.assertEqual(driver.connection.port, expected['port']) def test_user_must_provide_host_and_path_or_url(self): expected_msg = ('When instantiating CloudStack driver directly ' 'you also need to provide url or host and path ' 'argument') cls = get_driver(Provider.CLOUDSTACK) self.assertRaisesRegexp(Exception, expected_msg, cls, 'key', 'secret') try: cls('key', 'secret', True, 'localhost', '/path') except Exception: self.fail('host and path provided but driver raised an exception') try: cls('key', 'secret', url='https://api.exoscale.ch/compute') except Exception: self.fail('url provided but driver raised an exception') class CloudStackMockHttp(MockHttp, unittest.TestCase): fixtures = ComputeFileFixtures('cloudstack') fixture_tag = 'default' def _load_fixture(self, fixture): body = self.fixtures.load(fixture) return body, json.loads(body) def _test_path_invalid_credentials(self, method, url, body, headers): body = '' 
return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.UNAUTHORIZED]) def _test_path_api_error(self, method, url, body, headers): body = self.fixtures.load('registerSSHKeyPair_error.json') return (431, body, {}, httplib.responses[httplib.OK]) def _test_path(self, method, url, body, headers): url = urlparse.urlparse(url) query = dict(parse_qsl(url.query)) self.assertTrue('apiKey' in query) self.assertTrue('command' in query) self.assertTrue('response' in query) self.assertTrue('signature' in query) self.assertTrue(query['response'] == 'json') del query['apiKey'] del query['response'] del query['signature'] command = query.pop('command') if hasattr(self, '_cmd_' + command): return getattr(self, '_cmd_' + command)(**query) else: fixture = command + '_' + self.fixture_tag + '.json' body, obj = self._load_fixture(fixture) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _test_path_userdata(self, method, url, body, headers): if 'deployVirtualMachine' in url: self.assertUrlContainsQueryParams(url, {'userdata': 'Zm9vYmFy'}) return self._test_path(method, url, body, headers) def _cmd_queryAsyncJobResult(self, jobid): fixture = 'queryAsyncJobResult' + '_' + str(jobid) + '.json' body, obj = self._load_fixture(fixture) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_elasticstack.py0000664000175000017500000002544713153541406024747 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from libcloud.utils.py3 import httplib from libcloud.compute.base import Node from libcloud.compute.drivers.elasticstack import ElasticStackException from libcloud.compute.drivers.elastichosts import \ ElasticHostsNodeDriver as ElasticHosts from libcloud.compute.drivers.skalicloud import \ SkaliCloudNodeDriver as SkaliCloud from libcloud.compute.drivers.serverlove import \ ServerLoveNodeDriver as ServerLove from libcloud.common.types import InvalidCredsError, MalformedResponseError from libcloud.test import MockHttp, unittest from libcloud.test.file_fixtures import ComputeFileFixtures class ElasticStackTestCase(object): def setUp(self): # Re-use ElasticHosts fixtures for the base ElasticStack platform tests self.mockHttp = ElasticStackMockHttp self.mockHttp.type = None self.node = Node(id=72258, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) def test_invalid_creds(self): self.mockHttp.type = 'UNAUTHORIZED' try: self.driver.list_nodes() except InvalidCredsError: e = sys.exc_info()[1] self.assertEqual(True, isinstance(e, InvalidCredsError)) else: self.fail('test should have thrown') def test_malformed_response(self): self.mockHttp.type = 'MALFORMED' try: self.driver.list_nodes() except MalformedResponseError: pass else: self.fail('test should have thrown') def test_parse_error(self): self.mockHttp.type = 'PARSE_ERROR' try: self.driver.list_nodes() except Exception: e = sys.exc_info()[1] self.assertTrue(str(e).find('X-Elastic-Error') != -1) else: self.fail('test should have thrown') def test_ex_set_node_configuration(self): 
success = self.driver.ex_set_node_configuration(node=self.node, name='name', cpu='2') self.assertTrue(success) def test_ex_set_node_configuration_invalid_keys(self): try: self.driver.ex_set_node_configuration(node=self.node, foo='bar') except ElasticStackException: pass else: self.fail( 'Invalid option specified, but an exception was not thrown') def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertTrue(isinstance(nodes, list)) self.assertEqual(len(nodes), 1) node = nodes[0] self.assertEqual(node.public_ips[0], "1.2.3.4") self.assertEqual(node.public_ips[1], "1.2.3.5") self.assertEqual(node.extra['smp'], 1) self.assertEqual( node.extra['ide:0:0'], "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3") def test_list_offline_node(self): self.mockHttp.type = 'OFFLINE' nodes = self.driver.list_nodes() self.assertTrue(isinstance(nodes, list)) self.assertEqual(len(nodes), 1) node = nodes[0] self.assertEqual(len(node.public_ips), 0, "Public IPs was not empty") self.assertNotIn('smp', node.extra) self.assertNotIn('started', node.extra) self.assertEqual( node.extra['ide:0:0'], "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3") def test_list_sizes(self): images = self.driver.list_sizes() self.assertEqual(len(images), 6) image = [i for i in images if i.id == 'small'][0] self.assertEqual(image.id, 'small') self.assertEqual(image.name, 'Small instance') self.assertEqual(image.cpu, 2000) self.assertEqual(image.ram, 1700) self.assertEqual(image.disk, 160) self.assertTrue(isinstance(image.price, float)) def test_list_images(self): images = self.driver.list_images() self.assertEqual(len(images), len(self.driver._standard_drives)) for uuid, values in list(self.driver._standard_drives.items()): self.assertEqual( len([image for image in images if image.id == uuid]), 1) def test_reboot_node(self): node = self.driver.list_nodes()[0] self.assertTrue(self.driver.reboot_node(node)) def test_destroy_node(self): node = self.driver.list_nodes()[0] self.assertTrue(self.driver.destroy_node(node)) def 
test_create_node(self): sizes = self.driver.list_sizes() size = [s for s in sizes if s.id == 'large'][0] image = self.image self.assertTrue(self.driver.create_node(name="api.ivan.net.nz", image=image, size=size)) class ElasticHostsTestCase(ElasticStackTestCase, unittest.TestCase): def setUp(self): ElasticHosts.connectionCls.conn_class = ElasticStackMockHttp self.driver = ElasticHosts('foo', 'bar') images = self.driver.list_images() self.image = [i for i in images if i.id == '38df0986-4d85-4b76-b502-3878ffc80161'][0] super(ElasticHostsTestCase, self).setUp() def test_multiple_drivers_with_different_regions(self): driver1 = ElasticHosts('foo', 'bar', region='lon-p') driver2 = ElasticHosts('foo', 'bar', region='sat-p') self.assertTrue(driver1.connection.host.startswith('api-lon-p')) self.assertTrue(driver2.connection.host.startswith('api-sat-p')) driver1.list_nodes() driver2.list_nodes() driver1.list_nodes() self.assertTrue(driver1.connection.host.startswith('api-lon-p')) self.assertTrue(driver2.connection.host.startswith('api-sat-p')) def test_invalid_region(self): expected_msg = r'Invalid region.+' self.assertRaisesRegexp(ValueError, expected_msg, ElasticHosts, 'foo', 'bar', region='invalid') class SkaliCloudTestCase(ElasticStackTestCase, unittest.TestCase): def setUp(self): SkaliCloud.connectionCls.conn_class = ElasticStackMockHttp self.driver = SkaliCloud('foo', 'bar') images = self.driver.list_images() self.image = [i for i in images if i.id == '90aa51f2-15c0-4cff-81ee-e93aa20b9468'][0] super(SkaliCloudTestCase, self).setUp() class ServerLoveTestCase(ElasticStackTestCase, unittest.TestCase): def setUp(self): ServerLove.connectionCls.conn_class = ElasticStackMockHttp self.driver = ServerLove('foo', 'bar') images = self.driver.list_images() self.image = [i for i in images if i.id == '679f5f44-0be7-4745-a658-cccd4334c1aa'][0] super(ServerLoveTestCase, self).setUp() class ElasticStackMockHttp(MockHttp): fixtures = ComputeFileFixtures('elastichosts') def 
_servers_info_UNAUTHORIZED(self, method, url, body, headers): return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.NO_CONTENT]) def _servers_info_MALFORMED(self, method, url, body, headers): body = "{malformed: '" return (httplib.OK, body, {}, httplib.responses[httplib.NO_CONTENT]) def _servers_info_PARSE_ERROR(self, method, url, body, headers): return (505, body, {}, httplib.responses[httplib.NO_CONTENT]) def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_reset(self, method, url, body, headers): return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_destroy(self, method, url, body, headers): return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _drives_create(self, method, url, body, headers): body = self.fixtures.load('drives_create.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_38df0986_4d85_4b76_b502_3878ffc80161_gunzip(self, method, url, body, headers): # ElasticHosts image return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_90aa51f2_15c0_4cff_81ee_e93aa20b9468_gunzip(self, method, url, body, headers): # Skalikloud image return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_679f5f44_0be7_4745_a658_cccd4334c1aa_gunzip(self, method, url, body, headers): # ServerLove image return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _drives_0012e24a_6eae_4279_9912_3432f698cec8_info(self, method, url, body, headers): body = self.fixtures.load('drives_info.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _servers_create(self, method, url, body, headers): body = self.fixtures.load('servers_create.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _servers_info(self, method, url, 
body, headers): body = self.fixtures.load('servers_info.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _servers_info_OFFLINE(self, method, url, body, headers): body = self.fixtures.load('offline_servers_info.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _servers_72258_set(self, method, url, body, headers): body = '{}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_vcl.py0000664000175000017500000001132513153541406023047 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import unittest import sys from libcloud.utils.py3 import httplib from libcloud.utils.py3 import xmlrpclib from libcloud.compute.drivers.vcl import VCLNodeDriver as VCL from libcloud.compute.types import NodeState from libcloud.test import MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import VCL_PARAMS class VCLTests(unittest.TestCase): def setUp(self): VCL.connectionCls.conn_class = VCLMockHttp VCLMockHttp.type = None self.driver = VCL(*VCL_PARAMS) def test_list_nodes(self): node = self.driver.list_nodes(ipaddr='192.168.1.1')[0] self.assertEqual(node.name, 'CentOS 5.4 Base (32 bit VM)') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.extra['pass'], 'ehkNGW') def test_list_images(self): images = self.driver.list_images() image = images[0] self.assertEqual(image.id, '8') def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 1) def test_create_node(self): image = self.driver.list_images()[0] node = self.driver.create_node(image=image) self.assertEqual(node.id, '51') def test_destroy_node(self): node = self.driver.list_nodes(ipaddr='192.168.1.1')[0] self.assertTrue(self.driver.destroy_node(node)) def test_ex_update_node_access(self): node = self.driver.list_nodes(ipaddr='192.168.1.1')[0] node = self.driver.ex_update_node_access(node, ipaddr='192.168.1.2') self.assertEqual(node.name, 'CentOS 5.4 Base (32 bit VM)') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.extra['pass'], 'ehkNGW') def test_ex_extend_request_time(self): node = self.driver.list_nodes(ipaddr='192.168.1.1')[0] self.assertTrue(self.driver.ex_extend_request_time(node, 60)) def test_ex_get_request_end_time(self): node = self.driver.list_nodes(ipaddr='192.168.1.1')[0] self.assertEqual( self.driver.ex_get_request_end_time(node), 1334168100 ) class VCLMockHttp(MockHttp): fixtures = ComputeFileFixtures('vcl') def _get_method_name(self, type, use_param, qs, path): return "_xmlrpc" 
def _xmlrpc(self, method, url, body, headers): params, meth_name = xmlrpclib.loads(body) if self.type: meth_name = "%s_%s" % (meth_name, self.type) return getattr(self, meth_name)(method, url, body, headers) def XMLRPCgetImages(self, method, url, body, headers): body = self.fixtures.load('XMLRPCgetImages.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def XMLRPCextendRequest(self, method, url, body, headers): body = self.fixtures.load('XMLRPCextendRequest.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def XMLRPCgetRequestIds(self, method, url, body, headers): body = self.fixtures.load( 'XMLRPCgetRequestIds.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def XMLRPCgetRequestStatus(self, method, url, body, headers): body = self.fixtures.load( 'XMLRPCgetRequestStatus.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def XMLRPCendRequest(self, method, url, body, headers): body = self.fixtures.load( 'XMLRPCendRequest.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def XMLRPCaddRequest(self, method, url, body, headers): body = self.fixtures.load( 'XMLRPCaddRequest.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def XMLRPCgetRequestConnectData(self, method, url, body, headers): body = self.fixtures.load( 'XMLRPCgetRequestConnectData.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_exoscale.py0000664000175000017500000000217312701023453024062 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from libcloud.compute.drivers.exoscale import ExoscaleNodeDriver from libcloud.test.compute.test_cloudstack import CloudStackCommonTestCase from libcloud.test import unittest class ExoscaleNodeDriverTestCase(CloudStackCommonTestCase, unittest.TestCase): driver_klass = ExoscaleNodeDriver if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_libvirt_driver.py0000664000175000017500000000722213153541406025312 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys import mock from libcloud.compute.drivers.libvirt_driver import LibvirtNodeDriver from libcloud.compute.drivers.libvirt_driver import have_libvirt from libcloud.utils.py3 import PY3 from libcloud.test import unittest __all__ = [ 'LibvirtNodeDriverTestCase' ] @unittest.skipIf(not have_libvirt, 'libvirt not available, skipping tests') @mock.patch('libcloud.compute.drivers.libvirt_driver.libvirt', autospec=True) class LibvirtNodeDriverTestCase(unittest.TestCase): arp_output_str = """? (1.2.10.80) at 52:54:00:bc:f9:6c [ether] on br0 ? (1.2.10.33) at 52:54:00:04:89:51 [ether] on br0 ? (1.2.10.97) at 52:54:00:c6:40:ec [ether] on br0 ? (1.2.10.40) at 52:54:00:77:1c:83 [ether] on br0""" ip_output_str = """1.2.10.80 dev br0 lladdr 52:54:00:bc:f9:6c STALE 1.2.10.33 dev br0 lladdr 52:54:00:04:89:51 REACHABLE 1.2.10.97 dev br0 lladdr 52:54:00:c6:40:ec DELAY 1.2.10.40 dev br0 lladdr 52:54:00:77:1c:83 STALE""" bad_output_str = """1.2.10.80 dev br0 52:54:00:bc:f9:6c STALE 1.2.10.33 dev br0 lladdr 52:54:00:04:89:51 REACHABLE 1.2.10.97 dev br0 lladdr 1.2.10.40 dev br0 lladdr 52:54:00:77:1c:83 STALE""" if PY3: from libcloud.utils.py3 import b arp_output_str = b(arp_output_str) ip_output_str = b(ip_output_str) bad_output_str = b(bad_output_str) def _assert_arp_table(self, arp_table): self.assertIn('52:54:00:bc:f9:6c', arp_table) self.assertIn('52:54:00:04:89:51', arp_table) self.assertIn('52:54:00:c6:40:ec', arp_table) self.assertIn('52:54:00:77:1c:83', arp_table) self.assertIn('1.2.10.80', arp_table['52:54:00:bc:f9:6c']) self.assertIn('1.2.10.33', arp_table['52:54:00:04:89:51']) self.assertIn('1.2.10.97', arp_table['52:54:00:c6:40:ec']) self.assertIn('1.2.10.40', arp_table['52:54:00:77:1c:83']) def test_arp_map(self, *args, **keywargs): driver = LibvirtNodeDriver('') arp_table = driver._parse_ip_table_arp(self.arp_output_str) self._assert_arp_table(arp_table) def test_ip_map(self, *args, **keywargs): driver = LibvirtNodeDriver('') arp_table = 
driver._parse_ip_table_neigh(self.ip_output_str) self._assert_arp_table(arp_table) def test_bad_map(self, *args, **keywargs): driver = LibvirtNodeDriver('') arp_table = driver._parse_ip_table_neigh(self.bad_output_str) # we should at least get the correctly formatted lines self.assertEqual(len(arp_table), 2) arp_table = driver._parse_ip_table_neigh(self.arp_output_str) # nothing should match if the wrong output is sent self.assertEqual(len(arp_table), 0) def test_list_nodes(self, *args, **keywargs): driver = LibvirtNodeDriver('') nodes = driver.list_nodes() self.assertEqual(type([]), type(nodes)) self.assertEqual(len(nodes), 0) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_abiquo.py0000664000175000017500000005067013153541406023551 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Abiquo Test Suite """ import sys from libcloud.utils.py3 import ET from libcloud.utils.py3 import httplib from libcloud.compute.drivers.abiquo import AbiquoNodeDriver from libcloud.common.abiquo import ForbiddenError, get_href from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.compute.base import NodeLocation, NodeImage from libcloud.test.compute import TestCaseMixin from libcloud.test import MockHttp, unittest from libcloud.test.file_fixtures import ComputeFileFixtures class AbiquoNodeDriverTest(TestCaseMixin, unittest.TestCase): """ Abiquo Node Driver test suite """ @classmethod def setUpClass(cls): """ Set up the driver with the main user """ AbiquoNodeDriver.connectionCls.conn_class = AbiquoMockHttp cls.driver = AbiquoNodeDriver('son', 'goku', 'http://dummy.host.com/api') def test_unauthorized_controlled(self): """ Test the Unauthorized Exception is Controlled. Test, through the 'login' method, that a '401 Unauthorized' raises a 'InvalidCredsError' instead of the 'MalformedUrlException' """ self.assertRaises(InvalidCredsError, AbiquoNodeDriver, 'son', 'goten', 'http://dummy.host.com/api') def test_forbidden_controlled(self): """ Test the Forbidden Exception is Controlled. Test, through the 'list_images' method, that a '403 Forbidden' raises an 'ForbidenError' instead of the 'MalformedUrlException' """ AbiquoNodeDriver.connectionCls.conn_class = AbiquoMockHttp conn = AbiquoNodeDriver('son', 'gohan', 'http://dummy.host.com/api') self.assertRaises(ForbiddenError, conn.list_images) def test_handle_other_errors_such_as_not_found(self): """ Test common 'logical' exceptions are controlled. Test that common exception (normally 404-Not Found and 409-Conflict), that return an XMLResponse with the explanation of the errors are controlled. 
""" self.driver = AbiquoNodeDriver('go', 'trunks', 'http://dummy.host.com/api') self.assertRaises(LibcloudError, self.driver.list_images) def test_ex_create_and_delete_empty_group(self): """ Test the creation and deletion of an empty group. """ group = self.driver.ex_create_group('libcloud_test_group') group.destroy() def test_create_node_no_image_raise_exception(self): """ Test 'create_node' without image. Test the 'create_node' function without 'image' parameter raises an Exception """ self.assertRaises(LibcloudError, self.driver.create_node) def test_list_locations_response(self): if not self.should_list_locations: return None locations = self.driver.list_locations() self.assertTrue(isinstance(locations, list)) def test_create_node_specify_location(self): """ Test you can create a node specifying the location. """ image = self.driver.list_images()[0] location = self.driver.list_locations()[0] self.driver.create_node(image=image, location=location) def test_create_node_specify_wrong_location(self): """ Test you can not create a node with wrong location. """ image = self.driver.list_images()[0] location = NodeLocation(435, 'fake-location', 'Spain', self.driver) self.assertRaises(LibcloudError, self.driver.create_node, image=image, location=location) def test_create_node_specify_wrong_image(self): """ Test image compatibility. Some locations only can handle a group of images, not all of them. Test you can not create a node with incompatible image-location. """ # Create fake NodeImage image = NodeImage(3234, 'dummy-image', self.driver) location = self.driver.list_locations()[0] # With this image, it should raise an Exception self.assertRaises(LibcloudError, self.driver.create_node, image=image, location=location) def test_create_node_specify_group_name(self): """ Test 'create_node' into a concrete group. 
""" image = self.driver.list_images()[0] self.driver.create_node(image=image, group_name='new_group_name') def test_create_group_location_does_not_exist(self): """ Test 'create_node' with an unexistent location. Defines a 'fake' location and tries to create a node into it. """ location = NodeLocation(435, 'fake-location', 'Spain', self.driver) # With this location, it should raise an Exception self.assertRaises(LibcloudError, self.driver.ex_create_group, name='new_group_name', location=location) def test_destroy_node_response(self): """ 'destroy_node' basic test. Override the destroy to return a different node available to be undeployed. (by default it returns an already undeployed node, for test creation). """ self.driver = AbiquoNodeDriver('go', 'trunks', 'http://dummy.host.com/api') node = self.driver.list_nodes()[0] ret = self.driver.destroy_node(node) self.assertTrue(ret) def test_destroy_node_response_failed(self): """ 'destroy_node' asynchronous error. Test that the driver handles correctly when, for some reason, the 'destroy' job fails. """ self.driver = AbiquoNodeDriver('muten', 'roshi', 'http://dummy.host.com/api') node = self.driver.list_nodes()[0] ret = self.driver.destroy_node(node) self.assertFalse(ret) def test_destroy_node_allocation_state(self): """ Test the 'destroy_node' invalid state. Try to destroy a node when the node is not running. """ self.driver = AbiquoNodeDriver('ve', 'geta', 'http://dummy.host.com/api') # Override the destroy to return a different node available to be # undeployed node = self.driver.list_nodes()[0] # The mock class with the user:password 've:geta' returns a node that # is in 'ALLOCATION' state and hence, the 'destroy_node' method should # raise a LibcloudError self.assertRaises(LibcloudError, self.driver.destroy_node, node) def test_destroy_not_deployed_group(self): """ Test 'ex_destroy_group' when group is not deployed. 
""" location = self.driver.list_locations()[0] group = self.driver.ex_list_groups(location)[1] self.assertTrue(group.destroy()) def test_destroy_deployed_group(self): """ Test 'ex_destroy_group' when there are machines running. """ location = self.driver.list_locations()[0] group = self.driver.ex_list_groups(location)[0] self.assertTrue(group.destroy()) def test_destroy_deployed_group_failed(self): """ Test 'ex_destroy_group' fails. Test driver handles correctly when, for some reason, the asynchronous job fails. """ self.driver = AbiquoNodeDriver('muten', 'roshi', 'http://dummy.host.com/api') location = self.driver.list_locations()[0] group = self.driver.ex_list_groups(location)[0] self.assertFalse(group.destroy()) def test_destroy_group_invalid_state(self): """ Test 'ex_destroy_group' invalid state. Test the Driver raises an exception when the group is in invalid temporal state. """ self.driver = AbiquoNodeDriver('ve', 'geta', 'http://dummy.host.com/api') location = self.driver.list_locations()[0] group = self.driver.ex_list_groups(location)[1] self.assertRaises(LibcloudError, group.destroy) def test_run_node(self): """ Test 'ex_run_node' feature. """ node = self.driver.list_nodes()[0] # Node is by default in NodeState.TERMINATED and AbiquoState == # 'NOT_ALLOCATED' # so it is available to be runned self.driver.ex_run_node(node) def test_run_node_invalid_state(self): """ Test 'ex_run_node' invalid state. Test the Driver raises an exception when try to run a node that is in invalid state to run. """ self.driver = AbiquoNodeDriver('go', 'trunks', 'http://dummy.host.com/api') node = self.driver.list_nodes()[0] # Node is by default in AbiquoState = 'ON' for user 'go:trunks' # so is not available to be runned self.assertRaises(LibcloudError, self.driver.ex_run_node, node) def test_run_node_failed(self): """ Test 'ex_run_node' fails. Test driver handles correctly when, for some reason, the asynchronous job fails. 
""" self.driver = AbiquoNodeDriver('ten', 'shin', 'http://dummy.host.com/api') node = self.driver.list_nodes()[0] # Node is in the correct state, but it fails because of the # async task and it raises the error. self.assertRaises(LibcloudError, self.driver.ex_run_node, node) def test_get_href(self): xml = ''' ''' elem = ET.XML(xml) href = get_href(element=elem, rel='edit1') self.assertEqual(href, '/admin/datacenters/2') href = get_href(element=elem, rel='edit2') self.assertEqual(href, '/admin/datacenters/3') href = get_href(element=elem, rel='edit3') self.assertEqual(href, '/admin/enterprises/1234') class AbiquoMockHttp(MockHttp): """ Mock the functionallity of the remote Abiquo API. """ fixtures = ComputeFileFixtures('abiquo') fixture_tag = 'default' def _api_login(self, method, url, body, headers): if headers['Authorization'] == 'Basic c29uOmdvdGVu': expected_response = self.fixtures.load('unauthorized_user.html') expected_status = httplib.UNAUTHORIZED else: expected_response = self.fixtures.load('login.xml') expected_status = httplib.OK return (expected_status, expected_response, {}, '') def _api_cloud_virtualdatacenters(self, method, url, body, headers): return (httplib.OK, self.fixtures.load('vdcs.xml'), {}, '') def _api_cloud_virtualdatacenters_4(self, method, url, body, headers): return (httplib.OK, self.fixtures.load('vdc_4.xml'), {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances(self, method, url, body, headers): if method == 'POST': vapp_name = ET.XML(body).findtext('name') if vapp_name == 'libcloud_test_group': # we come from 'test_ex_create_and_delete_empty_group(self):' # method and so, we return the 'ok' return response = self.fixtures.load('vdc_4_vapp_creation_ok.xml') return (httplib.OK, response, {}, '') elif vapp_name == 'new_group_name': # we come from 'test_ex_create_and_delete_empty_group(self):' # method and so, we return the 'ok' return response = self.fixtures.load('vdc_4_vapp_creation_ok.xml') return (httplib.OK, response, {}, 
'') else: # It will be a 'GET'; return (httplib.OK, self.fixtures.load('vdc_4_vapps.xml'), {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_5(self, method, url, body, headers): if method == 'GET': if headers['Authorization'] == 'Basic dmU6Z2V0YQ==': # Try to destroy a group with 'needs_sync' state response = self.fixtures.load('vdc_4_vapp_5_needs_sync.xml') else: # Try to destroy a group with 'undeployed' state response = self.fixtures.load('vdc_4_vapp_5.xml') return (httplib.OK, response, {}, '') else: # it will be a 'DELETE' return (httplib.NO_CONTENT, '', {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_6(self, method, url, body, headers): if method == 'GET': # deployed vapp response = self.fixtures.load('vdc_4_vapp_6.xml') return (httplib.OK, response, {}, '') else: # it will be a 'DELETE' return (httplib.NO_CONTENT, '', {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_1da8c8b6_86f6_49ef_9d29_57dcc73b875a(self, method, url, body, headers): if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=': # User 'muten:roshi' failed task response = self.fixtures.load( 'vdc_4_vapp_6_undeploy_task_failed.xml') else: response = self.fixtures.load('vdc_4_vapp_6_undeploy_task.xml') return (httplib.OK, response, {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_5_virtualmachines( self, method, url, body, headers): # This virtual app never have virtual machines if method == 'GET': response = self.fixtures.load('vdc_4_vapp_5_vms.xml') return (httplib.OK, response, {}, '') elif method == 'POST': # it must be a POST response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml') return (httplib.CREATED, response, {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines( self, method, url, body, headers): # Default-created virtual app virtual machines' if method == 'GET': if headers['Authorization'] == 'Basic dmU6Z2V0YQ==': response = 
self.fixtures.load('vdc_4_vapp_6_vms_allocated.xml') else: response = self.fixtures.load('vdc_4_vapp_6_vms.xml') return (httplib.OK, response, {}, '') else: # it must be a POST response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml') return (httplib.CREATED, response, {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3(self, method, url, body, headers): if (headers['Authorization'] == 'Basic Z286dHJ1bmtz' or headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk='): # Undeploy node response = self.fixtures.load("vdc_4_vapp_6_vm_3_deployed.xml") elif headers['Authorization'] == 'Basic dmU6Z2V0YQ==': # Try to undeploy a node with 'allocation' state response = self.fixtures.load('vdc_4_vapp_6_vm_3_allocated.xml') else: # Get node response = self.fixtures.load('vdc_4_vapp_6_vm_3.xml') return (httplib.OK, response, {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_deploy(self, method, url, body, headers): response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy.xml') return (httplib.CREATED, response, {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_b44fe278_6b0f_4dfb_be81_7c03006a93cb(self, method, url, body, headers): if headers['Authorization'] == 'Basic dGVuOnNoaW4=': # User 'ten:shin' failed task response = self.fixtures.load( 'vdc_4_vapp_6_vm_3_deploy_task_failed.xml') else: response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy_task.xml') return (httplib.OK, response, {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_6_action_undeploy( self, method, url, body, headers): response = self.fixtures.load('vdc_4_vapp_6_undeploy.xml') return (httplib.OK, response, {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_reset(self, method, url, body, headers): response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset.xml') return (httplib.CREATED, response, {}, '') def 
_api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_a8c9818e_f389_45b7_be2c_3db3a9689940(self, method, url, body, headers): if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=': # User 'muten:roshi' failed task response = self.fixtures.load( 'vdc_4_vapp_6_undeploy_task_failed.xml') else: response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset_task.xml') return (httplib.OK, response, {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_undeploy(self, method, url, body, headers): response = self.fixtures.load('vdc_4_vapp_6_vm_3_undeploy.xml') return (httplib.CREATED, response, {}, '') def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_network_nics(self, method, url, body, headers): response = self.fixtures.load('vdc_4_vapp_6_vm_3_nics.xml') return (httplib.OK, response, {}, '') def _api_admin_datacenters(self, method, url, body, headers): return (httplib.OK, self.fixtures.load('dcs.xml'), {}, '') def _api_admin_enterprises_1(self, method, url, body, headers): return (httplib.OK, self.fixtures.load('ent_1.xml'), {}, '') def _api_admin_enterprises_1_datacenterrepositories(self, method, url, body, headers): # When the user is the common one for all the tests ('son, 'goku') # it creates this basic auth and we return the datacenters value if headers['Authorization'] == 'Basic Z286dHJ1bmtz': expected_response = self.fixtures.load("not_found_error.xml") return (httplib.NOT_FOUND, expected_response, {}, '') elif headers['Authorization'] != 'Basic c29uOmdvaGFu': return (httplib.OK, self.fixtures.load('ent_1_dcreps.xml'), {}, '') else: # son:gohan user: forbidden error expected_response = self.fixtures.load("privilege_errors.html") return (httplib.FORBIDDEN, expected_response, {}, '') def _api_admin_enterprises_1_datacenterrepositories_2(self, method, url, body, headers): return (httplib.OK, self.fixtures.load('ent_1_dcrep_2.xml'), {}, '') def 
_api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates(self, method, url, body, headers): return (httplib.OK, self.fixtures.load('ent_1_dcrep_2_templates.xml'), {}, '') def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates_11(self, method, url, body, headers): return ( httplib.OK, self.fixtures.load('ent_1_dcrep_2_template_11.xml'), {}, '') if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_linode.py0000664000175000017500000002111213153541406023530 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Maintainer: Jed Smith # Based upon code written by Alex Polvi # import sys import unittest from libcloud.utils.py3 import httplib from libcloud.compute.drivers.linode import LinodeNodeDriver from libcloud.compute.base import Node, NodeAuthPassword from libcloud.compute.base import NodeAuthSSHKey, StorageVolume from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures class LinodeTest(unittest.TestCase, TestCaseMixin): # The Linode test suite def setUp(self): LinodeNodeDriver.connectionCls.conn_class = LinodeMockHttp LinodeMockHttp.use_param = 'api_action' self.driver = LinodeNodeDriver('foo') def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 1) node = nodes[0] self.assertEqual(node.id, "8098") self.assertEqual(node.name, 'api-node3') self.assertEqual(node.extra['PLANID'], '2') self.assertTrue('75.127.96.245' in node.public_ips) self.assertEqual(node.private_ips, []) def test_reboot_node(self): # An exception would indicate failure node = self.driver.list_nodes()[0] self.driver.reboot_node(node) def test_destroy_node(self): # An exception would indicate failure node = self.driver.list_nodes()[0] self.driver.destroy_node(node) def test_create_node_password_auth(self): # Will exception on failure self.driver.create_node(name="Test", location=self.driver.list_locations()[0], size=self.driver.list_sizes()[0], image=self.driver.list_images()[6], auth=NodeAuthPassword("test123")) def test_create_node_ssh_key_auth(self): # Will exception on failure node = self.driver.create_node(name="Test", location=self.driver.list_locations()[ 0], size=self.driver.list_sizes()[0], image=self.driver.list_images()[6], auth=NodeAuthSSHKey('foo')) self.assertTrue(isinstance(node, Node)) def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 9) for size in sizes: self.assertEqual(size.ram, int(size.name.split(" ")[1])) def 
test_list_images(self): images = self.driver.list_images() self.assertEqual(len(images), 30) def test_create_node_response(self): # should return a node object node = self.driver.create_node(name="node-name", location=self.driver.list_locations()[ 0], size=self.driver.list_sizes()[0], image=self.driver.list_images()[0], auth=NodeAuthPassword("foobar")) self.assertTrue(isinstance(node, Node)) def test_destroy_volume(self): # Will exception on failure node = self.driver.list_nodes()[0] volume = StorageVolume(id=55648, name="test", size=1024, driver=self.driver, extra={"LINODEID": node.id}) self.driver.destroy_volume(volume) def test_ex_create_volume(self): # should return a StorageVolume object node = self.driver.list_nodes()[0] volume = self.driver.ex_create_volume(size=4096, name="Another test image", node=node, fs_type="ext4") self.assertTrue(isinstance(volume, StorageVolume)) def test_ex_list_volumes(self): # should return list of StorageVolume objects node = self.driver.list_nodes()[0] volumes = self.driver.ex_list_volumes(node=node) self.assertTrue(isinstance(volumes, list)) self.assertTrue(isinstance(volumes[0], StorageVolume)) self.assertEqual(len(volumes), 2) class LinodeMockHttp(MockHttp): fixtures = ComputeFileFixtures('linode') def _avail_datacenters(self, method, url, body, headers): body = self.fixtures.load('_avail_datacenters.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _avail_linodeplans(self, method, url, body, headers): body = self.fixtures.load('_avail_linodeplans.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _avail_distributions(self, method, url, body, headers): body = self.fixtures.load('_avail_distributions.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_create(self, method, url, body, headers): body = '{"ERRORARRAY":[],"ACTION":"linode.create","DATA":{"LinodeID":8098}}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_disk_create(self, 
method, url, body, headers): body = '{"ERRORARRAY":[],"ACTION":"linode.disk.create","DATA":{"JobID":1298,"DiskID":55647}}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_disk_delete(self, method, url, body, headers): body = '{"ERRORARRAY":[],"ACTION":"linode.disk.delete","DATA":{"JobID":1298,"DiskID":55648}}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_disk_createfromdistribution(self, method, url, body, headers): body = '{"ERRORARRAY":[],"ACTION":"linode.disk.createFromDistribution","DATA":{"JobID":1298,"DiskID":55647}}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_disk_list(self, method, url, body, headers): body = self.fixtures.load('_linode_disk_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_delete(self, method, url, body, headers): body = '{"ERRORARRAY":[],"ACTION":"linode.delete","DATA":{"LinodeID":8098}}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_update(self, method, url, body, headers): body = '{"ERRORARRAY":[],"ACTION":"linode.update","DATA":{"LinodeID":8098}}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_reboot(self, method, url, body, headers): body = '{"ERRORARRAY":[],"ACTION":"linode.reboot","DATA":{"JobID":1305}}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _avail_kernels(self, method, url, body, headers): body = self.fixtures.load('_avail_kernels.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_boot(self, method, url, body, headers): body = '{"ERRORARRAY":[],"ACTION":"linode.boot","DATA":{"JobID":1300}}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_config_create(self, method, url, body, headers): body = '{"ERRORARRAY":[],"ACTION":"linode.config.create","DATA":{"ConfigID":31239}}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_list(self, method, url, body, headers): body 
= self.fixtures.load('_linode_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_ip_list(self, method, url, body, headers): body = self.fixtures.load('_linode_ip_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _batch(self, method, url, body, headers): body = self.fixtures.load('_batch.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_auroracompute.py0000664000175000017500000000421512701023453025144 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys from libcloud.compute.drivers.auroracompute import AuroraComputeNodeDriver from libcloud.compute.drivers.auroracompute import AuroraComputeRegion from libcloud.test.compute.test_cloudstack import CloudStackCommonTestCase from libcloud.test import unittest class AuroraComputeNodeDriverTestCase(CloudStackCommonTestCase, unittest.TestCase): driver_klass = AuroraComputeNodeDriver def test_api_host(self): driver = self.driver_klass('invalid', 'invalid') self.assertEqual(driver.host, 'api.auroracompute.eu') def test_without_region(self): driver = self.driver_klass('invalid', 'invalid') self.assertEqual(driver.path, '/ams') def test_with_ams_region(self): driver = self.driver_klass('invalid', 'invalid', region=AuroraComputeRegion.AMS) self.assertEqual(driver.path, '/ams') def test_with_miami_region(self): driver = self.driver_klass('invalid', 'invalid', region=AuroraComputeRegion.MIA) self.assertEqual(driver.path, '/mia') def test_with_tokyo_region(self): driver = self.driver_klass('invalid', 'invalid', region=AuroraComputeRegion.TYO) self.assertEqual(driver.path, '/tyo') if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_digitalocean_v2.py0000664000175000017500000004213513153541406025320 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from datetime import datetime from libcloud.utils.iso8601 import UTC try: import simplejson as json except ImportError: import json # NOQA from libcloud.utils.py3 import httplib from libcloud.common.types import InvalidCredsError from libcloud.common.digitalocean import DigitalOcean_v1_Error from libcloud.compute.base import NodeImage from libcloud.compute.drivers.digitalocean import DigitalOceanNodeDriver from libcloud.test import LibcloudTestCase, MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import DIGITALOCEAN_v1_PARAMS from libcloud.test.secrets import DIGITALOCEAN_v2_PARAMS # class DigitalOceanTests(unittest.TestCase, TestCaseMixin): class DigitalOcean_v2_Tests(LibcloudTestCase): def setUp(self): DigitalOceanNodeDriver.connectionCls.conn_class = \ DigitalOceanMockHttp DigitalOceanMockHttp.type = None self.driver = DigitalOceanNodeDriver(*DIGITALOCEAN_v2_PARAMS) def test_v1_Error(self): self.assertRaises(DigitalOcean_v1_Error, DigitalOceanNodeDriver, *DIGITALOCEAN_v1_PARAMS, api_version='v1') def test_v2_uses_v1_key(self): self.assertRaises(InvalidCredsError, DigitalOceanNodeDriver, *DIGITALOCEAN_v1_PARAMS, api_version='v2') def test_authentication(self): DigitalOceanMockHttp.type = 'UNAUTHORIZED' self.assertRaises(InvalidCredsError, self.driver.list_nodes) def test_list_images_success(self): images = self.driver.list_images() self.assertTrue(len(images) >= 1) image = images[0] self.assertTrue(image.id is not None) self.assertTrue(image.name is not None) def test_list_sizes_success(self): sizes = self.driver.list_sizes() self.assertTrue(len(sizes) >= 1) size = sizes[0] self.assertTrue(size.id is not None) self.assertEqual(size.name, '512mb') self.assertEqual(size.ram, 512) size = sizes[1] self.assertTrue(size.id is not None) self.assertEqual(size.name, '1gb') 
self.assertEqual(size.ram, 1024) def test_list_locations_success(self): locations = self.driver.list_locations() self.assertTrue(len(locations) >= 1) location = locations[0] self.assertEqual(location.id, 'nyc1') self.assertEqual(location.name, 'New York 1') def test_list_nodes_success(self): nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 1) self.assertEqual(nodes[0].name, 'example.com') self.assertEqual(nodes[0].public_ips, ['104.236.32.182']) self.assertEqual(nodes[0].extra['image']['id'], 6918990) self.assertEqual(nodes[0].extra['size_slug'], '512mb') def test_list_nodes_fills_created_datetime(self): nodes = self.driver.list_nodes() self.assertEqual(nodes[0].created_at, datetime(2014, 11, 14, 16, 29, 21, tzinfo=UTC)) def test_create_node_invalid_size(self): image = NodeImage(id='invalid', name=None, driver=self.driver) size = self.driver.list_sizes()[0] location = self.driver.list_locations()[0] DigitalOceanMockHttp.type = 'INVALID_IMAGE' expected_msg = \ r'You specified an invalid image for Droplet creation. 
\(code: (404|HTTPStatus.NOT_FOUND)\)' self.assertRaisesRegexp(Exception, expected_msg, self.driver.create_node, name='test', size=size, image=image, location=location) def test_reboot_node_success(self): node = self.driver.list_nodes()[0] DigitalOceanMockHttp.type = 'REBOOT' result = self.driver.reboot_node(node) self.assertTrue(result) def test_create_image_success(self): node = self.driver.list_nodes()[0] DigitalOceanMockHttp.type = 'SNAPSHOT' result = self.driver.create_image(node, 'My snapshot') self.assertTrue(result) def test_get_image_success(self): image = self.driver.get_image(12345) self.assertEqual(image.name, 'My snapshot') self.assertEqual(image.id, '12345') self.assertEqual(image.extra['distribution'], 'Ubuntu') def test_delete_image_success(self): image = self.driver.get_image(12345) DigitalOceanMockHttp.type = 'DESTROY' result = self.driver.delete_image(image) self.assertTrue(result) def test_ex_power_on_node_success(self): node = self.driver.list_nodes()[0] DigitalOceanMockHttp.type = 'POWERON' result = self.driver.ex_power_on_node(node) self.assertTrue(result) def test_ex_shutdown_node_success(self): node = self.driver.list_nodes()[0] DigitalOceanMockHttp.type = 'SHUTDOWN' result = self.driver.ex_shutdown_node(node) self.assertTrue(result) def test_ex_hard_reboot_success(self): node = self.driver.list_nodes()[0] DigitalOceanMockHttp.type = 'POWERCYCLE' result = self.driver.ex_hard_reboot(node) self.assertTrue(result) def test_destroy_node_success(self): node = self.driver.list_nodes()[0] DigitalOceanMockHttp.type = 'DESTROY' result = self.driver.destroy_node(node) self.assertTrue(result) def test_ex_change_kernel_success(self): node = self.driver.list_nodes()[0] DigitalOceanMockHttp.type = 'KERNELCHANGE' result = self.driver.ex_change_kernel(node, 7515) self.assertTrue(result) def test_ex_rename_node_success(self): node = self.driver.list_nodes()[0] DigitalOceanMockHttp.type = 'RENAME' result = self.driver.ex_rename_node(node, 'fedora helios') 
self.assertTrue(result) def test_list_key_pairs(self): keys = self.driver.list_key_pairs() self.assertEqual(len(keys), 1) self.assertEqual(keys[0].extra['id'], 7717) self.assertEqual(keys[0].name, 'test1') self.assertEqual(keys[0].public_key, "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDGk5 example") def test_create_key_pair(self): DigitalOceanMockHttp.type = 'CREATE' key = self.driver.create_key_pair( name="test1", public_key="ssh-rsa AAAAB3NzaC1yc2EAAAADAQsxRiUKn example" ) self.assertEqual(key.name, "test1") self.assertEqual(key.fingerprint, "f5:d1:78:ed:28:72:5f:e1:ac:94:fd:1f:e0:a3:48:6d") def test_delete_key_pair(self): key = self.driver.list_key_pairs()[0] result = self.driver.delete_key_pair(key) self.assertTrue(result) def test__paginated_request_single_page(self): nodes = self.driver._paginated_request('/v2/droplets', 'droplets') self.assertEqual(nodes[0]['name'], 'example.com') self.assertEqual(nodes[0]['image']['id'], 6918990) self.assertEqual(nodes[0]['size_slug'], '512mb') def test__paginated_request_two_pages(self): DigitalOceanMockHttp.type = 'PAGE_ONE' nodes = self.driver._paginated_request('/v2/droplets', 'droplets') self.assertEqual(len(nodes), 2) def test_list_volumes(self): volumes = self.driver.list_volumes() self.assertEqual(len(volumes), 1) volume = volumes[0] self.assertEqual(volume.id, "62766883-2c28-11e6-b8e6-000f53306ae1") self.assertEqual(volume.name, "example") self.assertEqual(volume.size, 4) self.assertEqual(volume.driver, self.driver) def test_list_volumes_empty(self): DigitalOceanMockHttp.type = 'EMPTY' volumes = self.driver.list_volumes() self.assertEqual(len(volumes), 0) def test_create_volume(self): nyc1 = [r for r in self.driver.list_locations() if r.id == 'nyc1'][0] DigitalOceanMockHttp.type = 'CREATE' volume = self.driver.create_volume(4, 'example', nyc1) self.assertEqual(volume.id, "62766883-2c28-11e6-b8e6-000f53306ae1") self.assertEqual(volume.name, "example") self.assertEqual(volume.size, 4) self.assertEqual(volume.driver, 
self.driver) def test_attach_volume(self): node = self.driver.list_nodes()[0] volume = self.driver.list_volumes()[0] DigitalOceanMockHttp.type = 'ATTACH' resp = self.driver.attach_volume(node, volume) self.assertTrue(resp) def test_detach_volume(self): volume = self.driver.list_volumes()[0] DigitalOceanMockHttp.type = 'DETACH' resp = self.driver.detach_volume(volume) self.assertTrue(resp) def test_destroy_volume(self): volume = self.driver.list_volumes()[0] DigitalOceanMockHttp.type = 'DESTROY' resp = self.driver.destroy_volume(volume) self.assertTrue(resp) def test_list_volume_snapshots(self): volume = self.driver.list_volumes()[0] snapshots = self.driver.list_volume_snapshots(volume) self.assertEqual(len(snapshots), 3) snapshot1, snapshot2, snapshot3 = snapshots self.assertEqual(snapshot1.id, "c0def940-9324-11e6-9a56-000f533176b1") self.assertEqual(snapshot2.id, "c2036724-9343-11e6-aef4-000f53315a41") self.assertEqual(snapshot3.id, "d347e033-9343-11e6-9a56-000f533176b1") def test_create_volume_snapshot(self): volume = self.driver.list_volumes()[0] DigitalOceanMockHttp.type = 'CREATE' snapshot = self.driver.create_volume_snapshot(volume, 'test-snapshot') self.assertEqual(snapshot.id, "c0def940-9324-11e6-9a56-000f533176b1") self.assertEqual(snapshot.name, 'test-snapshot') self.assertEqual(volume.driver, self.driver) def test_delete_volume_snapshot(self): volume = self.driver.list_volumes()[0] snapshot = self.driver.list_volume_snapshots(volume)[0] DigitalOceanMockHttp.type = 'DELETE' result = self.driver.delete_volume_snapshot(snapshot) self.assertTrue(result) class DigitalOceanMockHttp(MockHttp): fixtures = ComputeFileFixtures('digitalocean_v2') def _v2_regions(self, method, url, body, headers): body = self.fixtures.load('list_locations.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v2_images(self, method, url, body, headers): body = self.fixtures.load('list_images.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_v2_sizes(self, method, url, body, headers): body = self.fixtures.load('list_sizes.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v2_droplets(self, method, url, body, headers): body = self.fixtures.load('list_nodes.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v2_droplets_INVALID_IMAGE(self, method, url, body, headers): body = self.fixtures.load('error_invalid_image.json') return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) def _v2_droplets_3164444_actions_REBOOT(self, method, url, body, headers): # reboot_node body = self.fixtures.load('reboot_node.json') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _v2_droplets_3164444_DESTROY(self, method, url, body, headers): # destroy_node return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _v2_droplets_3164444_actions_KERNELCHANGE(self, method, url, body, headers): # change_kernel body = self.fixtures.load('ex_change_kernel.json') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _v2_droplets_3164444_actions_RENAME(self, method, url, body, headers): # rename_node body = self.fixtures.load('ex_rename_node.json') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _v2_droplets_3164444_actions_SNAPSHOT(self, method, url, body, headers): # create_image body = self.fixtures.load('create_image.json') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _v2_images_12345(self, method, url, body, headers): # get_image body = self.fixtures.load('get_image.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v2_images_12345_DESTROY(self, method, url, body, headers): # delete_image return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _v2_droplets_3164444_actions_POWERON(self, method, url, body, headers): # ex_power_on_node body = self.fixtures.load('ex_power_on_node.json') return 
(httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _v2_droplets_3164444_actions_SHUTDOWN(self, method, url, body, headers): # ex_shutdown_node body = self.fixtures.load('ex_shutdown_node.json') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _v2_droplets_3164444_actions_POWERCYCLE(self, method, url, body, headers): # ex_hard_reboot body = self.fixtures.load('ex_hard_reboot.json') return (httplib.CREATED, body, {}, httplib.responses[httplib.OK]) def _v2_account_keys(self, method, url, body, headers): body = self.fixtures.load('list_key_pairs.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v2_account_keys_7717(self, method, url, body, headers): # destroy_ssh_key return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _v2_account_keys_CREATE(self, method, url, body, headers): # create_ssh_key body = self.fixtures.load('create_key_pair.json') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _v2_droplets_UNAUTHORIZED(self, method, url, body, headers): body = self.fixtures.load('error.json') return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.UNAUTHORIZED]) def _v2_droplets_PAGE_ONE(self, method, url, body, headers): body = self.fixtures.load('list_nodes_page_1.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v2_volumes(self, method, url, body, headers): body = self.fixtures.load('list_volumes.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v2_volumes_EMPTY(self, method, url, body, headers): body = self.fixtures.load('list_volumes_empty.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v2_volumes_CREATE(self, method, url, body, headers): body = self.fixtures.load('create_volume.json') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _v2_volumes_actions_ATTACH(self, method, url, body, headers): body = self.fixtures.load('attach_volume.json') 
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) def _v2_volumes_DETACH(self, method, url, body, headers): body = self.fixtures.load('detach_volume.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v2_volumes_62766883_2c28_11e6_b8e6_000f53306ae1_DESTROY(self, method, url, body, headers): return (httplib.NO_CONTENT, None, {}, httplib.responses[httplib.NO_CONTENT]) def _v2_volumes_62766883_2c28_11e6_b8e6_000f53306ae1_snapshots_CREATE( self, method, url, body, headers): body = self.fixtures.load('create_volume_snapshot.json') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _v2_volumes_62766883_2c28_11e6_b8e6_000f53306ae1_snapshots( self, method, url, body, headers): body = self.fixtures.load('list_volume_snapshots.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v2_snapshots_c0def940_9324_11e6_9a56_000f533176b1_DELETE( self, method, url, body, headers): return (httplib.NO_CONTENT, None, {}, httplib.responses[httplib.NO_CONTENT]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_softlayer.py0000664000175000017500000002475013153541406024301 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import sys

import pytest

try:
    import Crypto
    Crypto  # reference silences "imported but unused" linters
    crypto = True
except ImportError:
    crypto = False

from libcloud.common.types import InvalidCredsError

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import xmlrpclib
from libcloud.utils.py3 import next

from libcloud.compute.drivers.softlayer import SoftLayerNodeDriver as SoftLayer
from libcloud.compute.drivers.softlayer import SoftLayerException, \
    NODE_STATE_MAP
from libcloud.compute.types import NodeState, KeyPairDoesNotExistError

from libcloud.test import MockHttp               # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import SOFTLAYER_PARAMS

# All-zero fingerprint used by the SSH key fixtures.
null_fingerprint = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \
                   '00:00:00:00:00'


class SoftLayerTests(unittest.TestCase):
    """Tests for the SoftLayer compute driver against mocked XML-RPC."""

    def setUp(self):
        SoftLayer.connectionCls.conn_class = SoftLayerMockHttp
        SoftLayerMockHttp.type = None
        self.driver = SoftLayer(*SOFTLAYER_PARAMS)

    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        node = nodes[0]
        self.assertEqual(node.name, 'libcloud-testing1.example.com')
        self.assertEqual(node.state, NodeState.RUNNING)
        self.assertEqual(node.extra['password'], 'L3TJVubf')

    def test_initializing_state(self):
        nodes = self.driver.list_nodes()
        node = nodes[1]
        self.assertEqual(node.state, NODE_STATE_MAP['INITIATING'])

    def test_list_locations(self):
        locations = self.driver.list_locations()
        dal = next(l for l in locations if l.id == 'dal05')
        self.assertEqual(dal.country, 'US')
        self.assertEqual(dal.id, 'dal05')
        self.assertEqual(dal.name, 'Dallas - Central U.S.')

    def test_list_images(self):
        images = self.driver.list_images()
        image = images[0]
        self.assertEqual(image.id, 'CENTOS_6_64')

    def test_get_image(self):
        image = self.driver.get_image('CENTOS_6_64')
        self.assertEqual(image.id, 'CENTOS_6_64')

    def test_fail_get_image(self):
        self.assertRaises(
            SoftLayerException, self.driver.get_image, 'NOT_IMAGE')

    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 13)
        for size in sizes:
            # FIX: assertGreater gives a useful failure message,
            # unlike assertTrue(size.price > 0.0).
            self.assertGreater(size.price, 0.0)

    def test_create_node(self):
        node = self.driver.create_node(name="libcloud-testing",
                                       location=self.driver.list_locations()[0],
                                       size=self.driver.list_sizes()[0],
                                       image=self.driver.list_images()[0])
        self.assertEqual(node.name, 'libcloud-testing.example.com')
        self.assertEqual(node.state, NODE_STATE_MAP['RUNNING'])

    def test_create_fail(self):
        SoftLayerMockHttp.type = "SOFTLAYEREXCEPTION"
        self.assertRaises(
            SoftLayerException,
            self.driver.create_node,
            name="SOFTLAYEREXCEPTION",
            location=self.driver.list_locations()[0],
            size=self.driver.list_sizes()[0],
            image=self.driver.list_images()[0])

    def test_create_creds_error(self):
        SoftLayerMockHttp.type = "INVALIDCREDSERROR"
        self.assertRaises(
            InvalidCredsError,
            self.driver.create_node,
            name="INVALIDCREDSERROR",
            location=self.driver.list_locations()[0],
            size=self.driver.list_sizes()[0],
            image=self.driver.list_images()[0])

    def test_create_node_no_location(self):
        self.driver.create_node(name="Test",
                                size=self.driver.list_sizes()[0],
                                image=self.driver.list_images()[0])

    def test_create_node_no_image(self):
        self.driver.create_node(name="Test", size=self.driver.list_sizes()[0])

    def test_create_node_san(self):
        self.driver.create_node(name="Test", ex_local_disk=False)

    def test_create_node_domain_for_name(self):
        self.driver.create_node(name="libcloud.org")

    def test_create_node_ex_options(self):
        self.driver.create_node(name="Test",
                                location=self.driver.list_locations()[0],
                                size=self.driver.list_sizes()[0],
                                image=self.driver.list_images()[0],
                                ex_domain='libcloud.org',
                                ex_cpus=2,
                                ex_ram=2048,
                                ex_disk=100,
                                ex_key='test1',
                                ex_bandwidth=10,
                                ex_local_disk=False,
                                ex_datacenter='Dal05',
                                ex_os='UBUNTU_LATEST')

    def test_reboot_node(self):
        node = self.driver.list_nodes()[0]
        self.driver.reboot_node(node)

    def test_destroy_node(self):
        node = self.driver.list_nodes()[0]
        self.driver.destroy_node(node)

    def test_list_keypairs(self):
        keypairs = self.driver.list_key_pairs()
        self.assertEqual(len(keypairs), 2)
        self.assertEqual(keypairs[0].name, 'test1')
        self.assertEqual(keypairs[0].fingerprint, null_fingerprint)

    def test_get_key_pair(self):
        key_pair = self.driver.get_key_pair(name='test1')
        self.assertEqual(key_pair.name, 'test1')

    def test_get_key_pair_does_not_exist(self):
        self.assertRaises(KeyPairDoesNotExistError,
                          self.driver.get_key_pair,
                          name='test-key-pair')

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_create_key_pair(self):
        if crypto:
            key_pair = self.driver.create_key_pair(name='my-key-pair')
            fingerprint = ('1f:51:ae:28:bf:89:e9:d8:1f:25:5d'
                           ':37:2d:7d:b8:ca:9f:f5:f1:6f')
            self.assertEqual(key_pair.name, 'my-key-pair')
            self.assertEqual(key_pair.fingerprint, fingerprint)
            # FIX: dedicated assertion instead of
            # assertTrue(... is not None).
            self.assertIsNotNone(key_pair.private_key)
        else:
            self.assertRaises(NotImplementedError,
                              self.driver.create_key_pair,
                              name='my-key-pair')

    def test_delete_key_pair(self):
        success = self.driver.delete_key_pair('test1')
        self.assertTrue(success)


class SoftLayerMockHttp(MockHttp):
    """Canned XML-RPC responses for the SoftLayer API.

    Every request is routed through ``_xmlrpc``, which decodes the
    XML-RPC method name from the request body and dispatches to the
    handler named ``<url-with-underscores>_<method>``.
    """
    fixtures = ComputeFileFixtures('softlayer')

    def _get_method_name(self, type, use_param, qs, path):
        # All SoftLayer calls go through a single XML-RPC endpoint.
        return "_xmlrpc"

    def _xmlrpc(self, method, url, body, headers):
        params, meth_name = xmlrpclib.loads(body)
        url = url.replace("/", "_")
        meth_name = "%s_%s" % (url, meth_name)
        return getattr(self, meth_name)(method, url, body, headers)

    def _xmlrpc_v3_SoftLayer_Virtual_Guest_getCreateObjectOptions(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Account_getVirtualGuests(
            self, method, url, body, headers):
        body = self.fixtures.load('v3_SoftLayer_Account_getVirtualGuests.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Location_Datacenter_getDatacenters(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'v3_SoftLayer_Location_Datacenter_getDatacenters.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Virtual_Guest_createObject(
            self, method, url, body, headers):
        # The fixture depends on the scenario selected via the mock type.
        fixture = {
            None: 'v3__SoftLayer_Virtual_Guest_createObject.xml',
            'INVALIDCREDSERROR': 'SoftLayer_Account.xml',
            'SOFTLAYEREXCEPTION': 'fail.xml',
        }[self.type]
        body = self.fixtures.load(fixture)
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Virtual_Guest_getObject(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'v3__SoftLayer_Virtual_Guest_getObject.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Virtual_Guest_rebootSoft(
            self, method, url, body, headers):
        body = self.fixtures.load('empty.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Virtual_Guest_deleteObject(
            self, method, url, body, headers):
        body = self.fixtures.load('empty.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Account_getSshKeys(
            self, method, url, body, headers):
        body = self.fixtures.load('v3__SoftLayer_Account_getSshKeys.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Security_Ssh_Key_getObject(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'v3__SoftLayer_Security_Ssh_Key_getObject.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Security_Ssh_Key_createObject(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'v3__SoftLayer_Security_Ssh_Key_createObject.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Security_Ssh_Key_deleteObject(
            self, method, url, body, headers):
        body = self.fixtures.load(
            'v3__SoftLayer_Security_Ssh_Key_deleteObject.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())

# ---- archive member boundary: libcloud/test/compute/test_ecs.py ----
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals

import sys
import unittest

from libcloud.common.types import LibcloudError
from libcloud.compute.base import Node, NodeAuthPassword, NodeImage, \
    NodeLocation, NodeSize, StorageVolume, VolumeSnapshot
from libcloud.compute.drivers.ecs import ECSDriver
from libcloud.compute.types import NodeState, StorageVolumeState
from libcloud.test import MockHttp, LibcloudTestCase
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import ECS_PARAMS
from libcloud.utils.py3 import httplib


class ECSDriverTestCase(LibcloudTestCase):
    """Tests for the Aliyun ECS compute driver against mocked HTTP."""

    region = 'cn-qingdao'
    zone = 'cn-qingdao-b'
    image_id = 'ubuntu1404_64_20G_aliaegis_20150325.vhd'

    def setUp(self):
        # The mock keeps a back-reference to the test case so handlers
        # can assert against per-test attributes (zone, fake ids, ...).
        ECSMockHttp.test = self
        ECSDriver.connectionCls.conn_class = ECSMockHttp
        ECSMockHttp.use_param = 'Action'
        ECSMockHttp.type = None

        self.driver = ECSDriver(*ECS_PARAMS, region=self.region)
        self.fake_size = NodeSize('ecs.t1.small', 'ecs t1 small',
                                  None, None, None, None,
                                  self.driver)
        self.fake_image = NodeImage(self.image_id, name='ubuntu 14.04 64bit',
                                    driver=self.driver)
        self.fake_node = Node(id='fake-node1', name='fake-node',
                              state=NodeState.RUNNING,
                              public_ips=None, private_ips=None,
                              driver=self.driver)
        self.fake_volume = StorageVolume(id='fake-volume1',
                                         name='fake-volume',
                                         size=self.fake_size,
                                         driver=self.driver)
        self.fake_snapshot = VolumeSnapshot(id='fake-snapshot1',
                                            driver=self.driver)
        self.fake_location = NodeLocation(id=self.region, name=self.region,
                                          country=None, driver=self.driver)
        self.fake_instance_id = 'fake_instance_id'

    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        self.assertIsNotNone(nodes)
        self.assertEqual(1, len(nodes))
        node = nodes[0]
        self.assertEqual('iZ28n7dkvovZ', node.name)
        self.assertEqual('i-28n7dkvov', node.id)
        self.assertEqual(NodeState.PENDING, node.state)
        self.assertEqual(1, len(node.public_ips))
        self.assertEqual('114.215.124.73', node.public_ips[0])
        self.assertEqual(1, len(node.private_ips))
        self.assertEqual('10.163.197.74', node.private_ips[0])
        expected_extra = {
            'image_id': 'ubuntu1404_64_20G_aliaegis_20150325.vhd',
            'description': '',
            'instance_type_family': 'ecs.t1',
            'zone_id': 'cn-qingdao-b',
            'internet_charge_type': 'PayByTraffic',
            'serial_number': 'ca0122d9-374d-4fce-9fc0-71f7c3eaf1c3',
            'io_optimized': 'false',
            'device_available': 'true',
            'instance_network_type': 'classic',
            'hostname': 'iZ28n7dkvovZ',
            'instance_type': 'ecs.t1.small',
            'creation_time': '2015-12-27T07:35Z',
            'instance_charge_type': 'PostPaid',
            'expired_time': '2999-09-08T16:00Z'
        }
        self._validate_extras(expected_extra, node.extra)
        vpc = {
            'vpc_id': '',
            'vswitch_id': '',
            'private_ip_address': None,
            'nat_ip_address': ''
        }
        self._validate_extras(vpc, node.extra['vpc_attributes'])
        eip_address = {
            'allocation_id': '',
            'ip_address': '',
            'internet_charge_type': '',
            'bandwidth': None
        }
        self._validate_extras(eip_address, node.extra['eip_address'])
        self.assertIsNone(node.extra['operation_locks']['lock_reason'])

    def test_list_nodes_with_ex_node_ids(self):
        ECSMockHttp.type = 'list_nodes_ex_node_ids'
        nodes = self.driver.list_nodes(ex_node_ids=['i-28n7dkvov',
                                                    'not-existed-id'])
        self.assertIsNotNone(nodes)

    def test_list_nodes_with_ex_filters(self):
        ECSMockHttp.type = 'list_nodes_ex_filters'
        nodes = self.driver.list_nodes(ex_filters={'ZoneId': self.zone})
        self.assertIsNotNone(nodes)

    def _validate_extras(self, expected, actual):
        """Assert that every expected extra key exists with the right value."""
        self.assertIsNotNone(actual)
        # FIX: dict.items() is already iterable — redundant iter() removed;
        # assertIn replaces assertTrue(key in actual) for a clearer failure.
        for key, value in expected.items():
            self.assertIn(key, actual)
            self.assertEqual(value, actual[key],
                             ('extra %(key)s not equal, '
                              'expected: "%(expected)s", '
                              'actual: "%(actual)s"' %
                              {'key': key,
                               'expected': value,
                               'actual': actual[key]}))

    def test_create_node(self):
        ECSMockHttp.type = 'create_node'
        name = 'test_create_node'
        node = self.driver.create_node(name=name, image=self.fake_image,
                                       size=self.fake_size,
                                       ex_security_group_id='sg-28ou0f3xa',
                                       ex_description='description',
                                       ex_internet_charge_type='PayByTraffic',
                                       ex_internet_max_bandwidth_out=1,
                                       ex_internet_max_bandwidth_in=200,
                                       ex_hostname='hostname',
                                       auth=NodeAuthPassword('password'),
                                       ex_io_optimized=True,
                                       ex_system_disk={'category': 'cloud',
                                                       'disk_name': 'root',
                                                       'description': 'sys'},
                                       ex_vswitch_id='vswitch-id1',
                                       ex_private_ip_address='1.1.1.2',
                                       ex_client_token='client_token')
        self.assertIsNotNone(node)

    def test_create_node_with_data_disk(self):
        ECSMockHttp.type = 'create_node_with_data'
        self.name = 'test_create_node'
        self.data_disk = {
            'size': 5,
            'category': self.driver.disk_categories.CLOUD,
            'disk_name': 'data1',
            'description': 'description',
            'device': '/dev/xvdb',
            'delete_with_instance': True}
        node = self.driver.create_node(name=self.name, image=self.fake_image,
                                       size=self.fake_size,
                                       ex_security_group_id='sg-28ou0f3xa',
                                       ex_data_disks=self.data_disk)
        self.assertIsNotNone(node)

    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        self.assertEqual(2, len(sizes))
        size = sizes[0]
        self.assertEqual('ecs.t1.xsmall', size.id)
        self.assertEqual('ecs.t1.xsmall', size.name)
        self.assertEqual(0.5, size.ram)
        self.assertEqual(1, size.extra['cpu_core_count'])
        self.assertEqual('ecs.t1', size.extra['instance_type_family'])
        size = sizes[1]
        self.assertEqual('ecs.s2.small', size.id)
        self.assertEqual('ecs.s2.small', size.name)
        self.assertEqual(1.0, size.ram)
        self.assertEqual(2, size.extra['cpu_core_count'])
        self.assertEqual('ecs.s2', size.extra['instance_type_family'])

    def test_list_locations(self):
        locations = self.driver.list_locations()
        self.assertEqual(9, len(locations))
        location = locations[0]
        self.assertEqual('ap-southeast-1', location.id)
        self.assertIsNone(location.country)

    def test_create_node_without_sg_id_exception(self):
        name = 'test_create_node_without_sg_id_exception'
        self.assertRaises(AttributeError, self.driver.create_node,
                          name=name, image=self.fake_image,
                          size=self.fake_size)

    def test_creat_node_paybytraffic_exception(self):
        name = 'test_create_node_paybytraffic_exception'
        self.assertRaises(AttributeError, self.driver.create_node,
                          name=name, image=self.fake_image,
                          size=self.fake_size,
                          ex_security_group_id='sg-id1',
                          ex_internet_charge_type='PayByTraffic')

    def test_create_node_ex_system_disk_exception(self):
        name = 'test_creat_node_ex_system_disk_exception'
        self.assertRaises(AttributeError, self.driver.create_node,
                          name=name, image=self.fake_image,
                          size=self.fake_size,
                          ex_security_group_id='sg-id1',
                          ex_system_disk=None)

    def test_create_node_ex_private_ip_address_exception(self):
        name = 'test_create_node_ex_private_ip_address_exception'
        self.assertRaises(AttributeError, self.driver.create_node,
                          name=name, image=self.fake_image,
                          size=self.fake_size,
                          ex_security_group_id='sg-id1',
                          ex_private_ip_address='1.1.1.2')

    def test_reboot_node(self):
        ECSMockHttp.type = 'reboot_node'
        result = self.driver.reboot_node(self.fake_node)
        self.assertTrue(result)

    def test_reboot_node_with_ex_force_stop(self):
        ECSMockHttp.type = 'reboot_node_force_stop'
        result = self.driver.reboot_node(self.fake_node, ex_force_stop=True)
        self.assertTrue(result)

    def test_destroy_node(self):
        ECSMockHttp.type = 'destroy_node'
        result = self.driver.destroy_node(self.fake_node)
        self.assertTrue(result)

    def test_ex_start_node(self):
        ECSMockHttp.type = 'start_node'
        result = self.driver.ex_start_node(self.fake_node)
        self.assertTrue(result)

    def test_ex_stop_node(self):
        ECSMockHttp.type = 'stop_node'
        result = self.driver.ex_stop_node(self.fake_node)
        self.assertTrue(result)

    def test_stop_node_with_ex_force_stop(self):
        ECSMockHttp.type = 'stop_node_force_stop'
        result = self.driver.ex_stop_node(self.fake_node, ex_force_stop=True)
        self.assertTrue(result)

    def test_create_public_ip(self):
        ECSMockHttp.type = 'create_public_ip'
        result = self.driver.create_public_ip(self.fake_instance_id)
        self.assertTrue(result)

    def test_list_volumes(self):
        volumes = self.driver.list_volumes()
        self.assertEqual(2, len(volumes))
        volume = volumes[0]
        self.assertEqual('d-28m5zbua0', volume.id)
        self.assertEqual('', volume.name)
        self.assertEqual(5, volume.size)
        self.assertEqual(StorageVolumeState.AVAILABLE, volume.state)
        expected_extras = {
            'region_id': 'cn-qingdao',
            'zone_id': 'cn-qingdao-b',
            'description': '',
            'type': 'data',
            'category': 'cloud',
            'image_id': '',
            'source_snapshot_id': '',
            'product_code': '',
            'portable': True,
            'instance_id': '',
            'device': '',
            'delete_with_instance': False,
            'enable_auto_snapshot': False,
            'creation_time': '2014-07-23T02:44:07Z',
            'attached_time': '2014-07-23T07:47:35Z',
            'detached_time': '2014-07-23T08:28:48Z',
            'disk_charge_type': 'PostPaid',
            'operation_locks': {'lock_reason': None}
        }
        self._validate_extras(expected_extras, volume.extra)
        volume = volumes[1]
        self.assertEqual('d-28zfrmo13', volume.id)
        self.assertEqual('ubuntu1404sys', volume.name)
        self.assertEqual(5, volume.size)
        self.assertEqual(StorageVolumeState.INUSE, volume.state)
        expected_extras = {
            'region_id': 'cn-qingdao',
            'zone_id': 'cn-qingdao-b',
            'description': 'Description',
            'type': 'system',
            'category': 'cloud',
            'image_id': 'ubuntu1404_64_20G_aliaegis_20150325.vhd',
            'source_snapshot_id': '',
            'product_code': '',
            'portable': False,
            'instance_id': 'i-28whl2nj2',
            'device': '/dev/xvda',
            'delete_with_instance': True,
            'enable_auto_snapshot': True,
            'creation_time': '2014-07-23T02:44:06Z',
            'attached_time': '2016-01-04T15:02:17Z',
            'detached_time': '',
            'disk_charge_type': 'PostPaid',
            'operation_locks': {'lock_reason': None}
        }
        self._validate_extras(expected_extras, volume.extra)

    def test_list_volumes_with_ex_volume_ids(self):
        ECSMockHttp.type = 'list_volumes_ex_volume_ids'
        volumes = self.driver.list_volumes(ex_volume_ids=['i-28n7dkvov',
                                                          'not-existed-id'])
        self.assertIsNotNone(volumes)

    def test_list_volumes_with_ex_filters(self):
        ECSMockHttp.type = 'list_volumes_ex_filters'
        ex_filters = {'InstanceId': self.fake_node.id}
        volumes = self.driver.list_volumes(ex_filters=ex_filters)
        self.assertIsNotNone(volumes)

    def test_list_volume_snapshots(self):
        snapshots = self.driver.list_volume_snapshots(self.fake_volume)
        self.assertEqual(1, len(snapshots))

    def test_list_volume_snapshots_with_ex_snapshot_ids(self):
        ECSMockHttp.type = 'list_volume_snapshots_ex_snapshot_ids'
        ex_snapshot_ids = ['fake-snapshot1']
        self.driver.list_volume_snapshots(self.fake_volume,
                                          ex_snapshot_ids=ex_snapshot_ids)

    def test_list_volume_snapshots_with_ex_filters(self):
        ECSMockHttp.type = 'list_volume_snapshots_ex_filters'
        ex_filters = {'InstanceId': self.fake_node.id}
        self.driver.list_volume_snapshots(self.fake_volume,
                                          ex_filters=ex_filters)

    def test_create_volume(self):
        ECSMockHttp.type = 'create_volume'
        self.volume_size = 1
        self.volume_name = 'fake-volume-name'
        self.description = 'fake-description'
        self.disk_category = 'system'
        self.client_token = 'client_token'
        volume = self.driver.create_volume(self.volume_size, self.volume_name,
                                           snapshot=self.fake_snapshot,
                                           ex_zone_id=self.zone,
                                           ex_description=self.description,
                                           ex_disk_category=self.disk_category,
                                           ex_client_token=self.client_token)
        self.assertIsNotNone(volume)

    def test_create_volume_without_ex_zone_id_exception(self):
        self.assertRaises(AttributeError,
                          self.driver.create_volume,
                          1, 'fake-volume-name')

    def test_create_volume_snapshot(self):
        ECSMockHttp.type = 'create_volume_snapshot'
        self.snapshot_name = 'fake-snapshot1'
        self.description = 'fake-description'
        self.client_token = 'client-token'
        snapshot = self.driver.create_volume_snapshot(
            self.fake_volume, name=self.snapshot_name,
            ex_description=self.description,
            ex_client_token=self.client_token)
        self.assertIsNotNone(snapshot)

    def test_attach_volume(self):
        self.device = '/dev/sdb'
        self.delete_with_instance = True
        attached = self.driver.attach_volume(
            self.fake_node, self.fake_volume, device=self.device,
            ex_delete_with_instance=self.delete_with_instance)
        self.assertTrue(attached)

    def test_detach_volume(self):
        self.instance_id = 'fake-node1'
        result = self.driver.detach_volume(self.fake_volume,
                                           ex_instance_id=self.instance_id)
        self.assertTrue(result)

    def test_detach_volume_query_instance_id(self):
        ECSMockHttp.type = 'detach_volume'
        result = self.driver.detach_volume(self.fake_volume)
        self.assertTrue(result)

    def test_detach_volume_query_instance_id_exception(self):
        self.assertRaises(AttributeError, self.driver.detach_volume,
                          self.fake_volume)

    def test_destroy_volume(self):
        ECSMockHttp.type = 'destroy_volume'
        result = self.driver.destroy_volume(self.fake_volume)
        self.assertTrue(result)

    def test_destroy_volume_query_volumes_exception(self):
        self.assertRaises(LibcloudError,
                          self.driver.destroy_volume,
                          self.fake_volume)

    def test_destroy_volume_state_exception(self):
        ECSMockHttp.type = 'destroy_volume_state'
        self.assertRaises(LibcloudError,
                          self.driver.destroy_volume,
                          self.fake_volume)

    def test_destroy_volume_snapshot(self):
        result = self.driver.destroy_volume_snapshot(self.fake_snapshot)
        self.assertTrue(result)

    def test_destroy_volume_snapshot_exception(self):
        self.assertRaises(AttributeError,
                          self.driver.destroy_volume_snapshot,
                          self.fake_volume)

    def test_list_images(self):
        images = self.driver.list_images(self.fake_location)
        self.assertEqual(1, len(images))
        image = images[0]
        self.assertEqual('freebsd1001_64_20G_aliaegis_20150527.vhd', image.id)
        self.assertEqual('freebsd1001_64_20G_aliaegis_20150527.vhd',
                         image.name)
        expected_extra = {
            'image_version': '1.0.0',
            'os_type': 'linux',
            'platform': 'Freebsd',
            'architecture': 'x86_64',
            'description': 'freebsd1001_64_20G_aliaegis_20150527.vhd',
            'size': 20,
            'image_owner_alias': 'system',
            'os_name': 'FreeBSD  10.1 64位',
            'product_code': '',
            'is_subscribed': False,
            'progress': '100%',
            'creation_time': '2015-06-19T07:25:42Z',
            'usage': 'instance',
            'is_copied': False
        }
        self._validate_extras(expected_extra, image.extra)
        expected_dev_mappings = {
            'snapshot_id': '',
            'size': 20,
            'device': '/dev/xvda',
            'format': '',
            'import_oss_bucket': '',
            'import_oss_object': ''
        }
        self._validate_extras(expected_dev_mappings,
                              image.extra['disk_device_mappings'])

    def test_list_images_with_ex_image_ids(self):
        ECSMockHttp.type = 'list_images_ex_image_ids'
        self.driver.list_images(location=self.fake_location,
                                ex_image_ids=[self.fake_image.id,
                                              'not-existed'])

    def test_list_images_with_ex_image_ids_type_exception(self):
        self.assertRaises(AttributeError, self.driver.list_images,
                          location=self.fake_location,
                          ex_image_ids={'image_ids': 'id1,id2'})

    def test_list_images_with_ex_filters(self):
        ECSMockHttp.type = 'list_images_ex_filters'
        ex_filters = {'Status': 'Available'}
        self.driver.list_images(location=self.fake_location,
                                ex_filters=ex_filters)

    def test_list_images_multiple_pages(self):
        ECSMockHttp.type = 'list_images_pages'
        images = self.driver.list_images()
        self.assertEqual(2, len(images))

    def test_create_image(self):
        self.image_name = 'fake-image1'
        self.description = 'description'
        self.image_version = '1.0.0'
        self.client_token = 'client_token'
        image = self.driver.create_image(None, self.image_name,
                                         self.description,
                                         ex_snapshot_id=self.fake_snapshot.id,
                                         ex_image_version=self.image_version,
                                         ex_client_token=self.client_token)
        self.assertIsNotNone(image)

    def test_creaet_image_exception(self):
        self.assertRaises(AttributeError,
                          self.driver.create_image,
                          None, None)

    def test_delete_image(self):
        result = self.driver.delete_image(self.fake_image)
        self.assertTrue(result)

    def test_get_image(self):
        ECSMockHttp.type = 'get_image'
        image = self.driver.get_image(self.fake_image.id)
        self.assertIsNotNone(image)

    def test_get_image_not_found_exception(self):
        ECSMockHttp.type = 'get_image_not_found'
        self.assertRaises(LibcloudError,
                          self.driver.get_image,
                          self.fake_image.id)

    def test_copy_image(self):
        self.image_name = 'copied-image1'
        self.description = 'description'
        self.dest_region = 'cn-hangzhou'
        self.client_token = 'client-token'
        image = self.driver.copy_image(
            self.region, self.fake_image,
            self.image_name,
            description=self.description,
            ex_destination_region_id=self.dest_region,
            ex_client_token=self.client_token)
        self.assertIsNotNone(image)

    def test_copy_image_in_the_same_region(self):
        ECSMockHttp.type = 'copy_image_same_region'
        image = self.driver.copy_image(self.region, self.fake_image, None)
        self.assertIsNotNone(image)

    def test_ex_list_security_groups(self):
        sgs = self.driver.ex_list_security_groups()
        self.assertEqual(1, len(sgs))
        sg = sgs[0]
        self.assertEqual('sg-28ou0f3xa', sg.id)
        self.assertEqual('sg-28ou0f3xa', sg.name)
        self.assertEqual('System created security group.', sg.description)
        self.assertEqual('', sg.vpc_id)
        self.assertEqual('2015-06-26T08:35:30Z', sg.creation_time)

    def test_ex_join_security_group(self):
        ex_security_group_id_value = 'sg-28ou0f3xa'
        result = self.driver.ex_join_security_group(
            self.fake_node, group_id=ex_security_group_id_value)
        self.assertTrue(result)

    def test_ex_leave_security_group(self):
        ex_security_group_id_value = 'sg-28ou0f3xa'
        result = self.driver.ex_leave_security_group(
            self.fake_node, group_id=ex_security_group_id_value)
        self.assertTrue(result)

    def test_ex_list_security_groups_with_ex_filters(self):
        ECSMockHttp.type = 'list_sgs_filters'
        self.vpc_id = 'vpc1'
        ex_filters = {'VpcId': self.vpc_id}
        sgs = self.driver.ex_list_security_groups(ex_filters=ex_filters)
        self.assertEqual(1, len(sgs))

    def test_ex_list_zones(self):
        zones = self.driver.ex_list_zones()
        self.assertEqual(1, len(zones))
        zone = zones[0]
        self.assertEqual('cn-qingdao-b', zone.id)
        self.assertEqual(self.driver, zone.driver)
        self.assertEqual('青岛可用区B', zone.name)
        self.assertIsNotNone(zone.available_resource_types)
        self.assertEqual('IoOptimized', zone.available_resource_types[0])
        self.assertIsNotNone(zone.available_instance_types)
        self.assertEqual('ecs.m2.medium', zone.available_instance_types[0])
        self.assertIsNotNone(zone.available_disk_categories)
        self.assertEqual('cloud_ssd', zone.available_disk_categories[0])


class ECSMockHttp(MockHttp):
    """Canned responses for the Aliyun ECS API.

    Requests are dispatched on the ``Action`` query parameter, prefixed
    with the current mock ``type`` — e.g. type ``reboot_node`` and action
    ``RebootInstance`` route to ``_reboot_node_RebootInstance``.

    NOTE(review): the original file continues past this chunk with more
    handlers (from ``_DescribeDisks`` onward); only the handlers fully
    visible here are reproduced.
    """
    fixtures = ComputeFileFixtures('ecs')

    def _DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _list_nodes_ex_node_ids_DescribeInstances(self, method, url, body,
                                                  headers):
        params = {'InstanceIds': '["i-28n7dkvov", "not-existed-id"]'}
        self.assertUrlContainsQueryParams(url, params)
        return self._DescribeInstances(method, url, body, headers)

    def _list_nodes_ex_filters_DescribeInstances(self, method, url, body,
                                                 headers):
        params = {'ZoneId': self.test.zone}
        self.assertUrlContainsQueryParams(url, params)
        return self._DescribeInstances(method, url, body, headers)

    def _DescribeInstanceTypes(self, method, url, body, headers):
        resp_body = self.fixtures.load('describe_instance_types.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _DescribeRegions(self, method, url, body, headers):
        resp_body = self.fixtures.load('describe_regions.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_CreateInstance(self, method, url, body, headers):
        params = {'SecurityGroupId': 'sg-28ou0f3xa',
                  'Description': 'description',
                  'InternetChargeType': 'PayByTraffic',
                  'InternetMaxBandwidthOut': '1',
                  'InternetMaxBandwidthIn': '200',
                  'HostName': 'hostname',
                  'Password': 'password',
                  'IoOptimized': 'optimized',
                  'SystemDisk.Category': 'cloud',
                  'SystemDisk.DiskName': 'root',
                  'SystemDisk.Description': 'sys',
                  'VSwitchId': 'vswitch-id1',
                  'PrivateIpAddress': '1.1.1.2',
                  'ClientToken': 'client_token'}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('create_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('create_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_StartInstance(self, method, url, body, headers):
        resp_body = self.fixtures.load('start_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_with_data_CreateInstance(self, method, url, body,
                                              headers):
        params = {'SecurityGroupId': 'sg-28ou0f3xa',
                  'DataDisk.1.Size': '5',
                  'DataDisk.1.Category': 'cloud',
                  'DataDisk.1.DiskName': 'data1',
                  'DataDisk.1.Description': 'description',
                  'DataDisk.1.Device': '/dev/xvdb',
                  'DataDisk.1.DeleteWithInstance': 'true'}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('create_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_with_data_DescribeInstances(self, method, url, body,
                                                 headers):
        resp_body = self.fixtures.load('create_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_with_data_StartInstance(self, method, url, body,
                                             headers):
        resp_body = self.fixtures.load('start_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _reboot_node_RebootInstance(self, method, url, body, headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id,
                                                'ForceStop': 'false'})
        resp_body = self.fixtures.load('reboot_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _reboot_node_DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('reboot_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _reboot_node_force_stop_RebootInstance(self, method, url, body,
                                               headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id,
                                                'ForceStop': 'true'})
        resp_body = self.fixtures.load('reboot_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _reboot_node_force_stop_DescribeInstances(self, method, url, body,
                                                  headers):
        resp_body = self.fixtures.load('reboot_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _destroy_node_DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('destroy_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _destroy_node_DeleteInstance(self, method, url, body, headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id})
        resp_body = self.fixtures.load('delete_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _start_node_StartInstance(self, method, url, body, headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id})
        resp_body = self.fixtures.load('start_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _start_node_DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('reboot_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _stop_node_StopInstance(self, method, url, body, headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id,
                                                'ForceStop': 'false'})
        resp_body = self.fixtures.load('stop_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _stop_node_DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('stop_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _stop_node_force_stop_StopInstance(self, method, url, body, headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id,
                                                'ForceStop': 'true'})
        resp_body = self.fixtures.load('stop_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _stop_node_force_stop_DescribeInstances(self, method, url, body,
                                                headers):
        resp_body = self.fixtures.load('stop_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])
self.fixtures.load('describe_disks.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _list_volumes_ex_volume_ids_DescribeDisks(self, method, url, body, headers): region = self.test.region params = {'DiskIds': '["i-28n7dkvov", "not-existed-id"]', 'RegionId': region} self.assertUrlContainsQueryParams(url, params) return self._DescribeInstances(method, url, body, headers) def _list_volumes_ex_filters_DescribeDisks(self, method, url, body, headers): params = {'InstanceId': self.test.fake_node.id} self.assertUrlContainsQueryParams(url, params) return self._DescribeDisks(method, url, body, headers) def _DescribeSnapshots(self, method, url, body, headers): region = self.test.region volume_id = self.test.fake_volume.id params = {'RegionId': region, 'DiskId': volume_id} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('describe_snapshots.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _list_volume_snapshots_ex_snapshot_ids_DescribeSnapshots( self, method, url, body, headers): params = {'RegionId': self.test.region, 'SnapshotIds': '["fake-snapshot1"]'} self.assertUrlContainsQueryParams(url, params) return self._DescribeSnapshots(method, url, body, headers) def _list_volume_snapshots_ex_filters_DescribeSnapshots(self, method, url, body, headers): params = {'InstanceId': self.test.fake_node.id} self.assertUrlContainsQueryParams(url, params) return self._DescribeSnapshots(method, url, body, headers) def _create_volume_CreateDisk(self, method, url, body, headers): params = {'RegionId': self.test.region, 'DiskName': self.test.volume_name, 'Size': str(self.test.volume_size), 'ZoneId': self.test.zone, 'SnapshotId': self.test.fake_snapshot.id, 'Description': self.test.description, 'DiskCategory': self.test.disk_category, 'ClientToken': self.test.client_token} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('create_disk.xml') return (httplib.OK, resp_body, {}, 
httplib.responses[httplib.OK]) def _create_volume_DescribeDisks(self, method, url, body, headers): resp_body = self.fixtures.load('create_volume_describe_disks.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _create_volume_snapshot_CreateSnapshot(self, method, url, body, headers): params = {'DiskId': self.test.fake_volume.id, 'SnapshotName': self.test.snapshot_name, 'Description': self.test.description, 'ClientToken': self.test.client_token} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('create_snapshot.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _create_volume_snapshot_DescribeSnapshots(self, method, url, body, headers): resp_body = self.fixtures.load('describe_snapshots.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _AttachDisk(self, method, url, body, headers): delete_with_instance = str(self.test.delete_with_instance).lower() params = {'InstanceId': self.test.fake_node.id, 'DiskId': self.test.fake_volume.id, 'Device': self.test.device, 'DeleteWithInstance': delete_with_instance} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('attach_disk.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _DetachDisk(self, method, url, body, headers): params = {'DiskId': self.test.fake_volume.id, 'InstanceId': self.test.instance_id} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('detach_disk.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _detach_volume_DescribeDisks(self, method, url, body, headers): params = {'DiskIds': '["' + self.test.fake_volume.id + '"]'} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('detach_volume_describe_disks.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _detach_volume_DetachDisk(self, method, url, body, headers): params = {'DiskId': self.test.fake_volume.id, 
'InstanceId': 'i-28whl2nj2'} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('detach_disk.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _destroy_volume_DescribeDisks(self, method, url, body, headers): params = {'DiskIds': '["' + self.test.fake_volume.id + '"]'} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('destroy_volume_describe_disks.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _destroy_volume_DeleteDisk(self, method, url, body, headers): params = {'DiskId': self.test.fake_volume.id} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('delete_disk.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _destroy_volume_state_DescribeDisks(self, method, url, body, headers): return self._detach_volume_DescribeDisks(method, url, body, headers) def _DeleteSnapshot(self, method, url, body, header): params = {'SnapshotId': self.test.fake_snapshot.id} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('delete_snapshot.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _DescribeImages(self, method, url, body, headers): params = {'RegionId': self.test.fake_location.id} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('describe_images.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _list_images_pages_DescribeImages(self, method, url, body, headers): if 'PageNumber=2' in url: resp_body = self.fixtures.load('pages_describe_images_page2.xml') else: resp_body = self.fixtures.load('pages_describe_images.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _list_images_ex_image_ids_DescribeImages(self, method, url, body, headers): params = {'ImageId': self.test.fake_image.id + ',not-existed'} self.assertUrlContainsQueryParams(url, params) return self._DescribeImages(method, url, 
body, headers) def _list_images_ex_filters_DescribeImages(self, method, url, body, headers): params = {'Status': 'Available'} self.assertUrlContainsQueryParams(url, params) return self._DescribeImages(method, url, body, headers) def _CreateImage(self, method, url, body, headers): params = {'RegionId': self.test.region, 'ImageName': self.test.image_name, 'Description': self.test.description, 'SnapshotId': self.test.fake_snapshot.id, 'ImageVersion': self.test.image_version, 'ClientToken': self.test.client_token} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('create_image.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _DeleteImage(self, method, url, body, headers): params = {'RegionId': self.test.region, 'ImageId': self.test.fake_image.id} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('delete_image.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _get_image_DescribeImages(self, method, url, body, headers): params = {'RegionId': self.test.region, 'ImageId': self.test.fake_image.id} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('describe_images.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _get_image_not_found_DescribeImages(self, method, url, body, headers): params = {'RegionId': self.test.region, 'ImageId': self.test.fake_image.id} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('get_image_describe_images.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _CopyImage(self, method, url, body, headers): params = {'RegionId': self.test.region, 'ImageId': self.test.fake_image.id, 'DestinationRegionId': self.test.dest_region, 'DestinationImageName': self.test.image_name, 'DestinationDescription': self.test.description, 'ClientToken': self.test.client_token} self.assertUrlContainsQueryParams(url, params) resp_body = 
self.fixtures.load('copy_image.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _copy_image_same_region_CopyImage(self, method, url, body, headers): params = {'RegionId': self.test.region, 'ImageId': self.test.fake_image.id, 'DestinationRegionId': self.test.region} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('copy_image.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _copy_image_same_region_DescribeImages(self, method, url, body, headers): return self._DescribeImages(method, url, body, headers) def _DescribeSecurityGroups(self, method, url, body, headers): params = {'RegionId': self.test.region} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('describe_security_groups.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _JoinSecurityGroup(self, method, url, body, headers): body = self.fixtures.load('join_security_group_by_id.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _LeaveSecurityGroup(self, method, url, body, headers): body = self.fixtures.load('leave_security_group_by_id.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _list_sgs_filters_DescribeSecurityGroups(self, method, url, body, headers): params = {'VpcId': self.test.vpc_id} self.assertUrlContainsQueryParams(url, params) return self._DescribeSecurityGroups(method, url, body, headers) def _create_sg_CreateSecurityGroup(self, method, url, body, headers): params = {'RegionId': self.test.region, 'Description': 'description', 'ClientToken': 'clientToken'} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('create_security_group.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _delete_sg_by_id_DeleteSecurityGroup(self, method, url, body, headers): params = {'RegionId': self.test.region, 'SecurityGroupId': 'sg-fakeSecurityGroupId'} self.assertUrlContainsQueryParams(url, 
params) resp_body = self.fixtures.load('delete_security_group_by_id.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _list_sgas_DescribeSecurityGroupAttributes(self, method, url, body, headers): params = {'RegionId': self.test.region, 'SecurityGroupId': 'sg-fakeSecurityGroupId', 'NicType': 'internet'} self.assertUrlContainsQueryParams(url, params) resp_body = self.fixtures.load('describe_security_group_attributes.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _DescribeZones(self, method, url, body, headers): resp_body = self.fixtures.load('describe_zones.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) def _create_public_ip_AllocatePublicIpAddress(self, method, url, body, headers): resp_body = self.fixtures.load('create_public_ip.xml') return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_ntta.py0000664000175000017500000000245313153541406023233 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import unittest

from libcloud.compute.drivers.ntta import NTTAmericaNodeDriver
from libcloud.test.compute.test_dimensiondata_v2_3 import DimensionDataMockHttp, DimensionData_v2_3_Tests


class NTTAmericaNodeDriverTests(DimensionData_v2_3_Tests, unittest.TestCase):
    # NTT America's cloud API is a rebranded DimensionData deployment, so the
    # entire DimensionData v2.3 suite is re-run against the NTT America driver;
    # only the fixture wiring in setUp below is driver-specific.

    def setUp(self):
        # Route all HTTP traffic through the shared DimensionData mock and pin
        # the API version the canned fixtures were recorded against.
        NTTAmericaNodeDriver.connectionCls.conn_class = DimensionDataMockHttp
        NTTAmericaNodeDriver.connectionCls.active_api_version = '2.3'
        # Reset any per-test fixture variant selected by a previous test.
        DimensionDataMockHttp.type = None
        self.driver = NTTAmericaNodeDriver('user', 'password')
apache-libcloud-2.2.1/libcloud/test/compute/test_bsnl.py0000664000175000017500000000240313153541406023216 0ustar kamikami00000000000000
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from libcloud.compute.drivers.bsnl import BSNLNodeDriver
from libcloud.test.compute.test_dimensiondata_v2_3 import DimensionDataMockHttp, DimensionData_v2_3_Tests


class BSNLTests(DimensionData_v2_3_Tests, unittest.TestCase):
    # BSNL's cloud is a rebranded DimensionData deployment, so the whole
    # DimensionData v2.3 suite is re-run against the BSNL driver; only the
    # setUp wiring below is driver-specific.

    def setUp(self):
        # Swap the HTTP transport for the shared DimensionData mock and pin
        # the API version the canned fixtures were recorded against.
        BSNLNodeDriver.connectionCls.conn_class = DimensionDataMockHttp
        BSNLNodeDriver.connectionCls.active_api_version = '2.3'
        # Reset any per-test fixture variant selected by a previous test.
        DimensionDataMockHttp.type = None
        self.driver = BSNLNodeDriver('user', 'password')
apache-libcloud-2.2.1/libcloud/test/compute/test_ikoula.py0000664000175000017500000000216312701023453023542 0ustar kamikami00000000000000
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

from libcloud.compute.drivers.ikoula import IkoulaNodeDriver
from libcloud.test.compute.test_cloudstack import CloudStackCommonTestCase
from libcloud.test import unittest


class IkoulaNodeDriverTestCase(CloudStackCommonTestCase, unittest.TestCase):
    # Ikoula runs CloudStack, so the shared CloudStack test suite is re-run
    # unchanged against the Ikoula driver class; no extra wiring is needed.
    driver_klass = IkoulaNodeDriver


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-libcloud-2.2.1/libcloud/test/compute/test_kili.py0000664000175000017500000000276513153541406023213 0ustar kamikami00000000000000
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.
# (continuation of the Apache-2.0 header of test_kili.py)
# See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from libcloud.compute.drivers.kili import KiliCloudNodeDriver, ENDPOINT_ARGS
from libcloud.test.compute.test_openstack import OpenStack_1_1_Tests


def _ex_connection_class_kwargs(self):
    # Test-only replacement for KiliCloudNodeDriver._ex_connection_class_kwargs:
    # reuses the generic OpenStack connection kwargs but rewrites the endpoint
    # details so the inherited OpenStack base tests can run against Kili.
    kwargs = self.openstack_connection_kwargs()
    kwargs['get_endpoint_args'] = ENDPOINT_ARGS
    # Remove keystone from the URL path so that the openstack base tests work
    kwargs['ex_force_auth_url'] = 'https://api.kili.io/v2.0/tokens'
    kwargs['ex_tenant_name'] = self.tenant_name
    return kwargs


# Monkey-patch the driver so every test in the inherited OpenStack 1.1 suite
# picks up the Kili-specific connection kwargs defined above.
KiliCloudNodeDriver._ex_connection_class_kwargs = _ex_connection_class_kwargs


class KiliCloudNodeDriverTests(OpenStack_1_1_Tests, unittest.TestCase):
    # Kili Cloud is an OpenStack deployment, so the whole OpenStack 1.1 test
    # suite is re-run against the Kili driver class.
    driver_klass = KiliCloudNodeDriver
    driver_type = KiliCloudNodeDriver
apache-libcloud-2.2.1/libcloud/test/compute/test_azure.py0000664000175000017500000005331213153541406023413 0ustar kamikami00000000000000
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.import libcloud import os import sys import libcloud.security from libcloud.common.types import LibcloudError from libcloud.compute.base import NodeAuthPassword, NodeImage, NodeSize from libcloud.test import unittest from libcloud.test import LibcloudTestCase from libcloud.test import MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.utils.py3 import httplib from libcloud.compute.base import Node, NodeState from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver class AzureNodeDriverTests(LibcloudTestCase): # required otherwise we get client side SSL verification libcloud.security.VERIFY_SSL_CERT = False SUBSCRIPTION_ID = '3761b98b-673d-526c-8d55-fee918758e6e' KEY_FILE = os.path.join(os.path.dirname(__file__), 'fixtures/azure/libcloud.pem') # empty file is fine def setUp(self): Azure = get_driver(Provider.AZURE) Azure.connectionCls.conn_class = AzureMockHttp self.driver = Azure(self.SUBSCRIPTION_ID, self.KEY_FILE) def test_locations_returned_successfully(self): locations = self.driver.list_locations() self.assertEqual(len(locations), 7) location_names_result = list(a.name for a in locations) location_names_expected = [ 'East Asia', 'Southeast Asia', 'North Europe', 'West Europe', 'East US', 'North Central US', 'West US' ] self.assertListEqual(location_names_result, location_names_expected) matched_location = next( location for location in locations if location.name == 'Southeast Asia' ) services_result = matched_location.available_services services_expected = [ 'Compute', 'Storage', 
'PersistentVMRole', 'HighMemory' ] self.assertListEqual(services_result, services_expected) vm_role_sizes_result = matched_location.virtual_machine_role_sizes vm_role_sizes_expected = [ 'A5', 'A6', 'A7', 'Basic_A0', 'Basic_A1', 'Basic_A2', 'Basic_A3', 'Basic_A4', 'ExtraLarge', 'ExtraSmall', 'Large', 'Medium', 'Small' ] self.assertListEqual(vm_role_sizes_result, vm_role_sizes_expected) def test_images_returned_successfully(self): images = self.driver.list_images() # There should be 215 standard OSImages and one VMImage returned self.assertEqual(len(images), 216) def test_images_returned_successfully_filter_by_location(self): images = self.driver.list_images(location="West US") self.assertEqual(len(images), 207) def test_list_nodes_returned_successfully(self): vmimages = self.driver.list_nodes( ex_cloud_service_name="dcoddkinztest01" ) self.assertEqual(len(vmimages), 2) img0 = vmimages[0] self.assertEqual(img0.id, "dc03") self.assertEqual(img0.name, "dc03") self.assertListEqual(img0.public_ips, ["191.235.135.62"]) self.assertListEqual(img0.private_ips, ["100.92.66.69"]) self.assertEqual(img0.size, None) self.assertEqual(img0.state, NodeState.RUNNING) self.assertTrue(isinstance(img0.extra, dict)) extra = img0.extra self.assertEqual(extra["instance_size"], 'Small') self.assertEqual(extra["power_state"], 'Started') self.assertEqual(extra["ssh_port"], '22') def test_list_nodes_returned_no_deployments(self): nodes = self.driver.list_nodes( ex_cloud_service_name="dcoddkinztest03" ) self.assertEqual(nodes, []) def test_list_nodes_returned_no_cloud_service(self): with self.assertRaises(LibcloudError): self.driver.list_nodes(ex_cloud_service_name="dcoddkinztest04") def test_restart_node_success(self): node = Node( id="dc03", name="dc03", state=NodeState.RUNNING, public_ips=[], private_ips=[], driver=self.driver ) result = self.driver.reboot_node( node=node, ex_cloud_service_name="dcoddkinztest01", ex_deployment_slot="Production" ) self.assertTrue(result) # simulating 
attempting to reboot a node that is already rebooting def test_restart_node_fail_no_deployment(self): node = Node( id="dc03", name="dc03", state=NodeState.RUNNING, public_ips=[], private_ips=[], driver=self.driver ) with self.assertRaises(LibcloudError): self.driver.reboot_node( node=node, ex_cloud_service_name="dcoddkinztest02", ex_deployment_slot="Production" ) def test_restart_node_fail_no_cloud_service(self): node = Node( id="dc03", name="dc03", state=NodeState.RUNNING, public_ips=[], private_ips=[], driver=self.driver ) with self.assertRaises(LibcloudError): self.driver.reboot_node( node=node, ex_cloud_service_name="dcoddkinztest03", ex_deployment_slot="Production" ) def test_restart_node_fail_node_not_found(self): node = Node( id="dc13", name="dc13", state=NodeState.RUNNING, public_ips=[], private_ips=[], driver=self.driver ) result = self.driver.reboot_node( node=node, ex_cloud_service_name="dcoddkinztest01", ex_deployment_slot="Production" ) self.assertFalse(result) def test_destroy_node_success_single_node_in_cloud_service(self): node = Node( id="oddkinz1", name="oddkinz1", state=NodeState.RUNNING, public_ips=[], private_ips=[], driver=self.driver ) result = self.driver.destroy_node( node=node, ex_cloud_service_name="oddkinz1", ex_deployment_slot="Production" ) self.assertTrue(result) def test_destroy_node_success_multiple_nodes_in_cloud_service(self): node = Node( id="oddkinz1", name="oddkinz1", state=NodeState.RUNNING, public_ips=[], private_ips=[], driver=self.driver ) result = self.driver.destroy_node( node=node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production" ) self.assertTrue(result) def test_destroy_node_fail_node_does_not_exist(self): node = Node( id="oddkinz2", name="oddkinz2", state=NodeState.RUNNING, public_ips=[], private_ips=[], driver=self.driver ) with self.assertRaises(LibcloudError): self.driver.destroy_node( node=node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production" ) def 
test_destroy_node_success_cloud_service_not_found(self): node = Node( id="cloudredis", name="cloudredis", state=NodeState.RUNNING, public_ips=[], private_ips=[], driver=self.driver ) with self.assertRaises(LibcloudError): self.driver.destroy_node( node=node, ex_cloud_service_name="oddkinz5", ex_deployment_slot="Production" ) def test_ex_create_cloud_service(self): result = self.driver.ex_create_cloud_service(name="testdc123", location="North Europe") self.assertTrue(result) def test_ex_create_cloud_service_service_exists(self): with self.assertRaises(LibcloudError): self.driver.ex_create_cloud_service( name="testdc1234", location="North Europe" ) def test_ex_destroy_cloud_service(self): result = self.driver.ex_destroy_cloud_service(name="testdc123") self.assertTrue(result) def test_ex_destroy_cloud_service_service_does_not_exist(self): with self.assertRaises(LibcloudError): self.driver.ex_destroy_cloud_service(name="testdc1234") def test_ex_create_storage_service(self): result = self.driver.ex_create_storage_service(name="testdss123", location="East US") self.assertTrue(result) def test_ex_create_storage_service_service_exists(self): with self.assertRaises(LibcloudError): self.driver.ex_create_storage_service( name="dss123", location="East US" ) def test_ex_destroy_storage_service(self): result = self.driver.ex_destroy_storage_service(name="testdss123") self.assertTrue(result) def test_ex_destroy_storage_service_service_does_not_exist(self): with self.assertRaises(LibcloudError): self.driver.ex_destroy_storage_service(name="dss123") def test_create_node_and_deployment_one_node(self): kwargs = { "ex_storage_service_name": "mtlytics", "ex_deployment_name": "dcoddkinztest02", "ex_deployment_slot": "Production", "ex_admin_user_id": "azurecoder" } auth = NodeAuthPassword("Pa55w0rd", False) kwargs["auth"] = auth kwargs["name"] = "dcoddkinztest03" kwargs["size"] = NodeSize( id="ExtraSmall", name="ExtraSmall", ram=1024, disk="30gb", bandwidth=0, price=0, driver=self.driver 
) kwargs["image"] = NodeImage( id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415", name="FakeImage", driver=self.driver, extra={ 'vm_image': False } ) result = self.driver.create_node( ex_cloud_service_name="testdcabc", **kwargs ) self.assertIsNotNone(result) def test_create_node_and_deployment_second_node(self): kwargs = { "ex_storage_service_name": "mtlytics", "ex_deployment_name": "dcoddkinztest02", "ex_deployment_slot": "Production", "ex_admin_user_id": "azurecoder" } auth = NodeAuthPassword("Pa55w0rd", False) kwargs["auth"] = auth kwargs["size"] = NodeSize( id="ExtraSmall", name="ExtraSmall", ram=1024, disk="30gb", bandwidth=0, price=0, driver=self.driver ) kwargs["image"] = NodeImage( id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415", name="FakeImage", driver=self.driver, extra={ 'vm_image': False } ) kwargs["name"] = "dcoddkinztest03" result = self.driver.create_node( ex_cloud_service_name="testdcabc2", **kwargs ) self.assertIsNotNone(result) class AzureMockHttp(MockHttp): fixtures = ComputeFileFixtures('azure') def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deployments_dc01(self, method, url, body, headers): return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def 
_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz1(self, method, url, body, headers): return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz2(self, method, url, body, headers): return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc03(self, method, url, body, headers): headers["x-ms-request-id"] = "acc33f6756cda6fd96826394fce4c9f3" return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production(self, method, url, body, headers): if 
method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_images(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_images.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_vmimages(self, method, url, body, headers): if method == "GET": body = 
self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_vmimages.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_locations(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_locations.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices(self, method, url, body, headers): # request url is the same irrespective of serviceName, only way to differentiate if "testdc123" in body: return (httplib.CREATED, body, headers, httplib.responses[httplib.CREATED]) elif "testdc1234" in body: return (httplib.CONFLICT, body, headers, httplib.responses[httplib.CONFLICT]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc123(self, method, url, body, headers): return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_storageservices(self, method, url, body, headers): # request url is the same irrespective of serviceName, only way to differentiate if "testdss123" in body: return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) elif "dss123" in body: return (httplib.CONFLICT, body, headers, httplib.responses[httplib.CONFLICT]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_storageservices_testdss123(self, method, url, body, headers): return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_storageservices_dss123(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_storageservices_dss123.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc1234(self, method, url, body, headers): return (httplib.NOT_FOUND, body, headers, 
httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments(self, method, url, body, headers): headers["x-ms-request-id"] = "acc33f6756cda6fd96826394fce4c9f3" return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments_dcoddkinztest02_roles(self, method, url, body, headers): headers["x-ms-request-id"] = "acc33f6756cda6fd96826394fce4c9f3" return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) 
apache-libcloud-2.2.1/libcloud/test/compute/test_cloudwatt.py0000664000175000017500000000250513153541406024271 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from libcloud.compute.drivers.cloudwatt import CloudwattNodeDriver from libcloud.test.compute.test_openstack import OpenStack_1_1_Tests class CloudwattNodeDriverTests(OpenStack_1_1_Tests, unittest.TestCase): driver_klass = CloudwattNodeDriver driver_type = CloudwattNodeDriver # These tests dont work because cloudwatt doesn't pass, # auth tokens- hide them from the base class def test_ex_force_auth_token_passed_to_connection(self): pass def test_auth_token_without_base_url_raises_exception(self): pass apache-libcloud-2.2.1/libcloud/test/compute/test_openstack.py0000664000175000017500000025161413153541406024261 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement import os import sys import unittest import datetime import pytest from libcloud.utils.iso8601 import UTC try: import simplejson as json except ImportError: import json from mock import Mock, patch import requests_mock from libcloud.utils.py3 import httplib from libcloud.utils.py3 import method_type from libcloud.utils.py3 import u from libcloud.common.base import LibcloudConnection from libcloud.common.types import InvalidCredsError, MalformedResponseError, \ LibcloudError from libcloud.compute.types import Provider, KeyPairDoesNotExistError, StorageVolumeState, \ VolumeSnapshotState from libcloud.compute.providers import get_driver from libcloud.compute.drivers.openstack import ( OpenStack_1_0_NodeDriver, OpenStack_1_1_NodeDriver, OpenStackSecurityGroup, OpenStackSecurityGroupRule, OpenStack_1_1_FloatingIpPool, OpenStack_1_1_FloatingIpAddress, OpenStackKeyPair, OpenStack_1_0_Connection, OpenStackNodeDriver ) from libcloud.compute.base import Node, NodeImage, NodeSize from libcloud.pricing import set_pricing, clear_pricing_data from libcloud.test import MockHttp, XML_HEADERS from libcloud.test.file_fixtures import ComputeFileFixtures, OpenStackFixtures from libcloud.test.compute import TestCaseMixin from libcloud.test.secrets import OPENSTACK_PARAMS BASE_DIR = os.path.abspath(os.path.split(__file__)[0]) def test_driver_instantiation_invalid_auth(): with pytest.raises(LibcloudError): d = OpenStackNodeDriver( 'user', 'correct_password', ex_force_auth_version='5.0', ex_force_auth_url='http://x.y.z.y:5000', ex_tenant_name='admin') 
d.list_nodes() class OpenStackAuthTests(unittest.TestCase): def setUp(self): OpenStack_1_0_NodeDriver.connectionCls = OpenStack_1_0_Connection OpenStack_1_0_NodeDriver.connectionCls.conn_class = LibcloudConnection def test_auth_host_passed(self): forced_auth = 'http://x.y.z.y:5000' d = OpenStack_1_0_NodeDriver( 'user', 'correct_password', ex_force_auth_version='2.0_password', ex_force_auth_url='http://x.y.z.y:5000', ex_tenant_name='admin') self.assertEqual(d._ex_force_auth_url, forced_auth) with requests_mock.Mocker() as mock: body2 = ComputeFileFixtures('openstack').load('_v2_0__auth.json') mock.register_uri('POST', 'http://x.y.z.y:5000/v2.0/tokens', text=body2, headers={'content-type': 'application/json; charset=UTF-8'}) d.connection._populate_hosts_and_request_paths() self.assertEqual(d.connection.host, 'test_endpoint.com') class OpenStack_1_0_Tests(TestCaseMixin, unittest.TestCase): should_list_locations = False should_list_volumes = False driver_klass = OpenStack_1_0_NodeDriver driver_args = OPENSTACK_PARAMS driver_kwargs = {} # driver_kwargs = {'ex_force_auth_version': '1.0'} @classmethod def create_driver(self): if self is not OpenStack_1_0_FactoryMethodTests: self.driver_type = self.driver_klass return self.driver_type(*self.driver_args, **self.driver_kwargs) def setUp(self): # monkeypatch get_endpoint because the base openstack driver doesn't actually # work with old devstack but this class/tests are still used by the rackspace # driver def get_endpoint(*args, **kwargs): return "https://servers.api.rackspacecloud.com/v1.0/slug" self.driver_klass.connectionCls.get_endpoint = get_endpoint self.driver_klass.connectionCls.conn_class = OpenStackMockHttp self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com" OpenStackMockHttp.type = None self.driver = self.create_driver() # normally authentication happens lazily, but we force it here self.driver.connection._populate_hosts_and_request_paths() clear_pricing_data() 
@patch('libcloud.common.openstack.OpenStackServiceCatalog') def test_populate_hosts_and_requests_path(self, _): tomorrow = datetime.datetime.today() + datetime.timedelta(1) cls = self.driver_klass.connectionCls count = 5 # Test authentication and token re-use con = cls('username', 'key') osa = con.get_auth_class() mocked_auth_method = Mock() osa.authenticate = mocked_auth_method # Valid token returned on first call, should be reused. for i in range(0, count): con._populate_hosts_and_request_paths() if i == 0: osa.auth_token = '1234' osa.auth_token_expires = tomorrow self.assertEqual(mocked_auth_method.call_count, 1) osa.auth_token = None osa.auth_token_expires = None # ex_force_auth_token provided, authenticate should never be called con = cls('username', 'key', ex_force_base_url='http://ponies', ex_force_auth_token='1234') osa = con.get_auth_class() mocked_auth_method = Mock() osa.authenticate = mocked_auth_method for i in range(0, count): con._populate_hosts_and_request_paths() self.assertEqual(mocked_auth_method.call_count, 0) def test_auth_token_is_set(self): self.driver.connection._populate_hosts_and_request_paths() self.assertEqual( self.driver.connection.auth_token, "aaaaaaaaaaaa-bbb-cccccccccccccc") def test_auth_token_expires_is_set(self): self.driver.connection._populate_hosts_and_request_paths() expires = self.driver.connection.auth_token_expires self.assertEqual(expires.isoformat(), "2031-11-23T21:00:14-06:00") def test_auth(self): if self.driver.connection._auth_version == '2.0': return OpenStackMockHttp.type = 'UNAUTHORIZED' try: self.driver = self.create_driver() self.driver.list_nodes() except InvalidCredsError: e = sys.exc_info()[1] self.assertEqual(True, isinstance(e, InvalidCredsError)) else: self.fail('test should have thrown') def test_auth_missing_key(self): if self.driver.connection._auth_version == '2.0': return OpenStackMockHttp.type = 'UNAUTHORIZED_MISSING_KEY' try: self.driver = self.create_driver() self.driver.list_nodes() except 
MalformedResponseError: e = sys.exc_info()[1] self.assertEqual(True, isinstance(e, MalformedResponseError)) else: self.fail('test should have thrown') def test_auth_server_error(self): if self.driver.connection._auth_version == '2.0': return OpenStackMockHttp.type = 'INTERNAL_SERVER_ERROR' try: self.driver = self.create_driver() self.driver.list_nodes() except MalformedResponseError: e = sys.exc_info()[1] self.assertEqual(True, isinstance(e, MalformedResponseError)) else: self.fail('test should have thrown') def test_error_parsing_when_body_is_missing_message(self): OpenStackMockHttp.type = 'NO_MESSAGE_IN_ERROR_BODY' try: self.driver.list_images() except Exception: e = sys.exc_info()[1] self.assertEqual(True, isinstance(e, Exception)) else: self.fail('test should have thrown') def test_list_locations(self): locations = self.driver.list_locations() self.assertEqual(len(locations), 1) def test_list_nodes(self): OpenStackMockHttp.type = 'EMPTY' ret = self.driver.list_nodes() self.assertEqual(len(ret), 0) OpenStackMockHttp.type = None ret = self.driver.list_nodes() self.assertEqual(len(ret), 1) node = ret[0] self.assertEqual('67.23.21.33', node.public_ips[0]) self.assertTrue('10.176.168.218' in node.private_ips) self.assertEqual(node.extra.get('flavorId'), '1') self.assertEqual(node.extra.get('imageId'), '11') self.assertEqual(type(node.extra.get('metadata')), type(dict())) OpenStackMockHttp.type = 'METADATA' ret = self.driver.list_nodes() self.assertEqual(len(ret), 1) node = ret[0] self.assertEqual(type(node.extra.get('metadata')), type(dict())) self.assertEqual(node.extra.get('metadata').get('somekey'), 'somevalue') OpenStackMockHttp.type = None def test_list_images(self): ret = self.driver.list_images() expected = {10: {'serverId': None, 'status': 'ACTIVE', 'created': '2009-07-20T09:14:37-05:00', 'updated': '2009-07-20T09:14:37-05:00', 'progress': None, 'minDisk': None, 'minRam': None}, 11: {'serverId': '91221', 'status': 'ACTIVE', 'created': 
'2009-11-29T20:22:09-06:00', 'updated': '2009-11-29T20:24:08-06:00', 'progress': '100', 'minDisk': '5', 'minRam': '256'}} for ret_idx, extra in list(expected.items()): for key, value in list(extra.items()): self.assertEqual(ret[ret_idx].extra[key], value) def test_create_node(self): image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='racktest', image=image, size=size) self.assertEqual(node.name, 'racktest') self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') def test_create_node_without_adminPass(self): OpenStackMockHttp.type = 'NO_ADMIN_PASS' image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='racktest', image=image, size=size) self.assertEqual(node.name, 'racktest') self.assertEqual(node.extra.get('password'), None) def test_create_node_ex_shared_ip_group(self): OpenStackMockHttp.type = 'EX_SHARED_IP_GROUP' image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='racktest', image=image, size=size, ex_shared_ip_group_id='12345') self.assertEqual(node.name, 'racktest') self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') def test_create_node_with_metadata(self): OpenStackMockHttp.type = 'METADATA' image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) metadata = {'a': 'b', 'c': 'd'} files = {'/file1': 'content1', '/file2': 'content2'} node = self.driver.create_node(name='racktest', image=image, size=size, metadata=metadata, files=files) self.assertEqual(node.name, 'racktest') self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') 
self.assertEqual(node.extra.get('metadata'), metadata) def test_reboot_node(self): node = Node(id=72258, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = node.reboot() self.assertTrue(ret is True) def test_destroy_node(self): node = Node(id=72258, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = node.destroy() self.assertTrue(ret is True) def test_ex_limits(self): limits = self.driver.ex_limits() self.assertTrue("rate" in limits) self.assertTrue("absolute" in limits) def test_create_image(self): node = Node(id=444222, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) image = self.driver.create_image(node, "imgtest") self.assertEqual(image.name, "imgtest") self.assertEqual(image.id, "12345") def test_delete_image(self): image = NodeImage(id=333111, name='Ubuntu 8.10 (intrepid)', driver=self.driver) ret = self.driver.delete_image(image) self.assertTrue(ret) def test_ex_list_ip_addresses(self): ret = self.driver.ex_list_ip_addresses(node_id=72258) self.assertEqual(2, len(ret.public_addresses)) self.assertTrue('67.23.10.131' in ret.public_addresses) self.assertTrue('67.23.10.132' in ret.public_addresses) self.assertEqual(1, len(ret.private_addresses)) self.assertTrue('10.176.42.16' in ret.private_addresses) def test_ex_list_ip_groups(self): ret = self.driver.ex_list_ip_groups() self.assertEqual(2, len(ret)) self.assertEqual('1234', ret[0].id) self.assertEqual('Shared IP Group 1', ret[0].name) self.assertEqual('5678', ret[1].id) self.assertEqual('Shared IP Group 2', ret[1].name) self.assertTrue(ret[0].servers is None) def test_ex_list_ip_groups_detail(self): ret = self.driver.ex_list_ip_groups(details=True) self.assertEqual(2, len(ret)) self.assertEqual('1234', ret[0].id) self.assertEqual('Shared IP Group 1', ret[0].name) self.assertEqual(2, len(ret[0].servers)) self.assertEqual('422', ret[0].servers[0]) self.assertEqual('3445', ret[0].servers[1]) self.assertEqual('5678', 
ret[1].id) self.assertEqual('Shared IP Group 2', ret[1].name) self.assertEqual(3, len(ret[1].servers)) self.assertEqual('23203', ret[1].servers[0]) self.assertEqual('2456', ret[1].servers[1]) self.assertEqual('9891', ret[1].servers[2]) def test_ex_create_ip_group(self): ret = self.driver.ex_create_ip_group('Shared IP Group 1', '5467') self.assertEqual('1234', ret.id) self.assertEqual('Shared IP Group 1', ret.name) self.assertEqual(1, len(ret.servers)) self.assertEqual('422', ret.servers[0]) def test_ex_delete_ip_group(self): ret = self.driver.ex_delete_ip_group('5467') self.assertEqual(True, ret) def test_ex_share_ip(self): ret = self.driver.ex_share_ip('1234', '3445', '67.23.21.133') self.assertEqual(True, ret) def test_ex_unshare_ip(self): ret = self.driver.ex_unshare_ip('3445', '67.23.21.133') self.assertEqual(True, ret) def test_ex_resize(self): node = Node(id=444222, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) self.assertTrue(self.driver.ex_resize(node=node, size=size)) def test_ex_confirm_resize(self): node = Node(id=444222, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) self.assertTrue(self.driver.ex_confirm_resize(node=node)) def test_ex_revert_resize(self): node = Node(id=444222, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) self.assertTrue(self.driver.ex_revert_resize(node=node)) def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 7, 'Wrong sizes count') for size in sizes: self.assertTrue(isinstance(size.price, float), 'Wrong size price type') if self.driver.api_name == 'openstack': self.assertEqual(size.price, 0, 'Size price should be zero by default') def test_list_sizes_with_specified_pricing(self): if self.driver.api_name != 'openstack': return pricing = dict((str(i), i) for i in range(1, 8)) set_pricing(driver_type='compute', 
driver_name='openstack', pricing=pricing) sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 7, 'Wrong sizes count') for size in sizes: self.assertTrue(isinstance(size.price, float), 'Wrong size price type') self.assertEqual(float(size.price), float(pricing[size.id])) class OpenStack_1_0_FactoryMethodTests(OpenStack_1_0_Tests): should_list_locations = False should_list_volumes = False driver_klass = OpenStack_1_0_NodeDriver driver_type = get_driver(Provider.OPENSTACK) driver_args = OPENSTACK_PARAMS + ('1.0',) def test_factory_method_invalid_version(self): try: self.driver_type(*(OPENSTACK_PARAMS + ('15.5',))) except NotImplementedError: pass else: self.fail('Exception was not thrown') class OpenStackMockHttp(MockHttp, unittest.TestCase): fixtures = ComputeFileFixtures('openstack') auth_fixtures = OpenStackFixtures() json_content_headers = {'content-type': 'application/json; charset=UTF-8'} # fake auth token response def _v1_0(self, method, url, body, headers): headers = { 'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug', 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06', 'x-storage-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', 'x-storage-url': 'https://storage4.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) def _v1_0_UNAUTHORIZED(self, method, url, body, headers): return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED]) def _v1_0_INTERNAL_SERVER_ERROR(self, method, url, body, headers): return (httplib.INTERNAL_SERVER_ERROR, "

500: Internal Server Error

", {}, httplib.responses[httplib.INTERNAL_SERVER_ERROR]) def _v1_0_slug_images_detail_NO_MESSAGE_IN_ERROR_BODY(self, method, url, body, headers): body = self.fixtures.load('300_multiple_choices.json') return (httplib.MULTIPLE_CHOICES, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_0_UNAUTHORIZED_MISSING_KEY(self, method, url, body, headers): headers = { 'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug', 'x-auth-tokenx': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) def _v2_0_tokens(self, method, url, body, headers): body = self.auth_fixtures.load('_v2_0__auth.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_0_slug_servers_detail_EMPTY(self, method, url, body, headers): body = self.fixtures.load('v1_slug_servers_detail_empty.xml') return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) def _v1_0_slug_servers_detail(self, method, url, body, headers): body = self.fixtures.load('v1_slug_servers_detail.xml') return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) def _v1_0_slug_servers_detail_METADATA(self, method, url, body, headers): body = self.fixtures.load('v1_slug_servers_detail_metadata.xml') return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) def _v1_0_slug_images_333111(self, method, url, body, headers): if method != "DELETE": raise NotImplementedError() # this is currently used for deletion of an image # as such it should not accept GET/POST return(httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT]) def _v1_0_slug_images(self, method, url, body, headers): if method != "POST": raise NotImplementedError() # this is currently used for creation of new image with # POST request, don't handle GET to avoid possible confusion 
body = self.fixtures.load('v1_slug_images_post.xml') return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) def _v1_0_slug_images_detail(self, method, url, body, headers): body = self.fixtures.load('v1_slug_images_detail.xml') return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) def _v1_0_slug_servers(self, method, url, body, headers): body = self.fixtures.load('v1_slug_servers.xml') return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) def _v1_0_slug_servers_NO_ADMIN_PASS(self, method, url, body, headers): body = self.fixtures.load('v1_slug_servers_no_admin_pass.xml') return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) def _v1_0_slug_servers_EX_SHARED_IP_GROUP(self, method, url, body, headers): # test_create_node_ex_shared_ip_group # Verify that the body contains sharedIpGroupId XML element body = u(body) self.assertTrue(body.find('sharedIpGroupId="12345"') != -1) body = self.fixtures.load('v1_slug_servers.xml') return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) def _v1_0_slug_servers_METADATA(self, method, url, body, headers): body = self.fixtures.load('v1_slug_servers_metadata.xml') return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) def _v1_0_slug_servers_72258_action(self, method, url, body, headers): if method != "POST" or body[:8] != "500: Internal Server Error", {'content-type': 'text/html'}, httplib.responses[httplib.INTERNAL_SERVER_ERROR]) class OpenStack_1_1_Tests(unittest.TestCase, TestCaseMixin): should_list_locations = False should_list_volumes = True driver_klass = OpenStack_1_1_NodeDriver driver_type = OpenStack_1_1_NodeDriver driver_args = OPENSTACK_PARAMS driver_kwargs = {'ex_force_auth_version': '2.0'} @classmethod def create_driver(self): if self is not OpenStack_1_1_FactoryMethodTests: self.driver_type = self.driver_klass return self.driver_type(*self.driver_args, 
**self.driver_kwargs) def setUp(self): self.driver_klass.connectionCls.conn_class = OpenStack_2_0_MockHttp self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com" OpenStackMockHttp.type = None OpenStack_1_1_MockHttp.type = None OpenStack_2_0_MockHttp.type = None self.driver = self.create_driver() # normally authentication happens lazily, but we force it here self.driver.connection._populate_hosts_and_request_paths() clear_pricing_data() self.node = self.driver.list_nodes()[1] def _force_reauthentication(self): """ Trash current auth token so driver will be forced to re-authentication on next request. """ self.driver.connection._ex_force_base_url = 'http://ex_force_base_url.com:666/forced_url' self.driver.connection.auth_token = None self.driver.connection.auth_token_expires = None self.driver.connection._osa.auth_token = None self.driver.connection._osa.auth_token_expires = None def test_auth_token_is_set(self): self._force_reauthentication() self.driver.connection._populate_hosts_and_request_paths() self.assertEqual( self.driver.connection.auth_token, "aaaaaaaaaaaa-bbb-cccccccccccccc") def test_auth_token_expires_is_set(self): self._force_reauthentication() self.driver.connection._populate_hosts_and_request_paths() expires = self.driver.connection.auth_token_expires self.assertEqual(expires.isoformat(), "2031-11-23T21:00:14-06:00") def test_ex_force_base_url(self): # change base url and trash the current auth token so we can # re-authenticate self.driver.connection._ex_force_base_url = 'http://ex_force_base_url.com:666/forced_url' self.driver.connection.auth_token = None self.driver.connection._populate_hosts_and_request_paths() # assert that we use the base url and not the auth url self.assertEqual(self.driver.connection.host, 'ex_force_base_url.com') self.assertEqual(self.driver.connection.port, 666) self.assertEqual(self.driver.connection.request_path, '/forced_url') def test_get_endpoint_populates_host_port_and_request_path(self): # simulate 
a subclass overriding this method self.driver.connection.get_endpoint = lambda: 'http://endpoint_auth_url.com:1555/service_url' self.driver.connection.auth_token = None self.driver.connection._ex_force_base_url = None self.driver.connection._populate_hosts_and_request_paths() # assert that we use the result of get endpoint self.assertEqual(self.driver.connection.host, 'endpoint_auth_url.com') self.assertEqual(self.driver.connection.port, 1555) self.assertEqual(self.driver.connection.request_path, '/service_url') def test_set_auth_token_populates_host_port_and_request_path(self): # change base url and trash the current auth token so we can # re-authenticate self.driver.connection._ex_force_base_url = 'http://some_other_ex_force_base_url.com:1222/some-service' self.driver.connection.auth_token = "preset-auth-token" self.driver.connection._populate_hosts_and_request_paths() # assert that we use the base url and not the auth url self.assertEqual( self.driver.connection.host, 'some_other_ex_force_base_url.com') self.assertEqual(self.driver.connection.port, 1222) self.assertEqual(self.driver.connection.request_path, '/some-service') def test_auth_token_without_base_url_raises_exception(self): kwargs = { 'ex_force_auth_version': '2.0', 'ex_force_auth_token': 'preset-auth-token' } try: self.driver_type(*self.driver_args, **kwargs) self.fail('Expected failure setting auth token without base url') except LibcloudError: pass else: self.fail('Expected failure setting auth token without base url') def test_ex_force_auth_token_passed_to_connection(self): base_url = 'https://servers.api.rackspacecloud.com/v1.1/slug' kwargs = { 'ex_force_auth_version': '2.0', 'ex_force_auth_token': 'preset-auth-token', 'ex_force_base_url': base_url } driver = self.driver_type(*self.driver_args, **kwargs) driver.list_nodes() self.assertEqual(kwargs['ex_force_auth_token'], driver.connection.auth_token) self.assertEqual('servers.api.rackspacecloud.com', driver.connection.host) 
self.assertEqual('/v1.1/slug', driver.connection.request_path) self.assertEqual(443, driver.connection.port) def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 2) node = nodes[0] self.assertEqual('12065', node.id) # test public IPv4 self.assertTrue('12.16.18.28' in node.public_ips) self.assertTrue('50.57.94.35' in node.public_ips) # fixed public ip self.assertTrue('1.1.1.1' in node.public_ips) # floating public ip self.assertTrue('2.2.2.2' in node.public_ips) # test public IPv6 self.assertTrue( '2001:4801:7808:52:16:3eff:fe47:788a' in node.public_ips) # test private IPv4 self.assertTrue('10.182.64.34' in node.private_ips) # fixed private ip self.assertTrue('10.3.3.3' in node.private_ips) # floating private ip self.assertTrue('192.168.3.3' in node.private_ips) self.assertTrue('172.16.1.1' in node.private_ips) # test private IPv6 self.assertTrue( 'fec0:4801:7808:52:16:3eff:fe60:187d' in node.private_ips) # test creation date self.assertEqual(node.created_at, datetime.datetime(2011, 10, 11, 0, 51, 39, tzinfo=UTC)) self.assertEqual(node.extra.get('flavorId'), '2') self.assertEqual(node.extra.get('imageId'), '7') self.assertEqual(node.extra.get('metadata'), {}) self.assertEqual(node.extra['updated'], '2011-10-11T00:50:04Z') self.assertEqual(node.extra['created'], '2011-10-11T00:51:39Z') self.assertEqual(node.extra.get('userId'), 'rs-reach') self.assertEqual(node.extra.get('hostId'), '912566d83a13fbb357ea' '3f13c629363d9f7e1ba3f' '925b49f3d2ab725') self.assertEqual(node.extra.get('disk_config'), 'AUTO') self.assertEqual(node.extra.get('task_state'), 'spawning') self.assertEqual(node.extra.get('vm_state'), 'active') self.assertEqual(node.extra.get('power_state'), 1) self.assertEqual(node.extra.get('progress'), 25) self.assertEqual(node.extra.get('fault')['id'], 1234) self.assertTrue(node.extra.get('service_name') is not None) self.assertTrue(node.extra.get('uri') is not None) def test_list_nodes_no_image_id_attribute(self): # 
Regression test for LIBCLOUD-455
        self.driver_klass.connectionCls.conn_class.type = 'ERROR_STATE_NO_IMAGE_ID'

        nodes = self.driver.list_nodes()
        # node without an image reference must yield imageId of None,
        # not blow up during parsing
        self.assertEqual(nodes[0].extra['imageId'], None)

    def test_list_volumes(self):
        """list_volumes parses the fixture volumes, including extra fields."""
        volumes = self.driver.list_volumes()
        self.assertEqual(len(volumes), 2)
        volume = volumes[0]

        self.assertEqual('cd76a3a1-c4ce-40f6-9b9f-07a61508938d', volume.id)
        self.assertEqual('test_volume_2', volume.name)
        self.assertEqual(StorageVolumeState.AVAILABLE, volume.state)
        self.assertEqual(2, volume.size)
        self.assertEqual(volume.extra, {
            'description': '',
            'attachments': [{
                'id': 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d',
                "device": "/dev/vdb",
                "serverId": "12065",
                "volumeId": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d",
            }],
            'snapshot_id': None,
            'state': 'available',
            'location': 'nova',
            'volume_type': 'None',
            'metadata': {},
            'created_at': '2013-06-24T11:20:13.000000',
        })

        # also test that unknown state resolves to StorageVolumeState.UNKNOWN
        volume = volumes[1]
        self.assertEqual('cfcec3bc-b736-4db5-9535-4c24112691b5', volume.id)
        self.assertEqual('test_volume', volume.name)
        self.assertEqual(50, volume.size)
        self.assertEqual(StorageVolumeState.UNKNOWN, volume.state)
        self.assertEqual(volume.extra, {
            'description': 'some description',
            'attachments': [],
            'snapshot_id': '01f48111-7866-4cd2-986a-e92683c4a363',
            'state': 'some-unknown-state',
            'location': 'nova',
            'volume_type': 'None',
            'metadata': {},
            'created_at': '2013-06-21T12:39:02.000000',
        })

    def test_list_sizes(self):
        """Every size (flavor) must expose correctly typed attributes."""
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 8, 'Wrong sizes count')

        for size in sizes:
            self.assertTrue(size.price is None or isinstance(size.price, float),
                            'Wrong size price type')
            self.assertTrue(isinstance(size.ram, int))
            self.assertTrue(isinstance(size.vcpus, int))
            self.assertTrue(isinstance(size.disk, int))
            self.assertTrue(isinstance(size.swap, int))
            self.assertTrue(isinstance(size.ephemeral_disk, int) or
                            size.ephemeral_disk is None)
            self.assertTrue(isinstance(size.extra, dict))
            if
size.id == '1': self.assertEqual(size.ephemeral_disk, 40) self.assertEqual(size.extra, { "policy_class": "standard_flavor", "class": "standard1", "disk_io_index": "2", "number_of_data_disks": "0" }) self.assertEqual(sizes[0].vcpus, 8) def test_list_sizes_with_specified_pricing(self): pricing = dict((str(i), i * 5.0) for i in range(1, 9)) set_pricing(driver_type='compute', driver_name=self.driver.api_name, pricing=pricing) sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 8, 'Wrong sizes count') for size in sizes: self.assertTrue(isinstance(size.price, float), 'Wrong size price type') self.assertEqual(size.price, pricing[size.id], 'Size price should match') def test_list_images(self): images = self.driver.list_images() self.assertEqual(len(images), 13, 'Wrong images count') image = images[0] self.assertEqual(image.id, '13') self.assertEqual(image.name, 'Windows 2008 SP2 x86 (B24)') self.assertEqual(image.extra['updated'], '2011-08-06T18:14:02Z') self.assertEqual(image.extra['created'], '2011-08-06T18:13:11Z') self.assertEqual(image.extra['status'], 'ACTIVE') self.assertEqual(image.extra['metadata']['os_type'], 'windows') self.assertEqual( image.extra['serverId'], '52415800-8b69-11e0-9b19-734f335aa7b3') self.assertEqual(image.extra['minDisk'], 0) self.assertEqual(image.extra['minRam'], 0) def test_create_node(self): image = NodeImage( id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize( 1, '256 slice', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='racktest', image=image, size=size) self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') self.assertEqual(node.name, 'racktest') self.assertEqual(node.extra['password'], 'racktestvJq7d3') self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1') def test_create_node_with_ex_keyname_and_ex_userdata(self): image = NodeImage( id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize( 1, '256 slice', None, None, None, 
None, driver=self.driver) node = self.driver.create_node(name='racktest', image=image, size=size, ex_keyname='devstack', ex_userdata='sample data') self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') self.assertEqual(node.name, 'racktest') self.assertEqual(node.extra['password'], 'racktestvJq7d3') self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1') self.assertEqual(node.extra['key_name'], 'devstack') def test_create_node_with_availability_zone(self): image = NodeImage( id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize( 1, '256 slice', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='racktest', image=image, size=size, availability_zone='testaz') self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') self.assertEqual(node.name, 'racktest') self.assertEqual(node.extra['password'], 'racktestvJq7d3') self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1') self.assertEqual(node.extra['availability_zone'], 'testaz') def test_create_node_with_ex_disk_config(self): OpenStack_1_1_MockHttp.type = 'EX_DISK_CONFIG' image = NodeImage( id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize( 1, '256 slice', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='racktest', image=image, size=size, ex_disk_config='AUTO') self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') self.assertEqual(node.name, 'racktest') self.assertEqual(node.extra['disk_config'], 'AUTO') def test_create_node_with_ex_config_drive(self): OpenStack_1_1_MockHttp.type = 'EX_CONFIG_DRIVE' image = NodeImage( id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize( 1, '256 slice', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='racktest', image=image, size=size, ex_config_drive=True) self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') self.assertEqual(node.name, 'racktest') 
self.assertTrue(node.extra['config_drive']) def test_destroy_node(self): self.assertTrue(self.node.destroy()) def test_reboot_node(self): self.assertTrue(self.node.reboot()) def test_create_volume(self): volume = self.driver.create_volume(1, 'test') self.assertEqual(volume.name, 'test') self.assertEqual(volume.size, 1) def test_create_volume_passes_location_to_request_only_if_not_none(self): with patch.object(self.driver.connection, 'request') as mock_request: self.driver.create_volume(1, 'test', location='mylocation') name, args, kwargs = mock_request.mock_calls[0] self.assertEqual(kwargs["data"]["volume"]["availability_zone"], "mylocation") def test_create_volume_does_not_pass_location_to_request_if_none(self): with patch.object(self.driver.connection, 'request') as mock_request: self.driver.create_volume(1, 'test') name, args, kwargs = mock_request.mock_calls[0] self.assertFalse("availability_zone" in kwargs["data"]["volume"]) def test_create_volume_passes_volume_type_to_request_only_if_not_none(self): with patch.object(self.driver.connection, 'request') as mock_request: self.driver.create_volume(1, 'test', ex_volume_type='myvolumetype') name, args, kwargs = mock_request.mock_calls[0] self.assertEqual(kwargs["data"]["volume"]["volume_type"], "myvolumetype") def test_create_volume_does_not_pass_volume_type_to_request_if_none(self): with patch.object(self.driver.connection, 'request') as mock_request: self.driver.create_volume(1, 'test') name, args, kwargs = mock_request.mock_calls[0] self.assertFalse("volume_type" in kwargs["data"]["volume"]) def test_destroy_volume(self): volume = self.driver.ex_get_volume( 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d') self.assertEqual(self.driver.destroy_volume(volume), True) def test_attach_volume(self): node = self.driver.list_nodes()[0] volume = self.driver.ex_get_volume( 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d') self.assertEqual( self.driver.attach_volume(node, volume, '/dev/sdb'), True) def test_detach_volume(self): node = 
self.driver.list_nodes()[0] volume = self.driver.ex_get_volume( 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d') self.assertEqual( self.driver.attach_volume(node, volume, '/dev/sdb'), True) self.assertEqual(self.driver.detach_volume(volume), True) def test_ex_set_password(self): self.assertTrue(self.driver.ex_set_password(self.node, 'New1&53jPass')) def test_ex_rebuild(self): image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) success = self.driver.ex_rebuild(self.node, image=image) self.assertTrue(success) def test_ex_rebuild_with_ex_disk_config(self): image = NodeImage(id=58, name='Ubuntu 10.10 (intrepid)', driver=self.driver) node = Node(id=12066, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) success = self.driver.ex_rebuild(node, image=image, ex_disk_config='MANUAL') self.assertTrue(success) def test_ex_rebuild_with_ex_config_drive(self): image = NodeImage(id=58, name='Ubuntu 10.10 (intrepid)', driver=self.driver) node = Node(id=12066, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) success = self.driver.ex_rebuild(node, image=image, ex_disk_config='MANUAL', ex_config_drive=True) self.assertTrue(success) def test_ex_resize(self): size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) try: self.driver.ex_resize(self.node, size) except Exception: e = sys.exc_info()[1] self.fail('An error was raised: ' + repr(e)) def test_ex_confirm_resize(self): try: self.driver.ex_confirm_resize(self.node) except Exception: e = sys.exc_info()[1] self.fail('An error was raised: ' + repr(e)) def test_ex_revert_resize(self): try: self.driver.ex_revert_resize(self.node) except Exception: e = sys.exc_info()[1] self.fail('An error was raised: ' + repr(e)) def test_create_image(self): image = self.driver.create_image(self.node, 'new_image') self.assertEqual(image.name, 'new_image') self.assertEqual(image.id, '4949f9ee-2421-4c81-8b49-13119446008b') def test_ex_set_server_name(self): 
old_node = Node( id='12064', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) new_node = self.driver.ex_set_server_name(old_node, 'Bob') self.assertEqual('Bob', new_node.name) def test_ex_set_metadata(self): old_node = Node( id='12063', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) metadata = {'Image Version': '2.1', 'Server Label': 'Web Head 1'} returned_metadata = self.driver.ex_set_metadata(old_node, metadata) self.assertEqual(metadata, returned_metadata) def test_ex_get_metadata(self): node = Node( id='12063', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) metadata = {'Image Version': '2.1', 'Server Label': 'Web Head 1'} returned_metadata = self.driver.ex_get_metadata(node) self.assertEqual(metadata, returned_metadata) def test_ex_update_node(self): old_node = Node( id='12064', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) new_node = self.driver.ex_update_node(old_node, name='Bob') self.assertTrue(new_node) self.assertEqual('Bob', new_node.name) self.assertEqual('50.57.94.30', new_node.public_ips[0]) def test_ex_get_node_details(self): node_id = '12064' node = self.driver.ex_get_node_details(node_id) self.assertEqual(node.id, '12064') self.assertEqual(node.name, 'lc-test') def test_ex_get_node_details_returns_none_if_node_does_not_exist(self): node = self.driver.ex_get_node_details('does-not-exist') self.assertTrue(node is None) def test_ex_get_size(self): size_id = '7' size = self.driver.ex_get_size(size_id) self.assertEqual(size.id, size_id) self.assertEqual(size.name, '15.5GB slice') def test_get_image(self): image_id = '13' image = self.driver.get_image(image_id) self.assertEqual(image.id, image_id) self.assertEqual(image.name, 'Windows 2008 SP2 x86 (B24)') self.assertEqual(image.extra['serverId'], None) self.assertEqual(image.extra['minDisk'], "5") self.assertEqual(image.extra['minRam'], "256") def test_delete_image(self): 
image = NodeImage( id='26365521-8c62-11f9-2c33-283d153ecc3a', name='My Backup', driver=self.driver) result = self.driver.delete_image(image) self.assertTrue(result) def test_extract_image_id_from_url(self): url = 'http://127.0.0.1/v1.1/68/images/1d4a8ea9-aae7-4242-a42d-5ff4702f2f14' url_two = 'http://127.0.0.1/v1.1/68/images/13' image_id = self.driver._extract_image_id_from_url(url) image_id_two = self.driver._extract_image_id_from_url(url_two) self.assertEqual(image_id, '1d4a8ea9-aae7-4242-a42d-5ff4702f2f14') self.assertEqual(image_id_two, '13') def test_ex_rescue_with_password(self): node = Node(id=12064, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) n = self.driver.ex_rescue(node, 'foo') self.assertEqual(n.extra['password'], 'foo') def test_ex_rescue_no_password(self): node = Node(id=12064, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) n = self.driver.ex_rescue(node) self.assertEqual(n.extra['password'], 'foo') def test_ex_unrescue(self): node = Node(id=12064, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) result = self.driver.ex_unrescue(node) self.assertTrue(result) def test_ex_get_node_security_groups(self): node = Node(id='1c01300f-ef97-4937-8f03-ac676d6234be', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) security_groups = self.driver.ex_get_node_security_groups(node) self.assertEqual( len(security_groups), 2, 'Wrong security groups count') security_group = security_groups[1] self.assertEqual(security_group.id, 4) self.assertEqual(security_group.tenant_id, '68') self.assertEqual(security_group.name, 'ftp') self.assertEqual( security_group.description, 'FTP Client-Server - Open 20-21 ports') self.assertEqual(security_group.rules[0].id, 1) self.assertEqual(security_group.rules[0].parent_group_id, 4) self.assertEqual(security_group.rules[0].ip_protocol, "tcp") self.assertEqual(security_group.rules[0].from_port, 20) 
self.assertEqual(security_group.rules[0].to_port, 21) self.assertEqual(security_group.rules[0].ip_range, '0.0.0.0/0') def test_ex_list_security_groups(self): security_groups = self.driver.ex_list_security_groups() self.assertEqual( len(security_groups), 2, 'Wrong security groups count') security_group = security_groups[1] self.assertEqual(security_group.id, 4) self.assertEqual(security_group.tenant_id, '68') self.assertEqual(security_group.name, 'ftp') self.assertEqual( security_group.description, 'FTP Client-Server - Open 20-21 ports') self.assertEqual(security_group.rules[0].id, 1) self.assertEqual(security_group.rules[0].parent_group_id, 4) self.assertEqual(security_group.rules[0].ip_protocol, "tcp") self.assertEqual(security_group.rules[0].from_port, 20) self.assertEqual(security_group.rules[0].to_port, 21) self.assertEqual(security_group.rules[0].ip_range, '0.0.0.0/0') def test_ex_create_security_group(self): name = 'test' description = 'Test Security Group' security_group = self.driver.ex_create_security_group( name, description) self.assertEqual(security_group.id, 6) self.assertEqual(security_group.tenant_id, '68') self.assertEqual(security_group.name, name) self.assertEqual(security_group.description, description) self.assertEqual(len(security_group.rules), 0) def test_ex_delete_security_group(self): security_group = OpenStackSecurityGroup( id=6, tenant_id=None, name=None, description=None, driver=self.driver) result = self.driver.ex_delete_security_group(security_group) self.assertTrue(result) def test_ex_create_security_group_rule(self): security_group = OpenStackSecurityGroup( id=6, tenant_id=None, name=None, description=None, driver=self.driver) security_group_rule = self.driver.ex_create_security_group_rule( security_group, 'tcp', 14, 16, '0.0.0.0/0') self.assertEqual(security_group_rule.id, 2) self.assertEqual(security_group_rule.parent_group_id, 6) self.assertEqual(security_group_rule.ip_protocol, 'tcp') 
self.assertEqual(security_group_rule.from_port, 14) self.assertEqual(security_group_rule.to_port, 16) self.assertEqual(security_group_rule.ip_range, '0.0.0.0/0') self.assertEqual(security_group_rule.tenant_id, None) def test_ex_delete_security_group_rule(self): security_group_rule = OpenStackSecurityGroupRule( id=2, parent_group_id=None, ip_protocol=None, from_port=None, to_port=None, driver=self.driver) result = self.driver.ex_delete_security_group_rule(security_group_rule) self.assertTrue(result) def test_list_key_pairs(self): keypairs = self.driver.list_key_pairs() self.assertEqual(len(keypairs), 2, 'Wrong keypairs count') keypair = keypairs[1] self.assertEqual(keypair.name, 'key2') self.assertEqual( keypair.fingerprint, '5d:66:33:ae:99:0f:fb:cb:86:f2:bc:ae:53:99:b6:ed') self.assertTrue(len(keypair.public_key) > 10) self.assertEqual(keypair.private_key, None) def test_get_key_pair(self): key_pair = self.driver.get_key_pair(name='test-key-pair') self.assertEqual(key_pair.name, 'test-key-pair') def test_get_key_pair_doesnt_exist(self): self.assertRaises(KeyPairDoesNotExistError, self.driver.get_key_pair, name='doesnt-exist') def test_create_key_pair(self): name = 'key0' keypair = self.driver.create_key_pair(name=name) self.assertEqual(keypair.name, name) self.assertEqual(keypair.fingerprint, '80:f8:03:a7:8e:c1:c3:b1:7e:c5:8c:50:04:5e:1c:5b') self.assertTrue(len(keypair.public_key) > 10) self.assertTrue(len(keypair.private_key) > 10) def test_import_key_pair_from_file(self): name = 'key3' path = os.path.join( os.path.dirname(__file__), 'fixtures', 'misc', 'dummy_rsa.pub') pub_key = open(path, 'r').read() keypair = self.driver.import_key_pair_from_file(name=name, key_file_path=path) self.assertEqual(keypair.name, name) self.assertEqual( keypair.fingerprint, '97:10:a6:e7:92:65:7e:69:fe:e6:81:8f:39:3c:8f:5a') self.assertEqual(keypair.public_key, pub_key) self.assertEqual(keypair.private_key, None) def test_import_key_pair_from_string(self): name = 'key3' path = 
os.path.join( os.path.dirname(__file__), 'fixtures', 'misc', 'dummy_rsa.pub') pub_key = open(path, 'r').read() keypair = self.driver.import_key_pair_from_string(name=name, key_material=pub_key) self.assertEqual(keypair.name, name) self.assertEqual( keypair.fingerprint, '97:10:a6:e7:92:65:7e:69:fe:e6:81:8f:39:3c:8f:5a') self.assertEqual(keypair.public_key, pub_key) self.assertEqual(keypair.private_key, None) def test_delete_key_pair(self): keypair = OpenStackKeyPair( name='key1', fingerprint=None, public_key=None, driver=self.driver) result = self.driver.delete_key_pair(key_pair=keypair) self.assertTrue(result) def test_ex_list_floating_ip_pools(self): ret = self.driver.ex_list_floating_ip_pools() self.assertEqual(ret[0].name, 'public') self.assertEqual(ret[1].name, 'foobar') def test_ex_attach_floating_ip_to_node(self): image = NodeImage( id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize( 1, '256 slice', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='racktest', image=image, size=size) node.id = 4242 ip = '42.42.42.42' self.assertTrue(self.driver.ex_attach_floating_ip_to_node(node, ip)) def test_detach_floating_ip_from_node(self): image = NodeImage( id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize( 1, '256 slice', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='racktest', image=image, size=size) node.id = 4242 ip = '42.42.42.42' self.assertTrue(self.driver.ex_detach_floating_ip_from_node(node, ip)) def test_OpenStack_1_1_FloatingIpPool_list_floating_ips(self): pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection) ret = pool.list_floating_ips() self.assertEqual(ret[0].id, '09ea1784-2f81-46dc-8c91-244b4df75bde') self.assertEqual(ret[0].pool, pool) self.assertEqual(ret[0].ip_address, '10.3.1.42') self.assertEqual(ret[0].node_id, None) self.assertEqual(ret[1].id, '04c5336a-0629-4694-ba30-04b0bdfa88a4') self.assertEqual(ret[1].pool, pool) 
self.assertEqual(ret[1].ip_address, '10.3.1.1') self.assertEqual( ret[1].node_id, 'fcfc96da-19e2-40fd-8497-f29da1b21143') def test_OpenStack_1_1_FloatingIpPool_get_floating_ip(self): pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection) ret = pool.get_floating_ip('10.3.1.42') self.assertEqual(ret.id, '09ea1784-2f81-46dc-8c91-244b4df75bde') self.assertEqual(ret.pool, pool) self.assertEqual(ret.ip_address, '10.3.1.42') self.assertEqual(ret.node_id, None) def test_OpenStack_1_1_FloatingIpPool_create_floating_ip(self): pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection) ret = pool.create_floating_ip() self.assertEqual(ret.id, '09ea1784-2f81-46dc-8c91-244b4df75bde') self.assertEqual(ret.pool, pool) self.assertEqual(ret.ip_address, '10.3.1.42') self.assertEqual(ret.node_id, None) def test_OpenStack_1_1_FloatingIpPool_delete_floating_ip(self): pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection) ip = OpenStack_1_1_FloatingIpAddress('foo-bar-id', '42.42.42.42', pool) self.assertTrue(pool.delete_floating_ip(ip)) def test_OpenStack_1_1_FloatingIpAddress_delete(self): pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection) pool.delete_floating_ip = Mock() ip = OpenStack_1_1_FloatingIpAddress('foo-bar-id', '42.42.42.42', pool) ip.pool.delete_floating_ip() self.assertEqual(pool.delete_floating_ip.call_count, 1) def test_ex_list_network(self): networks = self.driver.ex_list_networks() network = networks[0] self.assertEqual(len(networks), 3) self.assertEqual(network.name, 'test1') self.assertEqual(network.cidr, '127.0.0.0/24') def test_ex_create_network(self): network = self.driver.ex_create_network(name='test1', cidr='127.0.0.0/24') self.assertEqual(network.name, 'test1') self.assertEqual(network.cidr, '127.0.0.0/24') def test_ex_delete_network(self): network = self.driver.ex_list_networks()[0] self.assertTrue(self.driver.ex_delete_network(network=network)) def test_ex_get_metadata_for_node(self): image = NodeImage(id=11, 
name='Ubuntu 8.10 (intrepid)', driver=self.driver) size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='foo', image=image, size=size) metadata = self.driver.ex_get_metadata_for_node(node) self.assertEqual(metadata['My Server Name'], 'Apache1') self.assertEqual(len(metadata), 1) def test_ex_pause_node(self): node = Node( id='12063', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) ret = self.driver.ex_pause_node(node) self.assertTrue(ret is True) def test_ex_unpause_node(self): node = Node( id='12063', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) ret = self.driver.ex_unpause_node(node) self.assertTrue(ret is True) def test_ex_stop_node(self): node = Node( id='12063', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) ret = self.driver.ex_stop_node(node) self.assertTrue(ret is True) def test_ex_start_node(self): node = Node( id='12063', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) ret = self.driver.ex_start_node(node) self.assertTrue(ret is True) def test_ex_suspend_node(self): node = Node( id='12063', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) ret = self.driver.ex_suspend_node(node) self.assertTrue(ret is True) def test_ex_resume_node(self): node = Node( id='12063', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) ret = self.driver.ex_resume_node(node) self.assertTrue(ret is True) def test_ex_get_console_output(self): node = Node( id='12086', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, ) resp = self.driver.ex_get_console_output(node) expected_output = 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE' self.assertEqual(resp['output'], expected_output) def test_ex_list_snapshots(self): if self.driver_type.type == 'rackspace': self.conn_class.type = 'RACKSPACE' snapshots = 
self.driver.ex_list_snapshots() self.assertEqual(len(snapshots), 3) self.assertEqual(snapshots[0].created, datetime.datetime(2012, 2, 29, 3, 50, 7, tzinfo=UTC)) self.assertEqual(snapshots[0].extra['created'], "2012-02-29T03:50:07Z") self.assertEqual(snapshots[0].extra['name'], 'snap-001') self.assertEqual(snapshots[0].name, 'snap-001') self.assertEqual(snapshots[0].state, VolumeSnapshotState.AVAILABLE) # invalid date is parsed as None assert snapshots[2].created is None def test_list_volume_snapshots(self): volume = self.driver.list_volumes()[0] # rackspace needs a different mocked response for snapshots, but not for volumes if self.driver_type.type == 'rackspace': self.conn_class.type = 'RACKSPACE' snapshots = self.driver.list_volume_snapshots(volume) self.assertEqual(len(snapshots), 1) self.assertEqual(snapshots[0].id, '4fbbdccf-e058-6502-8844-6feeffdf4cb5') def test_create_volume_snapshot(self): volume = self.driver.list_volumes()[0] if self.driver_type.type == 'rackspace': self.conn_class.type = 'RACKSPACE' ret = self.driver.create_volume_snapshot(volume, 'Test Volume', ex_description='This is a test', ex_force=True) self.assertEqual(ret.id, '3fbbcccf-d058-4502-8844-6feeffdf4cb5') def test_ex_create_snapshot(self): volume = self.driver.list_volumes()[0] if self.driver_type.type == 'rackspace': self.conn_class.type = 'RACKSPACE' ret = self.driver.ex_create_snapshot(volume, 'Test Volume', description='This is a test', force=True) self.assertEqual(ret.id, '3fbbcccf-d058-4502-8844-6feeffdf4cb5') def test_ex_create_snapshot_does_not_post_optional_parameters_if_none(self): volume = self.driver.list_volumes()[0] with patch.object(self.driver, '_to_snapshot'): with patch.object(self.driver.connection, 'request') as mock_request: self.driver.create_volume_snapshot(volume, name=None, ex_description=None, ex_force=True) name, args, kwargs = mock_request.mock_calls[0] self.assertFalse("display_name" in kwargs["data"]["snapshot"]) self.assertFalse("display_description" in 
kwargs["data"]["snapshot"]) def test_destroy_volume_snapshot(self): if self.driver_type.type == 'rackspace': self.conn_class.type = 'RACKSPACE' snapshot = self.driver.ex_list_snapshots()[0] ret = self.driver.destroy_volume_snapshot(snapshot) self.assertTrue(ret) def test_ex_delete_snapshot(self): if self.driver_type.type == 'rackspace': self.conn_class.type = 'RACKSPACE' snapshot = self.driver.ex_list_snapshots()[0] ret = self.driver.ex_delete_snapshot(snapshot) self.assertTrue(ret) class OpenStack_1_1_FactoryMethodTests(OpenStack_1_1_Tests): should_list_locations = False should_list_volumes = True driver_klass = OpenStack_1_1_NodeDriver driver_type = get_driver(Provider.OPENSTACK) driver_args = OPENSTACK_PARAMS + ('1.1',) driver_kwargs = {'ex_force_auth_version': '2.0'} class OpenStack_1_1_MockHttp(MockHttp, unittest.TestCase): fixtures = ComputeFileFixtures('openstack_v1.1') auth_fixtures = OpenStackFixtures() json_content_headers = {'content-type': 'application/json; charset=UTF-8'} def _v2_0_tokens(self, method, url, body, headers): body = self.auth_fixtures.load('_v2_0__auth.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_0(self, method, url, body, headers): headers = { 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', 'x-server-management-url': 'https://api.example.com/v1.1/slug', } return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) def _v1_1_slug_servers_detail(self, method, url, body, headers): body = self.fixtures.load('_servers_detail.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_servers_detail_ERROR_STATE_NO_IMAGE_ID(self, method, url, body, headers): body = self.fixtures.load('_servers_detail_ERROR_STATE.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v2_1337_servers_does_not_exist(self, *args, **kwargs): return httplib.NOT_FOUND, None, {}, 
httplib.responses[httplib.NOT_FOUND] def _v1_1_slug_flavors_detail(self, method, url, body, headers): body = self.fixtures.load('_flavors_detail.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_images_detail(self, method, url, body, headers): body = self.fixtures.load('_images_detail.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_servers(self, method, url, body, headers): if method == "POST": body = self.fixtures.load('_servers_create.json') elif method == "GET": body = self.fixtures.load('_servers.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( '_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_servers_12065_action(self, method, url, body, headers): if method != "POST": self.fail('HTTP method other than POST to action URL') return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) def _v1_1_slug_servers_12064_action(self, method, url, body, headers): if method != "POST": self.fail('HTTP method other than POST to action URL') if "createImage" in json.loads(body): return (httplib.ACCEPTED, "", {"location": "http://127.0.0.1/v1.1/68/images/4949f9ee-2421-4c81-8b49-13119446008b"}, httplib.responses[httplib.ACCEPTED]) elif "rescue" in json.loads(body): return (httplib.OK, '{"adminPass": "foo"}', {}, httplib.responses[httplib.OK]) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) def _v1_1_slug_servers_12066_action(self, method, url, body, headers): if method != "POST": self.fail('HTTP method other than POST to action URL') if "rebuild" not in json.loads(body): self.fail("Did 
not get expected action (rebuild) in action URL") self.assertTrue('\"OS-DCF:diskConfig\": \"MANUAL\"' in body, msg="Manual disk configuration option was not specified in rebuild body: " + body) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) def _v1_1_slug_servers_12065(self, method, url, body, headers): if method == "DELETE": return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) else: raise NotImplementedError() def _v1_1_slug_servers_12064(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_servers_12064.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) elif method == "PUT": body = self.fixtures.load('_servers_12064_updated_name_bob.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) elif method == "DELETE": return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) else: raise NotImplementedError() def _v1_1_slug_servers_12062(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_servers_12064.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_servers_12063_metadata(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_servers_12063_metadata_two_keys.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) elif method == "PUT": body = self.fixtures.load('_servers_12063_metadata_two_keys.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_servers_EX_DISK_CONFIG(self, method, url, body, headers): if method == "POST": body = u(body) self.assertTrue(body.find('\"OS-DCF:diskConfig\": \"AUTO\"')) body = self.fixtures.load('_servers_create_disk_config.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_flavors_7(self, method, url, body, headers): if method == 
"GET": body = self.fixtures.load('_flavors_7.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) else: raise NotImplementedError() def _v1_1_slug_images_13(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_images_13.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) else: raise NotImplementedError() def _v1_1_slug_images_26365521_8c62_11f9_2c33_283d153ecc3a(self, method, url, body, headers): if method == "DELETE": return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT]) else: raise NotImplementedError() def _v1_1_slug_images_4949f9ee_2421_4c81_8b49_13119446008b(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( '_images_4949f9ee_2421_4c81_8b49_13119446008b.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) else: raise NotImplementedError() def _v1_1_slug_servers_1c01300f_ef97_4937_8f03_ac676d6234be_os_security_groups(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( '_servers_1c01300f-ef97-4937-8f03-ac676d6234be_os-security-groups.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_os_security_groups(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_os_security_groups.json') elif method == "POST": body = self.fixtures.load('_os_security_groups_create.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_os_security_groups_6(self, method, url, body, headers): if method == "DELETE": return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT]) else: raise NotImplementedError() def _v1_1_slug_os_security_group_rules(self, method, url, body, headers): if method == "POST": body = 
self.fixtures.load('_os_security_group_rules_create.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_os_security_group_rules_2(self, method, url, body, headers): if method == "DELETE": return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT]) else: raise NotImplementedError() def _v1_1_slug_os_keypairs(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_os_keypairs.json') elif method == "POST": if 'public_key' in body: body = self.fixtures.load('_os_keypairs_create_import.json') else: body = self.fixtures.load('_os_keypairs_create.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_os_keypairs_test_key_pair(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('_os_keypairs_get_one.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_os_keypairs_doesnt_exist(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('_os_keypairs_not_found.json') else: raise NotImplementedError() return (httplib.NOT_FOUND, body, self.json_content_headers, httplib.responses[httplib.NOT_FOUND]) def _v1_1_slug_os_keypairs_key1(self, method, url, body, headers): if method == "DELETE": return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) else: raise NotImplementedError() def _v1_1_slug_os_volumes(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_os_volumes.json') elif method == "POST": body = self.fixtures.load('_os_volumes_create.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, headers): if method == "GET": body = 
self.fixtures.load( '_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json') elif method == "DELETE": body = '' else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_servers_12065_os_volume_attachments(self, method, url, body, headers): if method == "POST": body = self.fixtures.load( '_servers_12065_os_volume_attachments.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_servers_12065_os_volume_attachments_cd76a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, headers): if method == "DELETE": body = '' else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_os_floating_ip_pools(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_floating_ip_pools.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) else: raise NotImplementedError() def _v1_1_slug_os_floating_ips_foo_bar_id(self, method, url, body, headers): if method == "DELETE": body = '' return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) else: raise NotImplementedError() def _v1_1_slug_os_floating_ips(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_floating_ips.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) elif method == "POST": body = self.fixtures.load('_floating_ip.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) else: raise NotImplementedError() def _v1_1_slug_servers_4242_action(self, method, url, body, headers): if method == "POST": body = '' return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, 
httplib.responses[httplib.OK]) def _v1_1_slug_os_networks(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('_os_networks.json') return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) elif method == 'POST': body = self.fixtures.load('_os_networks_POST.json') return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v1_1_slug_os_networks_f13e5051_feea_416b_827a_1a0acc2dad14(self, method, url, body, headers): if method == 'DELETE': body = '' return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) raise NotImplementedError() def _v1_1_slug_servers_72258_action(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('_servers_suspend.json') return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_servers_12063_action(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('_servers_unpause.json') return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_servers_12086_action(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('_servers_12086_console_output.json') return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) else: raise NotImplementedError() def _v1_1_slug_os_snapshots(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('_os_snapshots.json') elif method == 'POST': body = self.fixtures.load('_os_snapshots_create.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def 
_v1_1_slug_os_snapshots_RACKSPACE(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('_os_snapshots_rackspace.json') elif method == 'POST': body = self.fixtures.load('_os_snapshots_create_rackspace.json') else: raise NotImplementedError() return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_os_snapshots_3fbbcccf_d058_4502_8844_6feeffdf4cb5(self, method, url, body, headers): if method == 'DELETE': body = '' status_code = httplib.NO_CONTENT else: raise NotImplementedError() return (status_code, body, self.json_content_headers, httplib.responses[httplib.OK]) def _v1_1_slug_os_snapshots_3fbbcccf_d058_4502_8844_6feeffdf4cb5_RACKSPACE(self, method, url, body, headers): if method == 'DELETE': body = '' status_code = httplib.NO_CONTENT else: raise NotImplementedError() return (status_code, body, self.json_content_headers, httplib.responses[httplib.OK]) # This exists because the nova compute url in devstack has v2 in there but the v1.1 fixtures # work fine. 
class OpenStack_2_0_MockHttp(OpenStack_1_1_MockHttp):
    """Mock HTTP transport for the v2 Nova endpoint.

    Reuses every ``_v1_1_slug_*`` handler defined on
    ``OpenStack_1_1_MockHttp`` by re-binding it on this instance under a
    ``_v2_1337_*`` name: the devstack compute URL contains ``v2`` but the
    v1.1 fixtures are still valid for it (see the comment preceding this
    class in the file).
    """

    def __init__(self, *args, **kwargs):
        super(OpenStack_2_0_MockHttp, self).__init__(*args, **kwargs)
        # Collect the parent's v1.1 URL handlers from the class dict and
        # expose them under the v2-style names (slug -> tenant id 1337).
        methods1 = OpenStack_1_1_MockHttp.__dict__
        names1 = [m for m in methods1 if m.find('_v1_1') == 0]
        for name in names1:
            method = methods1[name]
            new_name = name.replace('_v1_1_slug_', '_v2_1337_')
            # method_type binds the plain function as a bound method of
            # this instance so the dispatcher can call it directly.
            setattr(self, new_name,
                    method_type(method, self, OpenStack_2_0_MockHttp))


class OpenStack_1_1_Auth_2_0_Tests(OpenStack_1_1_Tests):
    """Re-runs the OpenStack 1.1 driver test suite with auth version 2.0
    forced via ``ex_force_auth_version``."""

    driver_args = OPENSTACK_PARAMS + ('1.1',)
    driver_kwargs = {'ex_force_auth_version': '2.0'}

    def setUp(self):
        # Route all connections through the v2 mock transport.
        self.driver_klass.connectionCls.conn_class = OpenStack_2_0_MockHttp
        self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com"
        # Reset the mock-routing "type" discriminator on every mock class so
        # state does not leak between tests.
        OpenStackMockHttp.type = None
        OpenStack_1_1_MockHttp.type = None
        OpenStack_2_0_MockHttp.type = None
        self.driver = self.create_driver()
        # normally authentication happens lazily, but we force it here
        self.driver.connection._populate_hosts_and_request_paths()
        clear_pricing_data()
        self.node = self.driver.list_nodes()[1]

    def test_auth_user_info_is_set(self):
        # After authentication the connection exposes the Keystone user info
        # returned by the mocked 2.0 token response.
        self.driver.connection._populate_hosts_and_request_paths()
        self.assertEqual(self.driver.connection.auth_user_info, {
            'id': '7',
            'name': 'testuser',
            'roles': [{'description': 'Default Role.',
                       'id': 'identity:default',
                       'name': 'identity:default'}]})


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-libcloud-2.2.1/libcloud/test/compute/test_indosat.py0000664000175000017500000000243713153541406023730 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from libcloud.compute.drivers.indosat import IndosatNodeDriver
from libcloud.test.compute.test_dimensiondata_v2_3 import DimensionDataMockHttp, DimensionData_v2_3_Tests


class IndosatNodeDriverTests(DimensionData_v2_3_Tests, unittest.TestCase):
    """Runs the shared DimensionData v2.3 test suite against the Indosat
    driver (Indosat resells the DimensionData cloud API)."""

    def setUp(self):
        # Point the Indosat driver at the shared DimensionData mock
        # transport and pin the API version the shared tests expect.
        IndosatNodeDriver.connectionCls.conn_class = DimensionDataMockHttp
        IndosatNodeDriver.connectionCls.active_api_version = '2.3'
        DimensionDataMockHttp.type = None
        self.driver = IndosatNodeDriver('user', 'password')
apache-libcloud-2.2.1/libcloud/test/compute/test_gogrid.py0000664000175000017500000002640213153541406023540 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs

from libcloud.compute.base import NodeState, NodeLocation
from libcloud.common.types import LibcloudError, InvalidCredsError
from libcloud.common.gogrid import GoGridIpAddress
from libcloud.compute.drivers.gogrid import GoGridNodeDriver
from libcloud.compute.base import Node, NodeImage, NodeSize

from libcloud.test import MockHttp               # pylint: disable-msg=E0611
from libcloud.test.compute import TestCaseMixin  # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import ComputeFileFixtures  # pylint: disable-msg=E0611


class GoGridTests(unittest.TestCase, TestCaseMixin):
    """Unit tests for the GoGrid compute driver, backed by GoGridMockHttp
    fixtures (no network access)."""

    def setUp(self):
        # Route driver requests through the mock transport and reset the
        # routing "type" discriminator so state does not leak between tests.
        GoGridNodeDriver.connectionCls.conn_class = GoGridMockHttp
        GoGridMockHttp.type = None
        self.driver = GoGridNodeDriver("foo", "bar")

    def _get_test_512Mb_node_size(self):
        # Minimal 512Mb size object; only the id matters to the driver here.
        return NodeSize(id='512Mb', name=None, ram=None, disk=None,
                        bandwidth=None, price=None, driver=self.driver)

    def test_create_node(self):
        image = NodeImage(1531, None, self.driver)
        node = self.driver.create_node(
            name='test1', image=image, size=self._get_test_512Mb_node_size())
        self.assertEqual(node.name, 'test1')
        self.assertTrue(node.id is not None)
        # Password comes from the password_list.json fixture.
        self.assertEqual(node.extra['password'], 'bebebe')

    def test_list_nodes(self):
        node = self.driver.list_nodes()[0]
        self.assertEqual(node.id, '90967')
        self.assertEqual(node.extra['password'], 'bebebe')
        self.assertEqual(node.extra['description'], 'test server')

    def test_reboot_node(self):
        node = Node(90967, None, None, None, None, self.driver)
        ret = self.driver.reboot_node(node)
        self.assertTrue(ret)

    def test_reboot_node_not_successful(self):
        # 'FAIL' routes to the *_FAIL mock handlers below.
        GoGridMockHttp.type = 'FAIL'
        node = Node(90967, None, None, None, None, self.driver)
        try:
            self.driver.reboot_node(node)
        except Exception:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_destroy_node(self):
        node = Node(90967, None, None, None, None, self.driver)
        ret = self.driver.destroy_node(node)
        self.assertTrue(ret)

    def test_list_images(self):
        images = self.driver.list_images()
        image = images[0]
        self.assertEqual(len(images), 4)
        self.assertEqual(image.name, 'CentOS 5.3 (32-bit) w/ None')
        self.assertEqual(image.id, '1531')

        # Passing an explicit location must yield the same fixture result.
        location = NodeLocation(
            id='gogrid/GSI-939ef909-84b8-4a2f-ad56-02ccd7da05ff.img',
            name='test location', country='Slovenia', driver=self.driver)
        images = self.driver.list_images(location=location)
        image = images[0]
        self.assertEqual(len(images), 4)
        self.assertEqual(image.name, 'CentOS 5.3 (32-bit) w/ None')
        self.assertEqual(image.id, '1531')

    def test_malformed_reply(self):
        # The FAIL handler returns a non-JSON body; the driver must surface
        # this as a LibcloudError rather than a raw parse failure.
        GoGridMockHttp.type = 'FAIL'
        try:
            self.driver.list_images()
        except LibcloudError:
            e = sys.exc_info()[1]
            self.assertTrue(isinstance(e, LibcloudError))
        else:
            self.fail("test should have thrown")

    def test_invalid_creds(self):
        GoGridMockHttp.type = 'FAIL'
        try:
            self.driver.list_nodes()
        except InvalidCredsError:
            e = sys.exc_info()[1]
            # The raised error must carry a reference to the driver.
            self.assertTrue(e.driver is not None)
            self.assertEqual(e.driver.name, self.driver.name)
        else:
            self.fail("test should have thrown")

    def test_node_creation_without_free_public_ips(self):
        # 'NOPUBIPS' serves an empty IP list, so node creation must fail.
        GoGridMockHttp.type = 'NOPUBIPS'
        try:
            image = NodeImage(1531, None, self.driver)
            self.driver.create_node(
                name='test1', image=image,
                size=self._get_test_512Mb_node_size())
        except LibcloudError:
            e = sys.exc_info()[1]
            self.assertTrue(isinstance(e, LibcloudError))
            self.assertTrue(e.driver is not None)
            self.assertEqual(e.driver.name, self.driver.name)
        else:
            self.fail("test should have thrown")

    def test_list_locations(self):
        locations = self.driver.list_locations()
        location_names = [location.name for location in locations]

        self.assertEqual(len(locations), 2)
        for i in 0, 1:
            self.assertTrue(isinstance(locations[i], NodeLocation))
        self.assertTrue("US-West-1" in location_names)
        self.assertTrue("US-East-1" in location_names)

    def test_ex_save_image(self):
        node = self.driver.list_nodes()[0]
        image = self.driver.ex_save_image(node, "testimage")
        self.assertEqual(image.name, "testimage")

    def test_ex_edit_image(self):
        image = self.driver.list_images()[0]
        ret = self.driver.ex_edit_image(image=image, public=False,
                                        ex_description="test",
                                        name="testname")
        self.assertTrue(isinstance(ret, NodeImage))

    def test_ex_edit_node(self):
        node = Node(id=90967, name=None, state=None, public_ips=None,
                    private_ips=None, driver=self.driver)
        ret = self.driver.ex_edit_node(node=node,
                                       size=self._get_test_512Mb_node_size())
        self.assertTrue(isinstance(ret, Node))

    def test_ex_list_ips(self):
        ips = self.driver.ex_list_ips()

        # Expected values mirror the ip_list.json fixture; entries are
        # deleted as they are matched so leftovers indicate a mismatch.
        expected_ips = {"192.168.75.66": GoGridIpAddress(
            id="5348099", ip="192.168.75.66", public=True,
            state="Unassigned", subnet="192.168.75.64/255.255.255.240"),
            "192.168.75.67": GoGridIpAddress(
                id="5348100", ip="192.168.75.67", public=True,
                state="Assigned", subnet="192.168.75.64/255.255.255.240"),
            "192.168.75.68": GoGridIpAddress(
                id="5348101", ip="192.168.75.68", public=False,
                state="Unassigned", subnet="192.168.75.64/255.255.255.240")}

        self.assertEqual(len(expected_ips), 3)
        for ip in ips:
            self.assertTrue(ip.ip in expected_ips)
            self.assertEqual(ip.public, expected_ips[ip.ip].public)
            self.assertEqual(ip.state, expected_ips[ip.ip].state)
            self.assertEqual(ip.subnet, expected_ips[ip.ip].subnet)
            del expected_ips[ip.ip]
        self.assertEqual(len(expected_ips), 0)

    def test_get_state_invalid(self):
        # Unknown state strings map to NodeState.UNKNOWN.
        state = self.driver._get_state('invalid')
        self.assertEqual(state, NodeState.UNKNOWN)


class GoGridMockHttp(MockHttp):
    """Mock transport: each ``_api_*`` method serves the fixture for the
    matching GoGrid API path; ``GoGridMockHttp.type`` selects the ``_FAIL``
    and ``_NOPUBIPS`` variants."""

    fixtures = ComputeFileFixtures('gogrid')

    def _api_grid_image_list(self, method, url, body, headers):
        body = self.fixtures.load('image_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_image_list_FAIL(self, method, url, body, headers):
        # Deliberately non-JSON body to exercise the driver's parse-error
        # path (see test_malformed_reply).
        # NOTE(review): the original literal spans physical lines in this
        # extraction (markup may have been stripped) — verify the exact
        # string against the upstream source.
        body = "\n\nsome non valid json here\n\n"
        return (httplib.SERVICE_UNAVAILABLE, body, {},
                httplib.responses[httplib.SERVICE_UNAVAILABLE])

    def _api_grid_server_list(self, method, url, body, headers):
        body = self.fixtures.load('server_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    # Same fixture serves the no-public-IPs scenario.
    _api_grid_server_list_NOPUBIPS = _api_grid_server_list

    def _api_grid_server_list_FAIL(self, method, url, body, headers):
        return (httplib.FORBIDDEN, "123", {},
                httplib.responses[httplib.FORBIDDEN])

    def _api_grid_ip_list(self, method, url, body, headers):
        body = self.fixtures.load('ip_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_ip_list_NOPUBIPS(self, method, url, body, headers):
        # Empty IP list -> node creation cannot allocate a public IP.
        body = self.fixtures.load('ip_list_empty.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_server_power(self, method, url, body, headers):
        body = self.fixtures.load('server_power.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_server_power_FAIL(self, method, url, body, headers):
        body = self.fixtures.load('server_power_fail.json')
        return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK])

    def _api_grid_server_add(self, method, url, body, headers):
        body = self.fixtures.load('server_add.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    _api_grid_server_add_NOPUBIPS = _api_grid_server_add

    def _api_grid_server_delete(self, method, url, body, headers):
        body = self.fixtures.load('server_delete.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_server_edit(self, method, url, body, headers):
        body = self.fixtures.load('server_edit.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_support_password_list(self, method, url, body, headers):
        body = self.fixtures.load('password_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    _api_support_password_list_NOPUBIPS = _api_support_password_list

    def _api_grid_image_save(self, method, url, body, headers):
        body = self.fixtures.load('image_save.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_image_edit(self, method, url, body, headers):
        # edit method is quite similar to save method from the response
        # perspective
        body = self.fixtures.load('image_save.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_common_lookup_list(self, method, url, body, headers):
        # Serve lookup_list_<lookup>.json for the requested ?lookup=...,
        # but only for lookups we have fixtures for.
        _valid_lookups = ("ip.datacenter",)
        lookup = parse_qs(urlparse.urlparse(url).query)["lookup"][0]
        if lookup in _valid_lookups:
            fixture_path = "lookup_list_%s.json" % \
                (lookup.replace(".", "_"))
        else:
            raise NotImplementedError
        body = self.fixtures.load(fixture_path)
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-libcloud-2.2.1/libcloud/test/compute/test_dimensiondata_v2_4.py0000664000175000017500000047734413153541406025745 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys from types import GeneratorType from libcloud.utils.py3 import httplib from libcloud.utils.py3 import ET from libcloud.common.types import InvalidCredsError from libcloud.common.dimensiondata import DimensionDataAPIException, NetworkDomainServicePlan from libcloud.common.dimensiondata import DimensionDataServerCpuSpecification, DimensionDataServerDisk, DimensionDataServerVMWareTools from libcloud.common.dimensiondata import DimensionDataTag, DimensionDataTagKey from libcloud.common.dimensiondata import DimensionDataIpAddress, \ DimensionDataIpAddressList, DimensionDataChildIpAddressList, \ DimensionDataPortList, DimensionDataPort, DimensionDataChildPortList from libcloud.common.dimensiondata import TYPES_URN from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver as DimensionData from libcloud.compute.drivers.dimensiondata import DimensionDataNic from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation from libcloud.test import MockHttp, unittest from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import DIMENSIONDATA_PARAMS from libcloud.utils.xml import fixxpath, findtext, findall class DimensionData_v2_4_Tests(unittest.TestCase): def setUp(self): DimensionData.connectionCls.active_api_version = '2.4' DimensionData.connectionCls.conn_class = DimensionDataMockHttp DimensionDataMockHttp.type = None self.driver = DimensionData(*DIMENSIONDATA_PARAMS) def test_invalid_region(self): with self.assertRaises(ValueError): DimensionData(*DIMENSIONDATA_PARAMS, region='blah') def test_invalid_creds(self): DimensionDataMockHttp.type = 'UNAUTHORIZED' with self.assertRaises(InvalidCredsError): self.driver.list_nodes() def test_get_account_details(self): DimensionDataMockHttp.type = None ret = self.driver.connection.get_account_details() self.assertEqual(ret.full_name, 'Test User') self.assertEqual(ret.first_name, 'Test') self.assertEqual(ret.email, 'test@example.com') def 
test_list_locations_response(self): DimensionDataMockHttp.type = None ret = self.driver.list_locations() self.assertEqual(len(ret), 5) first_loc = ret[0] self.assertEqual(first_loc.id, 'NA3') self.assertEqual(first_loc.name, 'US - West') self.assertEqual(first_loc.country, 'US') def test_list_nodes_response(self): DimensionDataMockHttp.type = None ret = self.driver.list_nodes() self.assertEqual(len(ret), 7) def test_node_extras(self): DimensionDataMockHttp.type = None ret = self.driver.list_nodes() self.assertTrue(isinstance(ret[0].extra['vmWareTools'], DimensionDataServerVMWareTools)) self.assertTrue(isinstance(ret[0].extra['cpu'], DimensionDataServerCpuSpecification)) self.assertTrue(isinstance(ret[0].extra['disks'], list)) self.assertTrue(isinstance(ret[0].extra['disks'][0], DimensionDataServerDisk)) self.assertEqual(ret[0].extra['disks'][0].size_gb, 10) self.assertTrue(isinstance(ret[1].extra['disks'], list)) self.assertTrue(isinstance(ret[1].extra['disks'][0], DimensionDataServerDisk)) self.assertEqual(ret[1].extra['disks'][0].size_gb, 10) def test_server_states(self): DimensionDataMockHttp.type = None ret = self.driver.list_nodes() self.assertTrue(ret[0].state == 'running') self.assertTrue(ret[1].state == 'starting') self.assertTrue(ret[2].state == 'stopping') self.assertTrue(ret[3].state == 'reconfiguring') self.assertTrue(ret[4].state == 'running') self.assertTrue(ret[5].state == 'terminated') self.assertTrue(ret[6].state == 'stopped') self.assertEqual(len(ret), 7) def test_list_nodes_response_PAGINATED(self): DimensionDataMockHttp.type = 'PAGINATED' ret = self.driver.list_nodes() self.assertEqual(len(ret), 9) def test_paginated_mcp2_call_EMPTY(self): # cache org self.driver.connection._get_orgId() DimensionDataMockHttp.type = 'EMPTY' node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2('server/server') empty_node_list = [] for node_list in node_list_generator: empty_node_list.extend(node_list) self.assertTrue(len(empty_node_list) 
== 0) def test_paginated_mcp2_call_PAGED_THEN_EMPTY(self): # cache org self.driver.connection._get_orgId() DimensionDataMockHttp.type = 'PAGED_THEN_EMPTY' node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2('server/server') final_node_list = [] for node_list in node_list_generator: final_node_list.extend(node_list) self.assertTrue(len(final_node_list) == 2) def test_paginated_mcp2_call_with_page_size(self): # cache org self.driver.connection._get_orgId() DimensionDataMockHttp.type = 'PAGESIZE50' node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2('server/server', page_size=50) self.assertTrue(isinstance(node_list_generator, GeneratorType)) # We're making sure here the filters make it to the URL # See _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS for asserts def test_list_nodes_response_strings_ALLFILTERS(self): DimensionDataMockHttp.type = 'ALLFILTERS' ret = self.driver.list_nodes(ex_location='fake_loc', ex_name='fake_name', ex_ipv6='fake_ipv6', ex_ipv4='fake_ipv4', ex_vlan='fake_vlan', ex_image='fake_image', ex_deployed=True, ex_started=True, ex_state='fake_state', ex_network='fake_network', ex_network_domain='fake_network_domain') self.assertTrue(isinstance(ret, list)) self.assertEqual(len(ret), 7) node = ret[3] self.assertTrue(isinstance(node.extra['disks'], list)) self.assertTrue(isinstance(node.extra['disks'][0], DimensionDataServerDisk)) self.assertEqual(node.size.id, '1') self.assertEqual(node.image.id, '3ebf3c0f-90fe-4a8b-8585-6e65b316592c') self.assertEqual(node.image.name, 'WIN2008S/32') disk = node.extra['disks'][0] self.assertEqual(disk.id, "c2e1f199-116e-4dbc-9960-68720b832b0a") self.assertEqual(disk.scsi_id, 0) self.assertEqual(disk.size_gb, 50) self.assertEqual(disk.speed, "STANDARD") self.assertEqual(disk.state, "NORMAL") def test_list_nodes_response_LOCATION(self): DimensionDataMockHttp.type = None ret = self.driver.list_locations() first_loc = ret[0] ret = 
self.driver.list_nodes(ex_location=first_loc) for node in ret: self.assertEqual(node.extra['datacenterId'], 'NA3') def test_list_nodes_response_LOCATION_STR(self): DimensionDataMockHttp.type = None ret = self.driver.list_nodes(ex_location='NA3') for node in ret: self.assertEqual(node.extra['datacenterId'], 'NA3') def test_list_sizes_response(self): DimensionDataMockHttp.type = None ret = self.driver.list_sizes() self.assertEqual(len(ret), 1) size = ret[0] self.assertEqual(size.name, 'default') def test_reboot_node_response(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = node.reboot() self.assertTrue(ret is True) def test_reboot_node_response_INPROGRESS(self): DimensionDataMockHttp.type = 'INPROGRESS' node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) with self.assertRaises(DimensionDataAPIException): node.reboot() def test_destroy_node_response(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = node.destroy() self.assertTrue(ret is True) def test_destroy_node_response_RESOURCE_BUSY(self): DimensionDataMockHttp.type = 'INPROGRESS' node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) with self.assertRaises(DimensionDataAPIException): node.destroy() def test_list_images(self): images = self.driver.list_images() self.assertEqual(len(images), 3) self.assertEqual(images[0].name, 'RedHat 6 64-bit 2 CPU') self.assertEqual(images[0].id, 'c14b1a46-2428-44c1-9c1a-b20e6418d08c') self.assertEqual(images[0].extra['location'].id, 'NA9') self.assertEqual(images[0].extra['cpu'].cpu_count, 2) self.assertEqual(images[0].extra['OS_displayName'], 'REDHAT6/64') def test_clean_failed_deployment_response_with_node(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = self.driver.ex_clean_failed_deployment(node) 
self.assertTrue(ret is True) def test_clean_failed_deployment_response_with_node_id(self): node = 'e75ead52-692f-4314-8725-c8a4f4d13a87' ret = self.driver.ex_clean_failed_deployment(node) self.assertTrue(ret is True) def test_ex_list_customer_images(self): images = self.driver.ex_list_customer_images() self.assertEqual(len(images), 3) self.assertEqual(images[0].name, 'ImportedCustomerImage') self.assertEqual(images[0].id, '5234e5c7-01de-4411-8b6e-baeb8d91cf5d') self.assertEqual(images[0].extra['location'].id, 'NA9') self.assertEqual(images[0].extra['cpu'].cpu_count, 4) self.assertEqual(images[0].extra['OS_displayName'], 'REDHAT6/64') def test_create_mcp1_node_optional_param(self): root_pw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] network = self.driver.ex_list_networks()[0] cpu_spec = DimensionDataServerCpuSpecification(cpu_count='4', cores_per_socket='2', performance='STANDARD') disks = [DimensionDataServerDisk(scsi_id='0', speed='HIGHPERFORMANCE')] node = self.driver.create_node(name='test2', image=image, auth=root_pw, ex_description='test2 node', ex_network=network, ex_is_started=False, ex_memory_gb=8, ex_disks=disks, ex_cpu_specification=cpu_spec, ex_primary_dns='10.0.0.5', ex_secondary_dns='10.0.0.6' ) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_mcp1_node_response_no_pass_random_gen(self): image = self.driver.list_images()[0] network = self.driver.ex_list_networks()[0] node = self.driver.create_node(name='test2', image=image, auth=None, ex_description='test2 node', ex_network=network, ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') self.assertTrue('password' in node.extra) def test_create_mcp1_node_response_no_pass_customer_windows(self): image = self.driver.ex_list_customer_images()[1] network = self.driver.ex_list_networks()[0] node = 
self.driver.create_node(name='test2', image=image, auth=None, ex_description='test2 node', ex_network=network, ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') self.assertTrue('password' in node.extra) def test_create_mcp1_node_response_no_pass_customer_windows_STR(self): image = self.driver.ex_list_customer_images()[1].id network = self.driver.ex_list_networks()[0] node = self.driver.create_node(name='test2', image=image, auth=None, ex_description='test2 node', ex_network=network, ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') self.assertTrue('password' in node.extra) def test_create_mcp1_node_response_no_pass_customer_linux(self): image = self.driver.ex_list_customer_images()[0] network = self.driver.ex_list_networks()[0] node = self.driver.create_node(name='test2', image=image, auth=None, ex_description='test2 node', ex_network=network, ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') self.assertTrue('password' not in node.extra) def test_create_mcp1_node_response_no_pass_customer_linux_STR(self): image = self.driver.ex_list_customer_images()[0].id network = self.driver.ex_list_networks()[0] node = self.driver.create_node(name='test2', image=image, auth=None, ex_description='test2 node', ex_network=network, ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') self.assertTrue('password' not in node.extra) def test_create_mcp1_node_response_STR(self): rootPw = 'pass123' image = self.driver.list_images()[0].id network = self.driver.ex_list_networks()[0].id node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', 
ex_network=network, ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_response_network_domain(self): rootPw = NodeAuthPassword('pass123') location = self.driver.ex_get_location_by_id('NA9') image = self.driver.list_images(location=location)[0] network_domain = self.driver.ex_list_network_domains(location=location)[0] vlan = self.driver.ex_list_vlans(location=location)[0] cpu = DimensionDataServerCpuSpecification( cpu_count=4, cores_per_socket=1, performance='HIGHPERFORMANCE' ) node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain=network_domain, ex_vlan=vlan, ex_is_started=False, ex_cpu_specification=cpu, ex_memory_gb=4) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_response_network_domain_STR(self): rootPw = NodeAuthPassword('pass123') location = self.driver.ex_get_location_by_id('NA9') image = self.driver.list_images(location=location)[0] network_domain = self.driver.ex_list_network_domains(location=location)[0].id vlan = self.driver.ex_list_vlans(location=location)[0].id cpu = DimensionDataServerCpuSpecification( cpu_count=4, cores_per_socket=1, performance='HIGHPERFORMANCE' ) node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain=network_domain, ex_vlan=vlan, ex_is_started=False, ex_cpu_specification=cpu, ex_memory_gb=4) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_mcp1_node_no_network(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] with self.assertRaises(InvalidRequestError): self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network=None, 
ex_is_started=False) def test_create_node_mcp1_ipv4(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network='fakenetwork', ex_primary_ipv4='10.0.0.1', ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_mcp1_network(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network='fakenetwork', ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_mcp2_vlan(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_vlan='fakevlan', ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_mcp2_ipv4(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_ipv4='10.0.0.1', ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_network_domain_no_vlan_or_ipv4(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] with self.assertRaises(ValueError): self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fake_network_domain', 
ex_is_started=False) def test_create_node_response(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] node = self.driver.create_node( name='test3', image=image, auth=rootPw, ex_network_domain='fakenetworkdomain', ex_primary_nic_vlan='fakevlan' ) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_ms_time_zone(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] node = self.driver.create_node( name='test3', image=image, auth=rootPw, ex_network_domain='fakenetworkdomain', ex_primary_nic_vlan='fakevlan', ex_microsoft_time_zone='040' ) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_ambigious_mcps_fail(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] with self.assertRaises(ValueError): self.driver.create_node( name='test3', image=image, auth=rootPw, ex_network_domain='fakenetworkdomain', ex_network='fakenetwork', ex_primary_nic_vlan='fakevlan' ) def test_create_node_no_network_domain_fail(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] with self.assertRaises(ValueError): self.driver.create_node( name='test3', image=image, auth=rootPw, ex_primary_nic_vlan='fakevlan' ) def test_create_node_no_primary_nic_fail(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] with self.assertRaises(ValueError): self.driver.create_node( name='test3', image=image, auth=rootPw, ex_network_domain='fakenetworkdomain' ) def test_create_node_primary_vlan_nic(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] node = self.driver.create_node( name='test3', image=image, auth=rootPw, ex_network_domain='fakenetworkdomain', ex_primary_nic_vlan='fakevlan', ex_primary_nic_network_adapter='v1000' ) self.assertEqual(node.id, 
'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_primary_ipv4(self): rootPw = 'pass123' image = self.driver.list_images()[0] node = self.driver.create_node( name='test3', image=image, auth=rootPw, ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1' ) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_both_primary_nic_and_vlan_fail(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] with self.assertRaises(ValueError): self.driver.create_node( name='test3', image=image, auth=rootPw, ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1', ex_primary_nic_vlan='fakevlan' ) def test_create_node_cpu_specification(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] cpu_spec = DimensionDataServerCpuSpecification(cpu_count='4', cores_per_socket='2', performance='STANDARD') node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1', ex_is_started=False, ex_cpu_specification=cpu_spec) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_memory(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1', ex_is_started=False, ex_memory_gb=8) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_disks(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] disks = 
[DimensionDataServerDisk(scsi_id='0', speed='HIGHPERFORMANCE')] node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1', ex_is_started=False, ex_disks=disks) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_disks_fail(self): root_pw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] disks = 'blah' with self.assertRaises(TypeError): self.driver.create_node(name='test2', image=image, auth=root_pw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1', ex_is_started=False, ex_disks=disks) def test_create_node_ipv4_gateway(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1', ex_is_started=False, ex_ipv4_gateway='10.2.2.2') self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_network_domain_no_vlan_no_ipv4_fail(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] with self.assertRaises(ValueError): self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fake_network_domain', ex_is_started=False) def test_create_node_mcp2_additional_nics_legacy(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] additional_vlans = ['fakevlan1', 'fakevlan2'] additional_ipv4 = ['10.0.0.2', '10.0.0.3'] node = self.driver.create_node( name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_ipv4='10.0.0.1', 
ex_additional_nics_vlan=additional_vlans, ex_additional_nics_ipv4=additional_ipv4, ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_bad_additional_nics_ipv4(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] with self.assertRaises(TypeError): self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fake_network_domain', ex_vlan='fake_vlan', ex_additional_nics_ipv4='badstring', ex_is_started=False) def test_create_node_additional_nics(self): root_pw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] nic1 = DimensionDataNic(vlan='fake_vlan', network_adapter_name='v1000') nic2 = DimensionDataNic(private_ip_v4='10.1.1.2', network_adapter_name='v1000') additional_nics = [nic1, nic2] node = self.driver.create_node(name='test2', image=image, auth=root_pw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1', ex_additional_nics=additional_nics, ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_create_node_additional_nics_vlan_ipv4_coexist_fail(self): root_pw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] nic1 = DimensionDataNic(private_ip_v4='10.1.1.1', vlan='fake_vlan', network_adapter_name='v1000') nic2 = DimensionDataNic(private_ip_v4='10.1.1.2', vlan='fake_vlan2', network_adapter_name='v1000') additional_nics = [nic1, nic2] with self.assertRaises(ValueError): self.driver.create_node(name='test2', image=image, auth=root_pw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1', ex_additional_nics=additional_nics, ex_is_started=False ) def test_create_node_additional_nics_invalid_input_fail(self): root_pw = 
NodeAuthPassword('pass123') image = self.driver.list_images()[0] additional_nics = 'blah' with self.assertRaises(TypeError): self.driver.create_node(name='test2', image=image, auth=root_pw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1', ex_additional_nics=additional_nics, ex_is_started=False ) def test_create_node_additional_nics_vlan_ipv4_not_exist_fail(self): root_pw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] nic1 = DimensionDataNic(network_adapter_name='v1000') nic2 = DimensionDataNic(network_adapter_name='v1000') additional_nics = [nic1, nic2] with self.assertRaises(ValueError): self.driver.create_node(name='test2', image=image, auth=root_pw, ex_description='test2 node', ex_network_domain='fakenetworkdomain', ex_primary_nic_private_ipv4='10.0.0.1', ex_additional_nics=additional_nics, ex_is_started=False) def test_create_node_bad_additional_nics_vlan(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] with self.assertRaises(TypeError): self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network_domain='fake_network_domain', ex_vlan='fake_vlan', ex_additional_nics_vlan='badstring', ex_is_started=False) def test_create_node_mcp2_indicate_dns(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test node dns', ex_network_domain='fakenetworkdomain', ex_primary_ipv4='10.0.0.1', ex_primary_dns='8.8.8.8', ex_secondary_dns='8.8.4.4', ex_is_started=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_ex_shutdown_graceful(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = self.driver.ex_shutdown_graceful(node) self.assertTrue(ret is True) def 
test_ex_shutdown_graceful_INPROGRESS(self): DimensionDataMockHttp.type = 'INPROGRESS' node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) with self.assertRaises(DimensionDataAPIException): self.driver.ex_shutdown_graceful(node) def test_ex_start_node(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = self.driver.ex_start_node(node) self.assertTrue(ret is True) def test_ex_start_node_INPROGRESS(self): DimensionDataMockHttp.type = 'INPROGRESS' node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) with self.assertRaises(DimensionDataAPIException): self.driver.ex_start_node(node) def test_ex_power_off(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = self.driver.ex_power_off(node) self.assertTrue(ret is True) def test_ex_update_vm_tools(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = self.driver.ex_update_vm_tools(node) self.assertTrue(ret is True) def test_ex_power_off_INPROGRESS(self): DimensionDataMockHttp.type = 'INPROGRESS' node = Node(id='11', name=None, state='STOPPING', public_ips=None, private_ips=None, driver=self.driver) with self.assertRaises(DimensionDataAPIException): self.driver.ex_power_off(node) def test_ex_reset(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = self.driver.ex_reset(node) self.assertTrue(ret is True) def test_ex_attach_node_to_vlan(self): node = self.driver.ex_get_node_by_id('e75ead52-692f-4314-8725-c8a4f4d13a87') vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8') ret = self.driver.ex_attach_node_to_vlan(node, vlan) self.assertTrue(ret is True) def test_ex_destroy_nic(self): node = self.driver.ex_destroy_nic('a202e51b-41c0-4cfc-add0-b1c62fc0ecf6') self.assertTrue(node) def 
test_list_networks(self): nets = self.driver.list_networks() self.assertEqual(nets[0].name, 'test-net1') self.assertTrue(isinstance(nets[0].location, NodeLocation)) def test_ex_create_network(self): location = self.driver.ex_get_location_by_id('NA9') net = self.driver.ex_create_network(location, "Test Network", "test") self.assertEqual(net.id, "208e3a8e-9d2f-11e2-b29c-001517c4643e") self.assertEqual(net.name, "Test Network") def test_ex_create_network_NO_DESCRIPTION(self): location = self.driver.ex_get_location_by_id('NA9') net = self.driver.ex_create_network(location, "Test Network") self.assertEqual(net.id, "208e3a8e-9d2f-11e2-b29c-001517c4643e") self.assertEqual(net.name, "Test Network") def test_ex_delete_network(self): net = self.driver.ex_list_networks()[0] result = self.driver.ex_delete_network(net) self.assertTrue(result) def test_ex_rename_network(self): net = self.driver.ex_list_networks()[0] result = self.driver.ex_rename_network(net, "barry") self.assertTrue(result) def test_ex_create_network_domain(self): location = self.driver.ex_get_location_by_id('NA9') plan = NetworkDomainServicePlan.ADVANCED net = self.driver.ex_create_network_domain(location=location, name='test', description='test', service_plan=plan) self.assertEqual(net.name, 'test') self.assertTrue(net.id, 'f14a871f-9a25-470c-aef8-51e13202e1aa') def test_ex_create_network_domain_NO_DESCRIPTION(self): location = self.driver.ex_get_location_by_id('NA9') plan = NetworkDomainServicePlan.ADVANCED net = self.driver.ex_create_network_domain(location=location, name='test', service_plan=plan) self.assertEqual(net.name, 'test') self.assertTrue(net.id, 'f14a871f-9a25-470c-aef8-51e13202e1aa') def test_ex_get_network_domain(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') self.assertEqual(net.id, '8cdfd607-f429-4df6-9352-162cfc0891be') self.assertEqual(net.description, 'test2') self.assertEqual(net.name, 'test') def test_ex_update_network_domain(self): net = 
self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') net.name = 'new name' net2 = self.driver.ex_update_network_domain(net) self.assertEqual(net2.name, 'new name') def test_ex_delete_network_domain(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') result = self.driver.ex_delete_network_domain(net) self.assertTrue(result) def test_ex_list_networks(self): nets = self.driver.ex_list_networks() self.assertEqual(nets[0].name, 'test-net1') self.assertTrue(isinstance(nets[0].location, NodeLocation)) def test_ex_list_network_domains(self): nets = self.driver.ex_list_network_domains() self.assertEqual(nets[0].name, 'Aurora') self.assertTrue(isinstance(nets[0].location, NodeLocation)) def test_ex_list_network_domains_ALLFILTERS(self): DimensionDataMockHttp.type = 'ALLFILTERS' nets = self.driver.ex_list_network_domains(location='fake_location', name='fake_name', service_plan='fake_plan', state='fake_state') self.assertEqual(nets[0].name, 'Aurora') self.assertTrue(isinstance(nets[0].location, NodeLocation)) def test_ex_list_vlans(self): vlans = self.driver.ex_list_vlans() self.assertEqual(vlans[0].name, "Primary") def test_ex_list_vlans_ALLFILTERS(self): DimensionDataMockHttp.type = 'ALLFILTERS' vlans = self.driver.ex_list_vlans(location='fake_location', network_domain='fake_network_domain', name='fake_name', ipv4_address='fake_ipv4', ipv6_address='fake_ipv6', state='fake_state') self.assertEqual(vlans[0].name, "Primary") def test_ex_create_vlan(self,): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') vlan = self.driver.ex_create_vlan(network_domain=net, name='test', private_ipv4_base_address='10.3.4.0', private_ipv4_prefix_size='24', description='test vlan') self.assertEqual(vlan.id, '0e56433f-d808-4669-821d-812769517ff8') def test_ex_create_vlan_NO_DESCRIPTION(self,): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') vlan = 
self.driver.ex_create_vlan(network_domain=net, name='test', private_ipv4_base_address='10.3.4.0', private_ipv4_prefix_size='24') self.assertEqual(vlan.id, '0e56433f-d808-4669-821d-812769517ff8') def test_ex_get_vlan(self): vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8') self.assertEqual(vlan.id, '0e56433f-d808-4669-821d-812769517ff8') self.assertEqual(vlan.description, 'test2') self.assertEqual(vlan.status, 'NORMAL') self.assertEqual(vlan.name, 'Production VLAN') self.assertEqual(vlan.private_ipv4_range_address, '10.0.3.0') self.assertEqual(vlan.private_ipv4_range_size, 24) self.assertEqual(vlan.ipv6_range_size, 64) self.assertEqual(vlan.ipv6_range_address, '2607:f480:1111:1153:0:0:0:0') self.assertEqual(vlan.ipv4_gateway, '10.0.3.1') self.assertEqual(vlan.ipv6_gateway, '2607:f480:1111:1153:0:0:0:1') def test_ex_wait_for_state(self): self.driver.ex_wait_for_state('NORMAL', self.driver.ex_get_vlan, vlan_id='0e56433f-d808-4669-821d-812769517ff8') def test_ex_wait_for_state_NODE(self): self.driver.ex_wait_for_state('running', self.driver.ex_get_node_by_id, id='e75ead52-692f-4314-8725-c8a4f4d13a87') def test_ex_wait_for_state_FAIL(self): with self.assertRaises(DimensionDataAPIException) as context: self.driver.ex_wait_for_state('starting', self.driver.ex_get_node_by_id, id='e75ead52-692f-4314-8725-c8a4f4d13a87', timeout=2 ) self.assertEqual(context.exception.code, 'running') self.assertTrue('timed out' in context.exception.msg) def test_ex_update_vlan(self): vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8') vlan.name = 'new name' vlan2 = self.driver.ex_update_vlan(vlan) self.assertEqual(vlan2.name, 'new name') def test_ex_delete_vlan(self): vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8') result = self.driver.ex_delete_vlan(vlan) self.assertTrue(result) def test_ex_expand_vlan(self): vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8') vlan.private_ipv4_range_size = '23' vlan = 
self.driver.ex_expand_vlan(vlan) self.assertEqual(vlan.private_ipv4_range_size, '23') def test_ex_add_public_ip_block_to_network_domain(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') block = self.driver.ex_add_public_ip_block_to_network_domain(net) self.assertEqual(block.id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8') def test_ex_list_public_ip_blocks(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') blocks = self.driver.ex_list_public_ip_blocks(net) self.assertEqual(blocks[0].base_ip, '168.128.4.18') self.assertEqual(blocks[0].size, '2') self.assertEqual(blocks[0].id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8') self.assertEqual(blocks[0].location.id, 'NA9') self.assertEqual(blocks[0].network_domain.id, net.id) def test_ex_get_public_ip_block(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') block = self.driver.ex_get_public_ip_block('9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8') self.assertEqual(block.base_ip, '168.128.4.18') self.assertEqual(block.size, '2') self.assertEqual(block.id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8') self.assertEqual(block.location.id, 'NA9') self.assertEqual(block.network_domain.id, net.id) def test_ex_delete_public_ip_block(self): block = self.driver.ex_get_public_ip_block('9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8') result = self.driver.ex_delete_public_ip_block(block) self.assertTrue(result) def test_ex_list_firewall_rules(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rules = self.driver.ex_list_firewall_rules(net) self.assertEqual(rules[0].id, '756cba02-b0bc-48f4-aea5-9445870b6148') self.assertEqual(rules[0].network_domain.id, '8cdfd607-f429-4df6-9352-162cfc0891be') self.assertEqual(rules[0].name, 'CCDEFAULT.BlockOutboundMailIPv4') self.assertEqual(rules[0].action, 'DROP') self.assertEqual(rules[0].ip_version, 'IPV4') self.assertEqual(rules[0].protocol, 'TCP') 
self.assertEqual(rules[0].source.ip_address, 'ANY') self.assertTrue(rules[0].source.any_ip) self.assertTrue(rules[0].destination.any_ip) def test_ex_create_firewall_rule(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rules = self.driver.ex_list_firewall_rules(net) rule = self.driver.ex_create_firewall_rule(net, rules[0], 'FIRST') self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c') def test_ex_create_firewall_rule_with_specific_source_ip(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rules = self.driver.ex_list_firewall_rules(net) specific_source_ip_rule = list(filter(lambda x: x.name == 'SpecificSourceIP', rules))[0] rule = self.driver.ex_create_firewall_rule(net, specific_source_ip_rule, 'FIRST') self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c') def test_ex_create_firewall_rule_with_source_ip(self): net = self.driver.ex_get_network_domain( '8cdfd607-f429-4df6-9352-162cfc0891be') rules = self.driver.ex_list_firewall_rules(net) specific_source_ip_rule = \ list(filter(lambda x: x.name == 'SpecificSourceIP', rules))[0] specific_source_ip_rule.source.any_ip = False specific_source_ip_rule.source.ip_address = '10.0.0.1' specific_source_ip_rule.source.ip_prefix_size = '15' rule = self.driver.ex_create_firewall_rule(net, specific_source_ip_rule, 'FIRST') self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c') def test_ex_create_firewall_rule_with_any_ip(self): net = self.driver.ex_get_network_domain( '8cdfd607-f429-4df6-9352-162cfc0891be') rules = self.driver.ex_list_firewall_rules(net) specific_source_ip_rule = \ list(filter(lambda x: x.name == 'SpecificSourceIP', rules))[0] specific_source_ip_rule.source.any_ip = True rule = self.driver.ex_create_firewall_rule(net, specific_source_ip_rule, 'FIRST') self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c') def test_ex_create_firewall_rule_ip_prefix_size(self): net = 
self.driver.ex_get_network_domain( '8cdfd607-f429-4df6-9352-162cfc0891be') rule = self.driver.ex_list_firewall_rules(net)[0] rule.source.address_list_id = None rule.source.any_ip = False rule.source.ip_address = '10.2.1.1' rule.source.ip_prefix_size = '10' rule.destination.address_list_id = None rule.destination.any_ip = False rule.destination.ip_address = '10.0.0.1' rule.destination.ip_prefix_size = '20' self.driver.ex_create_firewall_rule(net, rule, 'LAST') def test_ex_create_firewall_rule_address_list(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rule = self.driver.ex_list_firewall_rules(net)[0] rule.source.address_list_id = '12345' rule.destination.address_list_id = '12345' self.driver.ex_create_firewall_rule(net, rule, 'LAST') def test_ex_create_firewall_rule_port_list(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rule = self.driver.ex_list_firewall_rules(net)[0] rule.source.port_list_id = '12345' rule.destination.port_list_id = '12345' self.driver.ex_create_firewall_rule(net, rule, 'LAST') def test_ex_create_firewall_rule_port(self): net = self.driver.ex_get_network_domain( '8cdfd607-f429-4df6-9352-162cfc0891be') rule = self.driver.ex_list_firewall_rules(net)[0] rule.source.port_list_id = None rule.source.port_begin = '8000' rule.source.port_end = '8005' rule.destination.port_list_id = None rule.destination.port_begin = '7000' rule.destination.port_end = '7005' self.driver.ex_create_firewall_rule(net, rule, 'LAST') def test_ex_create_firewall_rule_ALL_VALUES(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rules = self.driver.ex_list_firewall_rules(net) for rule in rules: self.driver.ex_create_firewall_rule(net, rule, 'LAST') def test_ex_create_firewall_rule_WITH_POSITION_RULE(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rules = self.driver.ex_list_firewall_rules(net) rule = 
self.driver.ex_create_firewall_rule(net, rules[-2], 'BEFORE', rules[-1]) self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c') def test_ex_create_firewall_rule_WITH_POSITION_RULE_STR(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rules = self.driver.ex_list_firewall_rules(net) rule = self.driver.ex_create_firewall_rule(net, rules[-2], 'BEFORE', 'RULE_WITH_SOURCE_AND_DEST') self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c') def test_ex_create_firewall_rule_FAIL_POSITION(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rules = self.driver.ex_list_firewall_rules(net) with self.assertRaises(ValueError): self.driver.ex_create_firewall_rule(net, rules[0], 'BEFORE') def test_ex_create_firewall_rule_FAIL_POSITION_WITH_RULE(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rules = self.driver.ex_list_firewall_rules(net) with self.assertRaises(ValueError): self.driver.ex_create_firewall_rule(net, rules[0], 'LAST', 'RULE_WITH_SOURCE_AND_DEST') def test_ex_get_firewall_rule(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rule = self.driver.ex_get_firewall_rule(net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c') self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c') def test_ex_set_firewall_rule_state(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rule = self.driver.ex_get_firewall_rule(net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c') result = self.driver.ex_set_firewall_rule_state(rule, False) self.assertTrue(result) def test_ex_delete_firewall_rule(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rule = self.driver.ex_get_firewall_rule(net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c') result = self.driver.ex_delete_firewall_rule(rule) self.assertTrue(result) def test_ex_edit_firewall_rule(self): net = 
        self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        rule.source.any_ip = True
        result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_source_ipaddresslist(self):
        # Edit succeeds when the rule source references an IP address list.
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        rule.source.address_list_id = '802abc9f-45a7-4efb-9d5a-810082368222'
        rule.source.any_ip = False
        rule.source.ip_address = '10.0.0.1'
        rule.source.ip_prefix_size = 10
        result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_destination_ipaddresslist(self):
        # Edit succeeds when the rule destination references an IP address
        # list.
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        rule.destination.address_list_id = '802abc9f-45a7-4efb-9d5a-810082368222'
        rule.destination.any_ip = False
        rule.destination.ip_address = '10.0.0.1'
        rule.destination.ip_prefix_size = 10
        result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_destination_ipaddress(self):
        # NOTE(review): despite the test name this mutates rule.source (the
        # next test mutates rule.destination) — names look swapped; the
        # mock accepts either, so behavior is unchanged.
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        rule.source.address_list_id = None
        rule.source.any_ip = False
        rule.source.ip_address = '10.0.0.1'
        rule.source.ip_prefix_size = '10'
        result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_source_ipaddress(self):
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        rule.destination.address_list_id = None
        rule.destination.any_ip = False
        rule.destination.ip_address = '10.0.0.1'
        rule.destination.ip_prefix_size = '10'
        result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_with_relative_rule(self):
        # Position an edited rule BEFORE another rule object.
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        placement_rule = self.driver.ex_list_firewall_rules(
            network_domain=net)[-1]
        result = self.driver.ex_edit_firewall_rule(
            rule=rule,
            position='BEFORE',
            relative_rule_for_position=placement_rule)
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_with_relative_rule_by_name(self):
        # Same as above, but the relative rule is passed by name (str).
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        placement_rule = self.driver.ex_list_firewall_rules(
            network_domain=net)[-1]
        result = self.driver.ex_edit_firewall_rule(
            rule=rule,
            position='BEFORE',
            relative_rule_for_position=placement_rule.name)
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_source_portlist(self):
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        rule.source.port_list_id = '802abc9f-45a7-4efb-9d5a-810082368222'
        result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_source_port(self):
        # Explicit port range replaces a port list on the source side.
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        rule.source.port_list_id = None
        rule.source.port_begin = '3'
        rule.source.port_end = '10'
        result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_destination_portlist(self):
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        rule.destination.port_list_id = '802abc9f-45a7-4efb-9d5a-810082368222'
        result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_destination_port(self):
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        rule.destination.port_list_id = None
        rule.destination.port_begin = '3'
        rule.destination.port_end = '10'
        result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
        self.assertTrue(result)

    def test_ex_edit_firewall_rule_invalid_position_fail(self):
        # 'BEFORE' without a relative rule must raise ValueError.
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        with self.assertRaises(ValueError):
            self.driver.ex_edit_firewall_rule(rule=rule, position='BEFORE')

    def test_ex_edit_firewall_rule_invalid_position_relative_rule_fail(self):
        # A relative rule combined with an absolute position ('FIRST') must
        # raise ValueError.
        net = self.driver.ex_get_network_domain(
            '8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(
            net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        relative_rule = self.driver.ex_list_firewall_rules(
            network_domain=net)[-1]
        with self.assertRaises(ValueError):
            self.driver.ex_edit_firewall_rule(
                rule=rule, position='FIRST',
                relative_rule_for_position=relative_rule)

    def test_ex_create_nat_rule(self):
        # Create a NAT rule and check the id from the mock response.
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_create_nat_rule(net, '1.2.3.4', '4.3.2.1')
        self.assertEqual(rule.id, 'd31c2db0-be6b-4d50-8744-9a7a534b5fba')

    def test_ex_list_nat_rules(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rules = self.driver.ex_list_nat_rules(net)
        self.assertEqual(rules[0].id, '2187a636-7ebb-49a1-a2ff-5d617f496dce')
self.assertEqual(rules[0].internal_ip, '10.0.0.15') self.assertEqual(rules[0].external_ip, '165.180.12.18') def test_ex_get_nat_rule(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rule = self.driver.ex_get_nat_rule(net, '2187a636-7ebb-49a1-a2ff-5d617f496dce') self.assertEqual(rule.id, '2187a636-7ebb-49a1-a2ff-5d617f496dce') self.assertEqual(rule.internal_ip, '10.0.0.16') self.assertEqual(rule.external_ip, '165.180.12.19') def test_ex_delete_nat_rule(self): net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be') rule = self.driver.ex_get_nat_rule(net, '2187a636-7ebb-49a1-a2ff-5d617f496dce') result = self.driver.ex_delete_nat_rule(rule) self.assertTrue(result) def test_ex_enable_monitoring(self): node = self.driver.list_nodes()[0] result = self.driver.ex_enable_monitoring(node, "ADVANCED") self.assertTrue(result) def test_ex_disable_monitoring(self): node = self.driver.list_nodes()[0] result = self.driver.ex_disable_monitoring(node) self.assertTrue(result) def test_ex_change_monitoring_plan(self): node = self.driver.list_nodes()[0] result = self.driver.ex_update_monitoring_plan(node, "ESSENTIALS") self.assertTrue(result) def test_ex_add_storage_to_node(self): node = self.driver.list_nodes()[0] result = self.driver.ex_add_storage_to_node(node, 30, 'PERFORMANCE') self.assertTrue(result) def test_ex_remove_storage_from_node(self): node = self.driver.list_nodes()[0] result = self.driver.ex_remove_storage_from_node(node, 0) self.assertTrue(result) def test_ex_change_storage_speed(self): node = self.driver.list_nodes()[0] result = self.driver.ex_change_storage_speed(node, 1, 'PERFORMANCE') self.assertTrue(result) def test_ex_change_storage_size(self): node = self.driver.list_nodes()[0] result = self.driver.ex_change_storage_size(node, 1, 100) self.assertTrue(result) def test_ex_clone_node_to_image(self): node = self.driver.list_nodes()[0] result = self.driver.ex_clone_node_to_image(node, 'my image', 'a 
description') self.assertTrue(result) def test_ex_update_node(self): node = self.driver.list_nodes()[0] result = self.driver.ex_update_node(node, 'my new name', 'a description', 2, 4048) self.assertTrue(result) def test_ex_reconfigure_node(self): node = self.driver.list_nodes()[0] result = self.driver.ex_reconfigure_node(node, 4, 4, 1, 'HIGHPERFORMANCE') self.assertTrue(result) def test_ex_get_location_by_id(self): location = self.driver.ex_get_location_by_id('NA9') self.assertTrue(location.id, 'NA9') def test_ex_get_location_by_id_NO_LOCATION(self): location = self.driver.ex_get_location_by_id(None) self.assertIsNone(location) def test_ex_get_base_image_by_id(self): image_id = self.driver.list_images()[0].id image = self.driver.ex_get_base_image_by_id(image_id) self.assertEqual(image.extra['OS_type'], 'UNIX') def test_ex_get_customer_image_by_id(self): image_id = self.driver.ex_list_customer_images()[1].id image = self.driver.ex_get_customer_image_by_id(image_id) self.assertEqual(image.extra['OS_type'], 'WINDOWS') def test_ex_get_image_by_id_base_img(self): image_id = self.driver.list_images()[1].id image = self.driver.ex_get_base_image_by_id(image_id) self.assertEqual(image.extra['OS_type'], 'WINDOWS') def test_ex_get_image_by_id_customer_img(self): image_id = self.driver.ex_list_customer_images()[0].id image = self.driver.ex_get_customer_image_by_id(image_id) self.assertEqual(image.extra['OS_type'], 'UNIX') def test_ex_get_image_by_id_customer_FAIL(self): image_id = 'FAKE_IMAGE_ID' with self.assertRaises(DimensionDataAPIException): self.driver.ex_get_base_image_by_id(image_id) def test_ex_create_anti_affinity_rule(self): node_list = self.driver.list_nodes() success = self.driver.ex_create_anti_affinity_rule([node_list[0], node_list[1]]) self.assertTrue(success) def test_ex_create_anti_affinity_rule_TUPLE(self): node_list = self.driver.list_nodes() success = self.driver.ex_create_anti_affinity_rule((node_list[0], node_list[1])) self.assertTrue(success) def 
test_ex_create_anti_affinity_rule_TUPLE_STR(self):
        # Node ids (strings) are accepted in place of Node objects.
        node_list = self.driver.list_nodes()
        success = self.driver.ex_create_anti_affinity_rule((node_list[0].id, node_list[1].id))
        self.assertTrue(success)

    def test_ex_create_anti_affinity_rule_FAIL_STR(self):
        # A bare string (not a list/tuple of nodes) must raise TypeError.
        node_list = 'string'
        with self.assertRaises(TypeError):
            self.driver.ex_create_anti_affinity_rule(node_list)

    def test_ex_create_anti_affinity_rule_FAIL_EXISTING(self):
        # The FAIL_EXISTING mock simulates a rule that already exists.
        node_list = self.driver.list_nodes()
        DimensionDataMockHttp.type = 'FAIL_EXISTING'
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_create_anti_affinity_rule((node_list[0], node_list[1]))

    def test_ex_delete_anti_affinity_rule(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        rule = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)[0]
        success = self.driver.ex_delete_anti_affinity_rule(rule)
        self.assertTrue(success)

    def test_ex_delete_anti_affinity_rule_STR(self):
        # Deletion also accepts the rule id as a plain string.
        net_domain = self.driver.ex_list_network_domains()[0]
        rule = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)[0]
        success = self.driver.ex_delete_anti_affinity_rule(rule.id)
        self.assertTrue(success)

    def test_ex_delete_anti_affinity_rule_FAIL(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        rule = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)[0]
        DimensionDataMockHttp.type = 'FAIL'
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_delete_anti_affinity_rule(rule)

    def test_ex_list_anti_affinity_rules_NETWORK_DOMAIN(self):
        # Listing filtered by network domain.
        net_domain = self.driver.ex_list_network_domains()[0]
        rules = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)
        self.assertTrue(isinstance(rules, list))
        self.assertEqual(len(rules), 2)
        self.assertTrue(isinstance(rules[0].id, str))
        self.assertTrue(isinstance(rules[0].node_list, list))

    def test_ex_list_anti_affinity_rules_NETWORK(self):
        # Listing filtered by (MCP 1.0) network.
        network = self.driver.list_networks()[0]
        rules = self.driver.ex_list_anti_affinity_rules(network=network)
        self.assertTrue(isinstance(rules, list))
        self.assertEqual(len(rules), 2)
        self.assertTrue(isinstance(rules[0].id, str))
        self.assertTrue(isinstance(rules[0].node_list, list))

    def test_ex_list_anti_affinity_rules_NODE(self):
        # Listing filtered by node.
        node = self.driver.list_nodes()[0]
        rules = self.driver.ex_list_anti_affinity_rules(node=node)
        self.assertTrue(isinstance(rules, list))
        self.assertEqual(len(rules), 2)
        self.assertTrue(isinstance(rules[0].id, str))
        self.assertTrue(isinstance(rules[0].node_list, list))

    def test_ex_list_anti_affinity_rules_PAGINATED(self):
        # The PAGINATED mock returns two pages (4 rules total).
        net_domain = self.driver.ex_list_network_domains()[0]
        DimensionDataMockHttp.type = 'PAGINATED'
        rules = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)
        self.assertTrue(isinstance(rules, list))
        self.assertEqual(len(rules), 4)
        self.assertTrue(isinstance(rules[0].id, str))
        self.assertTrue(isinstance(rules[0].node_list, list))

    def test_ex_list_anti_affinity_rules_ALLFILTERS(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        DimensionDataMockHttp.type = 'ALLFILTERS'
        rules = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain, filter_id='FAKE_ID', filter_state='FAKE_STATE')
        self.assertTrue(isinstance(rules, list))
        self.assertEqual(len(rules), 2)
        self.assertTrue(isinstance(rules[0].id, str))
        self.assertTrue(isinstance(rules[0].node_list, list))

    def test_ex_list_anti_affinity_rules_BAD_ARGS(self):
        # Passing both network and network_domain is ambiguous -> ValueError.
        with self.assertRaises(ValueError):
            self.driver.ex_list_anti_affinity_rules(network='fake_network', network_domain='fake_network_domain')

    def test_ex_create_tag_key(self):
        success = self.driver.ex_create_tag_key('MyTestKey')
        self.assertTrue(success)

    def test_ex_create_tag_key_ALLPARAMS(self):
        # _get_orgId() is called first so the subsequent request (and only
        # it) is routed to the ALLPARAMS mock handler.
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = 'ALLPARAMS'
        success = self.driver.ex_create_tag_key('MyTestKey', description="Test Key Desc.", value_required=False, display_on_report=False)
        self.assertTrue(success)

    def test_ex_create_tag_key_BADREQUEST(self):
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = 'BADREQUEST'
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_create_tag_key('MyTestKey')

    def test_ex_list_tag_keys(self):
        tag_keys = self.driver.ex_list_tag_keys()
        self.assertTrue(isinstance(tag_keys, list))
        self.assertTrue(isinstance(tag_keys[0], DimensionDataTagKey))
        self.assertTrue(isinstance(tag_keys[0].id, str))

    def test_ex_list_tag_keys_ALLFILTERS(self):
        # Exercises every supported filter; the ALLFILTERS mock validates
        # the query parameters. No assertion on the return value.
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = 'ALLFILTERS'
        self.driver.ex_list_tag_keys(id='fake_id', name='fake_name', value_required=False, display_on_report=False)

    def test_ex_get_tag_by_id(self):
        tag = self.driver.ex_get_tag_key_by_id('d047c609-93d7-4bc5-8fc9-732c85840075')
        self.assertTrue(isinstance(tag, DimensionDataTagKey))

    def test_ex_get_tag_by_id_NOEXIST(self):
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = 'NOEXIST'
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_get_tag_key_by_id('d047c609-93d7-4bc5-8fc9-732c85840075')

    def test_ex_get_tag_by_name(self):
        # SINGLE mock returns exactly one match for the name lookup.
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = 'SINGLE'
        tag = self.driver.ex_get_tag_key_by_name('LibcloudTest')
        self.assertTrue(isinstance(tag, DimensionDataTagKey))

    def test_ex_get_tag_by_name_NOEXIST(self):
        # Without the SINGLE mock the lookup finds no unique match.
        with self.assertRaises(ValueError):
            self.driver.ex_get_tag_key_by_name('LibcloudTest')

    def test_ex_modify_tag_key_NAME(self):
        tag_key = self.driver.ex_list_tag_keys()[0]
        DimensionDataMockHttp.type = 'NAME'
        success = self.driver.ex_modify_tag_key(tag_key, name='NewName')
        self.assertTrue(success)

    def test_ex_modify_tag_key_NOTNAME(self):
        # Modify every attribute except the name.
        tag_key = self.driver.ex_list_tag_keys()[0]
        DimensionDataMockHttp.type = 'NOTNAME'
        success = self.driver.ex_modify_tag_key(tag_key, description='NewDesc', value_required=False, display_on_report=True)
        self.assertTrue(success)

    def test_ex_modify_tag_key_NOCHANGE(self):
        # Submitting no changes is rejected by the API.
        tag_key = self.driver.ex_list_tag_keys()[0]
        DimensionDataMockHttp.type = 'NOCHANGE'
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_modify_tag_key(tag_key)

    def test_ex_remove_tag_key(self):
        tag_key = self.driver.ex_list_tag_keys()[0]
        success = self.driver.ex_remove_tag_key(tag_key)
        self.assertTrue(success)

    def test_ex_remove_tag_key_NOEXIST(self):
        tag_key = self.driver.ex_list_tag_keys()[0]
        DimensionDataMockHttp.type = 'NOEXIST'
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_remove_tag_key(tag_key)

    def test_ex_apply_tag_to_asset(self):
        node = self.driver.list_nodes()[0]
        success = self.driver.ex_apply_tag_to_asset(node, 'TagKeyName', 'FakeValue')
        self.assertTrue(success)

    def test_ex_apply_tag_to_asset_NOVALUE(self):
        # Tag value is optional.
        node = self.driver.list_nodes()[0]
        DimensionDataMockHttp.type = 'NOVALUE'
        success = self.driver.ex_apply_tag_to_asset(node, 'TagKeyName')
        self.assertTrue(success)

    def test_ex_apply_tag_to_asset_NOTAGKEY(self):
        node = self.driver.list_nodes()[0]
        DimensionDataMockHttp.type = 'NOTAGKEY'
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_apply_tag_to_asset(node, 'TagKeyNam')

    def test_ex_apply_tag_to_asset_BADASSETTYPE(self):
        # A network is not a taggable asset type -> TypeError before any
        # request is made.
        network = self.driver.list_networks()[0]
        DimensionDataMockHttp.type = 'NOTAGKEY'
        with self.assertRaises(TypeError):
            self.driver.ex_apply_tag_to_asset(network, 'TagKeyNam')

    def test_ex_remove_tag_from_asset(self):
        node = self.driver.list_nodes()[0]
        success = self.driver.ex_remove_tag_from_asset(node, 'TagKeyName')
        self.assertTrue(success)

    def test_ex_remove_tag_from_asset_NOTAG(self):
        node = self.driver.list_nodes()[0]
        DimensionDataMockHttp.type = 'NOTAG'
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_remove_tag_from_asset(node, 'TagKeyNam')

    def test_ex_list_tags(self):
        tags = self.driver.ex_list_tags()
        self.assertTrue(isinstance(tags, list))
        self.assertTrue(isinstance(tags[0], DimensionDataTag))
        self.assertTrue(len(tags) == 3)

    def test_ex_list_tags_ALLPARAMS(self):
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = 'ALLPARAMS'
        tags =
        self.driver.ex_list_tags(asset_id='fake_asset_id', asset_type='fake_asset_type', location='fake_location', tag_key_name='fake_tag_key_name', tag_key_id='fake_tag_key_id', value='fake_value', value_required=False, display_on_report=False)
        self.assertTrue(isinstance(tags, list))
        self.assertTrue(isinstance(tags[0], DimensionDataTag))
        self.assertTrue(len(tags) == 3)

    def test_priv_location_to_location_id(self):
        # _location_to_location_id accepts a NodeLocation object...
        location = self.driver.ex_get_location_by_id('NA9')
        self.assertEqual(
            self.driver._location_to_location_id(location),
            'NA9'
        )

    def test_priv_location_to_location_id_STR(self):
        # ...or a plain id string, which is passed through unchanged.
        self.assertEqual(
            self.driver._location_to_location_id('NA9'),
            'NA9'
        )

    def test_priv_location_to_location_id_TYPEERROR(self):
        # Anything else is rejected.
        with self.assertRaises(TypeError):
            self.driver._location_to_location_id([1, 2, 3])

    def test_priv_image_needs_auth_os_img(self):
        # Base OS images always need admin credentials on deploy.
        image = self.driver.list_images()[1]
        self.assertTrue(self.driver._image_needs_auth(image))

    def test_priv_image_needs_auth_os_img_STR(self):
        image = self.driver.list_images()[1].id
        self.assertTrue(self.driver._image_needs_auth(image))

    def test_priv_image_needs_auth_cust_img_windows(self):
        # Windows customer images need credentials...
        image = self.driver.ex_list_customer_images()[1]
        self.assertTrue(self.driver._image_needs_auth(image))

    def test_priv_image_needs_auth_cust_img_windows_STR(self):
        image = self.driver.ex_list_customer_images()[1].id
        self.assertTrue(self.driver._image_needs_auth(image))

    def test_priv_image_needs_auth_cust_img_linux(self):
        # ...while Linux customer images do not.
        image = self.driver.ex_list_customer_images()[0]
        self.assertTrue(not self.driver._image_needs_auth(image))

    def test_priv_image_needs_auth_cust_img_linux_STR(self):
        image = self.driver.ex_list_customer_images()[0].id
        self.assertTrue(not self.driver._image_needs_auth(image))

    def test_summary_usage_report(self):
        # CSV fixture: 13 rows of 6 columns.
        report = self.driver.ex_summary_usage_report('2016-06-01', '2016-06-30')
        report_content = report
        self.assertEqual(len(report_content), 13)
        self.assertEqual(len(report_content[0]), 6)

    def test_detailed_usage_report(self):
        # CSV fixture: 42 rows of 4 columns.
        report = self.driver.ex_detailed_usage_report('2016-06-01', '2016-06-30')
        report_content = report
        self.assertEqual(len(report_content), 42)
        self.assertEqual(len(report_content[0]), 4)

    def test_audit_log_report(self):
        report = self.driver.ex_audit_log_report('2016-06-01', '2016-06-30')
        report_content = report
        self.assertEqual(len(report_content), 25)
        self.assertEqual(report_content[2][2], 'OEC_SYSTEM')

    def test_ex_list_ip_address_list(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        ip_list = self.driver.ex_list_ip_address_list(
            ex_network_domain=net_domain)
        self.assertTrue(isinstance(ip_list, list))
        self.assertEqual(len(ip_list), 4)
        self.assertTrue(isinstance(ip_list[0].name, str))
        self.assertTrue(isinstance(ip_list[0].description, str))
        self.assertTrue(isinstance(ip_list[0].ip_version, str))
        self.assertTrue(isinstance(ip_list[0].state, str))
        self.assertTrue(isinstance(ip_list[0].create_time, str))
        self.assertTrue(isinstance(ip_list[0].child_ip_address_lists, list))
        self.assertEqual(len(ip_list[1].child_ip_address_lists), 1)
        self.assertTrue(isinstance(ip_list[1].child_ip_address_lists[0].name, str))

    def test_ex_get_ip_address_list(self):
        # FILTERBYNAME mock narrows the listing to a single named entry.
        net_domain = self.driver.ex_list_network_domains()[0]
        DimensionDataMockHttp.type = 'FILTERBYNAME'
        ip_list = self.driver.ex_get_ip_address_list(
            ex_network_domain=net_domain.id,
            ex_ip_address_list_name='Test_IP_Address_List_3')
        self.assertTrue(isinstance(ip_list, list))
        self.assertEqual(len(ip_list), 1)
        self.assertTrue(isinstance(ip_list[0].name, str))
        self.assertTrue(isinstance(ip_list[0].description, str))
        self.assertTrue(isinstance(ip_list[0].ip_version, str))
        self.assertTrue(isinstance(ip_list[0].state, str))
        self.assertTrue(isinstance(ip_list[0].create_time, str))
        ips = ip_list[0].ip_address_collection
        self.assertEqual(len(ips), 3)
        self.assertTrue(isinstance(ips[0].begin, str))
        self.assertTrue(isinstance(ips[0].prefix_size, str))
        self.assertTrue(isinstance(ips[2].end, str))

    def test_ex_create_ip_address_list_FAIL(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        # Creating a list without a name/ip_version is a TypeError.
        with self.assertRaises(TypeError):
            self.driver.ex_create_ip_address_list(
                ex_network_domain=net_domain.id)

    def test_ex_create_ip_address_list(self):
        # Create an IP address list from objects (single IP, range, CIDR)
        # plus a child list reference.
        name = "Test_IP_Address_List_3"
        description = "Test Description"
        ip_version = "IPV4"
        child_ip_address_list_id = '0291ef78-4059-4bc1-b433-3f6ad698dc41'
        child_ip_address_list = DimensionDataChildIpAddressList(
            id=child_ip_address_list_id,
            name="test_child_ip_addr_list")
        net_domain = self.driver.ex_list_network_domains()[0]
        ip_address_1 = DimensionDataIpAddress(begin='190.2.2.100')
        ip_address_2 = DimensionDataIpAddress(begin='190.2.2.106',
                                              end='190.2.2.108')
        ip_address_3 = DimensionDataIpAddress(begin='190.2.2.0',
                                              prefix_size='24')
        ip_address_collection = [ip_address_1, ip_address_2, ip_address_3]
        # Create IP Address List
        success = self.driver.ex_create_ip_address_list(
            ex_network_domain=net_domain, name=name,
            ip_version=ip_version, description=description,
            ip_address_collection=ip_address_collection,
            child_ip_address_list=child_ip_address_list)
        self.assertTrue(success)

    def test_ex_create_ip_address_list_STR(self):
        # Same as above but the network domain and child list are passed
        # as id strings.
        name = "Test_IP_Address_List_3"
        description = "Test Description"
        ip_version = "IPV4"
        child_ip_address_list_id = '0291ef78-4059-4bc1-b433-3f6ad698dc41'
        net_domain = self.driver.ex_list_network_domains()[0]
        ip_address_1 = DimensionDataIpAddress(begin='190.2.2.100')
        ip_address_2 = DimensionDataIpAddress(begin='190.2.2.106',
                                              end='190.2.2.108')
        ip_address_3 = DimensionDataIpAddress(begin='190.2.2.0',
                                              prefix_size='24')
        ip_address_collection = [ip_address_1, ip_address_2, ip_address_3]
        # Create IP Address List
        success = self.driver.ex_create_ip_address_list(
            ex_network_domain=net_domain.id, name=name,
            ip_version=ip_version, description=description,
            ip_address_collection=ip_address_collection,
            child_ip_address_list=child_ip_address_list_id)
        self.assertTrue(success)

    def test_ex_edit_ip_address_list(self):
        ip_address_1 = DimensionDataIpAddress(begin='190.2.2.111')
        ip_address_collection = [ip_address_1]
        child_ip_address_list = DimensionDataChildIpAddressList(
            id='2221ef78-4059-4bc1-b433-3f6ad698dc41',
            name="test_child_ip_address_list edited")
        ip_address_list = DimensionDataIpAddressList(
            id='1111ef78-4059-4bc1-b433-3f6ad698d111',
            name="test ip address list edited",
            ip_version="IPv4", description="test",
            ip_address_collection=ip_address_collection,
            child_ip_address_lists=child_ip_address_list,
            state="NORMAL",
            create_time='2015-09-29T02:49:45'
        )
        success = self.driver.ex_edit_ip_address_list(
            ex_ip_address_list=ip_address_list,
            description="test ip address list",
            ip_address_collection=ip_address_collection,
            child_ip_address_lists=child_ip_address_list
        )
        self.assertTrue(success)

    def test_ex_edit_ip_address_list_STR(self):
        # Editing by id string instead of an object.
        ip_address_1 = DimensionDataIpAddress(begin='190.2.2.111')
        ip_address_collection = [ip_address_1]
        child_ip_address_list = DimensionDataChildIpAddressList(
            id='2221ef78-4059-4bc1-b433-3f6ad698dc41',
            name="test_child_ip_address_list edited")
        success = self.driver.ex_edit_ip_address_list(
            ex_ip_address_list='84e34850-595d- 436e-a885-7cd37edb24a4',
            description="test ip address list",
            ip_address_collection=ip_address_collection,
            child_ip_address_lists=child_ip_address_list
        )
        self.assertTrue(success)

    def test_ex_delete_ip_address_list(self):
        child_ip_address_list = DimensionDataChildIpAddressList(
            id='2221ef78-4059-4bc1-b433-3f6ad698dc41',
            name="test_child_ip_address_list edited")
        ip_address_list = DimensionDataIpAddressList(
            id='1111ef78-4059-4bc1-b433-3f6ad698d111',
            name="test ip address list edited",
            ip_version="IPv4", description="test",
            ip_address_collection=None,
            child_ip_address_lists=child_ip_address_list,
            state="NORMAL",
            create_time='2015-09-29T02:49:45'
        )
        success = self.driver.ex_delete_ip_address_list(
            ex_ip_address_list=ip_address_list)
        self.assertTrue(success)

    def test_ex_delete_ip_address_list_STR(self):
        # Deleting by id string.
        success = self.driver.ex_delete_ip_address_list(
            ex_ip_address_list='111ef78-4059-4bc1-b433-3f6ad698d111')
        self.assertTrue(success)

    def test_ex_list_portlist(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist = self.driver.ex_list_portlist(
            ex_network_domain=net_domain)
        self.assertTrue(isinstance(portlist, list))
        self.assertEqual(len(portlist), 3)
        self.assertTrue(isinstance(portlist[0].name, str))
        self.assertTrue(isinstance(portlist[0].description, str))
        self.assertTrue(isinstance(portlist[0].state, str))
        self.assertTrue(isinstance(portlist[0].port_collection, list))
        self.assertTrue(isinstance(portlist[0].port_collection[0].begin, str))
        self.assertTrue(isinstance(portlist[0].port_collection[0].end, str))
        self.assertTrue(isinstance(portlist[0].child_portlist_list, list))
        self.assertTrue(isinstance(portlist[0].child_portlist_list[0].id, str))
        self.assertTrue(isinstance(portlist[0].child_portlist_list[0].name, str))
        self.assertTrue(isinstance(portlist[0].create_time, str))

    def test_ex_get_port_list(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist_id = self.driver.ex_list_portlist(
            ex_network_domain=net_domain)[0].id
        portlist = self.driver.ex_get_portlist(
            ex_portlist_id=portlist_id)
        self.assertTrue(isinstance(portlist, DimensionDataPortList))
        self.assertTrue(isinstance(portlist.name, str))
        self.assertTrue(isinstance(portlist.description, str))
        self.assertTrue(isinstance(portlist.state, str))
        self.assertTrue(isinstance(portlist.port_collection, list))
        self.assertTrue(isinstance(portlist.port_collection[0].begin, str))
        self.assertTrue(isinstance(portlist.port_collection[0].end, str))
        self.assertTrue(isinstance(portlist.child_portlist_list, list))
        self.assertTrue(isinstance(portlist.child_portlist_list[0].id, str))
        self.assertTrue(isinstance(portlist.child_portlist_list[0].name, str))
        self.assertTrue(isinstance(portlist.create_time, str))

    def test_ex_get_portlist_STR(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist = self.driver.ex_list_portlist(
            ex_network_domain=net_domain)[0]
        port_list = self.driver.ex_get_portlist(
            ex_portlist_id=portlist.id)
        self.assertTrue(isinstance(port_list, DimensionDataPortList))
        self.assertTrue(isinstance(port_list.name, str))
        self.assertTrue(isinstance(port_list.description, str))
        self.assertTrue(isinstance(port_list.state, str))
        self.assertTrue(isinstance(port_list.port_collection, list))
        self.assertTrue(isinstance(port_list.port_collection[0].begin, str))
        self.assertTrue(isinstance(port_list.port_collection[0].end, str))
        self.assertTrue(isinstance(port_list.child_portlist_list, list))
        self.assertTrue(isinstance(port_list.child_portlist_list[0].id, str))
        self.assertTrue(isinstance(port_list.child_portlist_list[0].name, str))
        self.assertTrue(isinstance(port_list.create_time, str))

    def test_ex_create_portlist_NOCHILDPORTLIST(self):
        # Port list creation without any child port lists.
        name = "Test_Port_List"
        description = "Test Description"
        net_domain = self.driver.ex_list_network_domains()[0]
        port_1 = DimensionDataPort(begin='8080')
        # NOTE(review): port_2 is built with DimensionDataIpAddress rather
        # than DimensionDataPort — works because only begin/end are read.
        port_2 = DimensionDataIpAddress(begin='8899', end='9023')
        port_collection = [port_1, port_2]
        # Create IP Address List
        success = self.driver.ex_create_portlist(
            ex_network_domain=net_domain, name=name,
            description=description,
            port_collection=port_collection
        )
        self.assertTrue(success)

    def test_ex_create_portlist(self):
        name = "Test_Port_List"
        description = "Test Description"
        net_domain = self.driver.ex_list_network_domains()[0]
        port_1 = DimensionDataPort(begin='8080')
        port_2 = DimensionDataIpAddress(begin='8899', end='9023')
        port_collection = [port_1, port_2]
        child_port_1 = DimensionDataChildPortList(
            id="333174a2-ae74-4658-9e56-50fc90e086cf", name='test port 1')
        child_port_2 = DimensionDataChildPortList(
            id="311174a2-ae74-4658-9e56-50fc90e04444", name='test port 2')
        child_ports = [child_port_1, child_port_2]
        # Create IP Address List
        success = self.driver.ex_create_portlist(
            ex_network_domain=net_domain, name=name,
            description=description,
            port_collection=port_collection,
            child_portlist_list=child_ports
        )
        self.assertTrue(success)

    def test_ex_create_portlist_STR(self):
        # Same creation path using id strings for the network domain and
        # child port lists.
        name = "Test_Port_List"
        description = "Test Description"
        net_domain = self.driver.ex_list_network_domains()[0]
        port_1 = DimensionDataPort(begin='8080')
        port_2 = DimensionDataIpAddress(begin='8899', end='9023')
        port_collection = [port_1, port_2]
        child_port_1 = DimensionDataChildPortList(
            id="333174a2-ae74-4658-9e56-50fc90e086cf", name='test port 1')
        child_port_2 = DimensionDataChildPortList(
            id="311174a2-ae74-4658-9e56-50fc90e04444", name='test port 2')
        child_ports_ids = [child_port_1.id, child_port_2.id]
        # Create IP Address List
        success = self.driver.ex_create_portlist(
            ex_network_domain=net_domain.id, name=name,
            description=description,
            port_collection=port_collection,
            child_portlist_list=child_ports_ids
        )
        self.assertTrue(success)

    def test_ex_edit_portlist(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist = self.driver.ex_list_portlist(net_domain)[0]
        description = "Test Description"
        port_1 = DimensionDataPort(begin='8080')
        port_2 = DimensionDataIpAddress(begin='8899', end='9023')
        port_collection = [port_1, port_2]
        child_port_1 = DimensionDataChildPortList(
            id="333174a2-ae74-4658-9e56-50fc90e086cf", name='test port 1')
        child_port_2 = DimensionDataChildPortList(
            id="311174a2-ae74-4658-9e56-50fc90e04444", name='test port 2')
        child_ports = [child_port_1.id, child_port_2.id]
        # Create IP Address List
        success = self.driver.ex_edit_portlist(
            ex_portlist=portlist,
            description=description,
            port_collection=port_collection,
            child_portlist_list=child_ports
        )
        self.assertTrue(success)

    def test_ex_edit_portlist_STR(self):
        portlist_id = "484174a2-ae74-4658-9e56-50fc90e086cf"
        description = "Test Description"
        port_1 = DimensionDataPort(begin='8080')
        port_2 = DimensionDataIpAddress(begin='8899', end='9023')
        port_collection = [port_1, port_2]
        child_port_1 = DimensionDataChildPortList(
            id="333174a2-ae74-4658-9e56-50fc90e086cf", name='test port 1')
        child_port_2 = DimensionDataChildPortList(
            id="311174a2-ae74-4658-9e56-50fc90e04444", name='test port 2')
        child_ports_ids = [child_port_1.id, child_port_2.id]
        # Create IP Address List
        success = self.driver.ex_edit_portlist(
            ex_portlist=portlist_id,
            description=description,
            port_collection=port_collection,
            child_portlist_list=child_ports_ids
        )
        self.assertTrue(success)

    def test_ex_delete_portlist(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist = self.driver.ex_list_portlist(net_domain)[0]
        success = self.driver.ex_delete_portlist(
            ex_portlist=portlist)
        self.assertTrue(success)

    def test_ex_delete_portlist_STR(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist = self.driver.ex_list_portlist(net_domain)[0]
        success = self.driver.ex_delete_portlist(
            ex_portlist=portlist.id)
        self.assertTrue(success)

    def test_import_image(self):
        # tagkey values may be None (key applied without a value).
        tag_dictionaries = {'tagkey1_name': 'dev test', 'tagkey2_name': None}
        success = self.driver.import_image(
            ovf_package_name='aTestGocToNGoc2_export2.mf',
            name='Libcloud NGOCImage_New 2',
            description='test',
            cluster_id='QA1_N2_VMWARE_1-01',
            is_guest_os_customization='false',
            tagkey_name_value_dictionaries=tag_dictionaries)
        self.assertTrue(success)

    def test_import_image_error_too_many_choice(self):
        # Supplying both cluster_id and datacenter_id is ambiguous.
        tag_dictionaries = {'tagkey1_name': 'dev test', 'tagkey2_name': None}
        with self.assertRaises(ValueError):
            self.driver.import_image(
                ovf_package_name='aTestGocToNGoc2_export2.mf',
                name='Libcloud NGOCImage_New 2',
                description='test',
                cluster_id='QA1_N2_VMWARE_1-01',
                datacenter_id='QA1_N1_VMWARE_1',
                is_guest_os_customization='false',
                tagkey_name_value_dictionaries=tag_dictionaries)

    def test_import_image_error_missing_choice(self):
        # Supplying neither cluster_id nor datacenter_id is also invalid.
        tag_dictionaries = {'tagkey1_name': 'dev test', 'tagkey2_name': None}
        with self.assertRaises(ValueError):
            self.driver.import_image(
                ovf_package_name='aTestGocToNGoc2_export2.mf',
                name='Libcloud NGOCImage_New 2',
                description='test',
                cluster_id=None,
                datacenter_id=None,
                is_guest_os_customization='false',
                tagkey_name_value_dictionaries=tag_dictionaries)

    def test_exchange_nic_vlans(self):
        success = self.driver.ex_exchange_nic_vlans(
            nic_id_1='a4b4b42b-ccb5-416f-b052-ce7cb7fdff12',
            nic_id_2='b39d09b8-ea65-424a-8fa6-c6f5a98afc69')
        self.assertTrue(success)

    def test_change_nic_network_adapter(self):
        success = self.driver.ex_change_nic_network_adapter(
            nic_id='0c55c269-20a5-4fec-8054-22a245a48fe4',
            network_adapter_name='E1000')
        self.assertTrue(success)

    def test_ex_create_node_uncustomized_mcp2_using_vlan(self):
        # Get VLAN
        vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
        # Create node using vlan instead of private IPv4
        node = self.driver.ex_create_node_uncustomized(
            name='test_server_05',
            image='fake_customer_image',
            ex_network_domain='fakenetworkdomain',
            ex_is_started=False,
            ex_description=None,
            ex_cluster_id=None,
            ex_cpu_specification=None,
            ex_memory_gb=None,
            ex_primary_nic_private_ipv4=None,
            ex_primary_nic_vlan=vlan,
            ex_primary_nic_network_adapter=None,
            ex_additional_nics=None,
            ex_disks=None,
            ex_tagid_value_pairs=None,
            ex_tagname_value_pairs=None)
        self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')

    def test_ex_create_node_uncustomized_mcp2_using_ipv4(self):
        # Same call but addressed with a primary-NIC private IPv4 instead
        # of a VLAN.
        node = self.driver.ex_create_node_uncustomized(
            name='test_server_05',
            image='fake_customer_image',
            ex_network_domain='fakenetworkdomain',
            ex_is_started=False,
            ex_description=None,
            ex_cluster_id=None,
            ex_cpu_specification=None,
            ex_memory_gb=None,
            ex_primary_nic_private_ipv4='10.0.0.1',
            ex_primary_nic_vlan=None,
            ex_primary_nic_network_adapter=None,
            ex_additional_nics=None,
            ex_disks=None,
            ex_tagid_value_pairs=None,
            ex_tagname_value_pairs=None)
        self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')


class InvalidRequestError(Exception):
    # Raised by mock handlers when a request body does not match the
    # expected XML root tag.
    def __init__(self, tag):
        super(InvalidRequestError, self).__init__("Invalid Request - %s" % tag)


class DimensionDataMockHttp(MockHttp):
    # Canned HTTP responses for the DimensionData driver; each handler
    # method name maps to a URL path, with the class-level ``type``
    # attribute selecting scenario-specific variants.

    fixtures = ComputeFileFixtures('dimensiondata')

    def
_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_report_usage(self, method, url, body, headers):
        body = self.fixtures.load(
            'summary_usage_report.csv'
        )
        # NOTE(review): returns BAD_REQUEST status with an OK reason
        # string — presumably deliberate for this fixture; confirm against
        # the driver's report parsing path.
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_report_usageDetailed(self, method, url, body, headers):
        body = self.fixtures.load(
            'detailed_usage_report.csv'
        )
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_auditlog(self, method, url, body, headers):
        body = self.fixtures.load(
            'audit_log.csv'
        )
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
        # Simulates failed authentication against the account endpoint.
        return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])

    def _oec_0_9_myaccount(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_PAGINATED(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_ALLFILTERS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_base_image(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_base_image.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_base_imageWithDiskSpeed(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_base_imageWithDiskSpeed.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(self, method, url, body, headers):
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(self, method, url, body, headers):
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers):
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers):
        # Dispatch on the server action given as the URL query string.
        body = None
        action = url.split('?')[-1]
        if action == 'restart':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml')
        elif action == 'shutdown':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml')
        elif action == 'delete':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml')
        elif action == 'start':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml')
        elif action == 'poweroff':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(self, method, url, body, headers):
        # Same dispatch, but every action responds with the INPROGRESS
        # fixture and a BAD_REQUEST status to simulate a busy resource.
        body = None
        action = url.split('?')[-1]
        if action == 'restart':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml')
        elif action == 'shutdown':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml')
        elif action == 'delete':
            body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml') elif action == 'start': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml') elif action == 'poweroff': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers): body = self.fixtures.load( '_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(self, method, url, body, headers): if method is "POST": request = ET.fromstring(body) if request.tag != "{http://oec.api.opsource.net/schemas/network}NewNetworkWithLocation": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation_NA9(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize.xml') return (httplib.OK, body, 
{}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1(self, method, url, body, headers): action = url.split('?')[-1] if action == 'delete': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_POST.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_FAIL_EXISTING(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create_FAIL.xml' ) return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_07e3621a_a920_4a9a_943c_d8021f27f418(self, method, url, body, 
headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_07e3621a_a920_4a9a_943c_d8021f27f418_FAIL(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete_FAIL.xml' ) return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers): body = self.fixtures.load( 'server.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}deleteServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_deleteServer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_INPROGRESS(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}deleteServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_deleteServer_RESOURCEBUSY.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}rebootServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_rebootServer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_INPROGRESS(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != 
"{urn:didata.com:api:cloud:types}rebootServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_rebootServer_RESOURCEBUSY.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers): if url.endswith('datacenterId=NA3'): body = self.fixtures.load( '2.4/server_server_NA3.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) body = self.fixtures.load( '2.4/server_server.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGESIZE50(self, method, url, body, headers): if not url.endswith('pageSize=50'): raise ValueError("pageSize is not set as expected") body = self.fixtures.load( '2.4/server_server.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_EMPTY(self, method, url, body, headers): body = self.fixtures.load( 'server_server_paginated_empty.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGED_THEN_EMPTY(self, method, url, body, headers): if 'pageNumber=2' in url: body = self.fixtures.load( 'server_server_paginated_empty.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) else: body = self.fixtures.load( '2.4/server_server_paginated.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGINATED(self, method, url, body, headers): if 'pageNumber=2' in url: body = self.fixtures.load( '2.4/server_server.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) else: body = self.fixtures.load( '2.4/server_server_paginated.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGINATEDEMPTY(self, 
method, url, body, headers): body = self.fixtures.load( 'server_server_paginated_empty.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS(self, method, url, body, headers): (_, params) = url.split('?') parameters = params.split('&') for parameter in parameters: (key, value) = parameter.split('=') if key == 'datacenterId': assert value == 'fake_loc' elif key == 'networkId': assert value == 'fake_network' elif key == 'networkDomainId': assert value == 'fake_network_domain' elif key == 'vlanId': assert value == 'fake_vlan' elif key == 'ipv6': assert value == 'fake_ipv6' elif key == 'privateIpv4': assert value == 'fake_ipv4' elif key == 'name': assert value == 'fake_name' elif key == 'state': assert value == 'fake_state' elif key == 'started': assert value == 'True' elif key == 'deployed': assert value == 'True' elif key == 'sourceImageId': assert value == 'fake_image' else: raise ValueError("Could not find in url parameters {0}:{1}".format(key, value)) body = self.fixtures.load( '2.4/server_server.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule(self, method, url, body, headers): body = self.fixtures.load( 'server_antiAffinityRule_list.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule_ALLFILTERS(self, method, url, body, headers): (_, params) = url.split('?') parameters = params.split('&') for parameter in parameters: (key, value) = parameter.split('=') if key == 'id': assert value == 'FAKE_ID' elif key == 'state': assert value == 'FAKE_STATE' elif key == 'pageSize': assert value == '250' elif key == 'networkDomainId': pass else: raise ValueError("Could not find in url parameters {0}:{1}".format(key, value)) body = self.fixtures.load( 'server_antiAffinityRule_list.xml' ) return (httplib.OK, body, 
{}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule_PAGINATED(self, method, url, body, headers): if 'pageNumber=2' in url: body = self.fixtures.load( 'server_antiAffinityRule_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) else: body = self.fixtures.load( 'server_antiAffinityRule_list_PAGINATED.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter(self, method, url, body, headers): if url.endswith('id=NA9'): body = self.fixtures.load( 'infrastructure_datacenter_NA9.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) body = self.fixtures.load( 'infrastructure_datacenter.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter_ALLFILTERS(self, method, url, body, headers): if url.endswith('id=NA9'): body = self.fixtures.load( 'infrastructure_datacenter_NA9.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) body = self.fixtures.load( 'infrastructure_datacenter.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_updateVmwareTools(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}updateVmwareTools": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_updateVmwareTools.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}startServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_startServer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}startServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_startServer_INPROGRESS.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}shutdownServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_shutdownServer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}shutdownServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_shutdownServer_INPROGRESS.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}resetServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_resetServer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}powerOffServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_powerOffServer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS(self, method, url, body, headers): request = ET.fromstring(body) 
if request.tag != "{urn:didata.com:api:cloud:types}powerOffServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_powerOffServer_INPROGRESS.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_11_INPROGRESS( self, method, url, body, headers): body = self.fixtures.load('2.4/server_GetServer.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain(self, method, url, body, headers): body = self.fixtures.load( 'network_networkDomain.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_ALLFILTERS(self, method, url, body, headers): (_, params) = url.split('?') parameters = params.split('&') for parameter in parameters: (key, value) = parameter.split('=') if key == 'datacenterId': assert value == 'fake_location' elif key == 'type': assert value == 'fake_plan' elif key == 'name': assert value == 'fake_name' elif key == 'state': assert value == 'fake_state' else: raise ValueError("Could not find in url parameters {0}:{1}".format(key, value)) body = self.fixtures.load( 'network_networkDomain.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan(self, method, url, body, headers): body = self.fixtures.load( 'network_vlan.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan_ALLFILTERS(self, method, url, body, headers): (_, params) = url.split('?') parameters = params.split('&') for parameter in parameters: (key, value) = parameter.split('=') if key == 'datacenterId': assert value == 'fake_location' elif key == 'networkDomainId': assert value == 'fake_network_domain' elif key == 'ipv6Address': assert value == 'fake_ipv6' elif key == 
'privateIpv4Address': assert value == 'fake_ipv4' elif key == 'name': assert value == 'fake_name' elif key == 'state': assert value == 'fake_state' else: raise ValueError("Could not find in url parameters {0}:{1}".format(key, value)) body = self.fixtures.load( 'network_vlan.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployServer(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}deployServer": raise InvalidRequestError(request.tag) # Make sure the we either have a network tag with an IP or networkId # Or Network info with a primary nic that has privateip or vlanid network = request.find(fixxpath('network', TYPES_URN)) network_info = request.find(fixxpath('networkInfo', TYPES_URN)) if network is not None: if network_info is not None: raise InvalidRequestError("Request has both MCP1 and MCP2 values") ipv4 = findtext(network, 'privateIpv4', TYPES_URN) networkId = findtext(network, 'networkId', TYPES_URN) if ipv4 is None and networkId is None: raise InvalidRequestError('Invalid request MCP1 requests need privateIpv4 or networkId') elif network_info is not None: if network is not None: raise InvalidRequestError("Request has both MCP1 and MCP2 values") primary_nic = network_info.find(fixxpath('primaryNic', TYPES_URN)) ipv4 = findtext(primary_nic, 'privateIpv4', TYPES_URN) vlanId = findtext(primary_nic, 'vlanId', TYPES_URN) if ipv4 is None and vlanId is None: raise InvalidRequestError('Invalid request MCP2 requests need privateIpv4 or vlanId') else: raise InvalidRequestError('Invalid request, does not have network or network_info in XML') body = self.fixtures.load( 'server_deployServer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers): body = self.fixtures.load( 
'2.4/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployNetworkDomain(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}deployNetworkDomain": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_deployNetworkDomain.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be(self, method, url, body, headers): body = self.fixtures.load( 'network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be_ALLFILTERS(self, method, url, body, headers): body = self.fixtures.load( 'network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editNetworkDomain(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}editNetworkDomain": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_editNetworkDomain.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNetworkDomain(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}deleteNetworkDomain": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_deleteNetworkDomain.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployVlan(self, method, url, body, headers): request = ET.fromstring(body) 
if request.tag != "{urn:didata.com:api:cloud:types}deployVlan": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_deployVlan.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan_0e56433f_d808_4669_821d_812769517ff8(self, method, url, body, headers): body = self.fixtures.load( 'network_vlan_0e56433f_d808_4669_821d_812769517ff8.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editVlan(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}editVlan": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_editVlan.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteVlan(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}deleteVlan": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_deleteVlan.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_expandVlan(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}expandVlan": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_expandVlan.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_addPublicIpBlock(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}addPublicIpBlock": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_addPublicIpBlock.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba(self, method, url, body, headers): body = self.fixtures.load( 'network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock(self, method, url, body, headers): body = self.fixtures.load( 'network_publicIpBlock.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8(self, method, url, body, headers): body = self.fixtures.load( 'network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_removePublicIpBlock(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}removePublicIpBlock": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_removePublicIpBlock.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule(self, method, url, body, headers): body = self.fixtures.load( 'network_firewallRule.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createFirewallRule(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}createFirewallRule": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_createFirewallRule.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c(self, method, url, body, headers): body = self.fixtures.load( 
'network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editFirewallRule(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}editFirewallRule": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_editFirewallRule.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteFirewallRule(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}deleteFirewallRule": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_deleteFirewallRule.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createNatRule(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}createNatRule": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'network_createNatRule.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule(self, method, url, body, headers): body = self.fixtures.load( 'network_natRule.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce(self, method, url, body, headers): body = self.fixtures.load( 'network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNatRule(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}deleteNatRule": raise InvalidRequestError(request.tag) body = 
self.fixtures.load( 'network_deleteNatRule.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_addNic(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}addNic": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_addNic.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_removeNic(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}removeNic": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_removeNic.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_disableServerMonitoring(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}disableServerMonitoring": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_disableServerMonitoring.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_enableServerMonitoring(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}enableServerMonitoring": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_enableServerMonitoring.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_changeServerMonitoringPlan(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}changeServerMonitoringPlan": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_changeServerMonitoringPlan.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage(self, method, url, body, headers): body = self.fixtures.load( '2.4/image_osImage.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c(self, method, url, body, headers): body = self.fixtures.load( '2.4/image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a(self, method, url, body, headers): body = self.fixtures.load( '2.4/image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d(self, method, url, body, headers): body = self.fixtures.load( 'image_osImage_BAD_REQUEST.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c(self, method, url, body, headers): body = self.fixtures.load( 'image_osImage_BAD_REQUEST.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_FAKE_IMAGE_ID(self, method, url, body, headers): body = self.fixtures.load( 'image_osImage_BAD_REQUEST.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage(self, method, url, body, headers): body = self.fixtures.load( '2.4/image_customerImage.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d(self, method, url, body, headers): body = self.fixtures.load( 
'2.4/image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c(self, method, url, body, headers): body = self.fixtures.load( '2.4/image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_FAKE_IMAGE_ID(self, method, url, body, headers): body = self.fixtures.load( 'image_customerImage_BAD_REQUEST.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_reconfigureServer(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}reconfigureServer": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'server_reconfigureServer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_cleanServer(self, method, url, body, headers): body = self.fixtures.load( 'server_cleanServer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_addDisk(self, method, url, body, headers): body = self.fixtures.load( 'server_addDisk.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_removeDisk(self, method, url, body, headers): body = self.fixtures.load( 'server_removeDisk.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}createTagKey": raise InvalidRequestError(request.tag) name = findtext(request, 'name', TYPES_URN) description = 
findtext(request, 'description', TYPES_URN) value_required = findtext(request, 'valueRequired', TYPES_URN) display_on_report = findtext(request, 'displayOnReport', TYPES_URN) if name is None: raise ValueError("Name must have a value in the request") if description is not None: raise ValueError("Default description for a tag should be blank") if value_required is None or value_required != 'true': raise ValueError("Default valueRequired should be true") if display_on_report is None or display_on_report != 'true': raise ValueError("Default displayOnReport should be true") body = self.fixtures.load( 'tag_createTagKey.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey_ALLPARAMS(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}createTagKey": raise InvalidRequestError(request.tag) name = findtext(request, 'name', TYPES_URN) description = findtext(request, 'description', TYPES_URN) value_required = findtext(request, 'valueRequired', TYPES_URN) display_on_report = findtext(request, 'displayOnReport', TYPES_URN) if name is None: raise ValueError("Name must have a value in the request") if description is None: raise ValueError("Description should have a value") if value_required is None or value_required != 'false': raise ValueError("valueRequired should be false") if display_on_report is None or display_on_report != 'false': raise ValueError("displayOnReport should be false") body = self.fixtures.load( 'tag_createTagKey.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey_BADREQUEST(self, method, url, body, headers): body = self.fixtures.load( 'tag_createTagKey_BADREQUEST.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey(self, method, url, body, headers): 
body = self.fixtures.load( 'tag_tagKey_list.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_SINGLE(self, method, url, body, headers): body = self.fixtures.load( 'tag_tagKey_list_SINGLE.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_ALLFILTERS(self, method, url, body, headers): (_, params) = url.split('?') parameters = params.split('&') for parameter in parameters: (key, value) = parameter.split('=') if key == 'id': assert value == 'fake_id' elif key == 'name': assert value == 'fake_name' elif key == 'valueRequired': assert value == 'false' elif key == 'displayOnReport': assert value == 'false' elif key == 'pageSize': assert value == '250' else: raise ValueError("Could not find in url parameters {0}:{1}".format(key, value)) body = self.fixtures.load( 'tag_tagKey_list.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_d047c609_93d7_4bc5_8fc9_732c85840075(self, method, url, body, headers): body = self.fixtures.load( 'tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_d047c609_93d7_4bc5_8fc9_732c85840075_NOEXIST(self, method, url, body, headers): body = self.fixtures.load( 'tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54_BADREQUEST.xml' ) return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NAME(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}editTagKey": raise InvalidRequestError(request.tag) name = findtext(request, 'name', TYPES_URN) description = findtext(request, 'description', TYPES_URN) value_required = findtext(request, 'valueRequired', TYPES_URN) 
display_on_report = findtext(request, 'displayOnReport', TYPES_URN) if name is None: raise ValueError("Name must have a value in the request") if description is not None: raise ValueError("Description should be empty") if value_required is not None: raise ValueError("valueRequired should be empty") if display_on_report is not None: raise ValueError("displayOnReport should be empty") body = self.fixtures.load( 'tag_editTagKey.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NOTNAME(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}editTagKey": raise InvalidRequestError(request.tag) name = findtext(request, 'name', TYPES_URN) description = findtext(request, 'description', TYPES_URN) value_required = findtext(request, 'valueRequired', TYPES_URN) display_on_report = findtext(request, 'displayOnReport', TYPES_URN) if name is not None: raise ValueError("Name should be empty") if description is None: raise ValueError("Description should not be empty") if value_required is None: raise ValueError("valueRequired should not be empty") if display_on_report is None: raise ValueError("displayOnReport should not be empty") body = self.fixtures.load( 'tag_editTagKey.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NOCHANGE(self, method, url, body, headers): body = self.fixtures.load( 'tag_editTagKey_BADREQUEST.xml' ) return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_deleteTagKey(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}deleteTagKey": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'tag_deleteTagKey.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_deleteTagKey_NOEXIST(self, method, url, body, headers): body = self.fixtures.load( 'tag_deleteTagKey_BADREQUEST.xml' ) return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}applyTags": raise InvalidRequestError(request.tag) asset_type = findtext(request, 'assetType', TYPES_URN) asset_id = findtext(request, 'assetId', TYPES_URN) tag = request.find(fixxpath('tag', TYPES_URN)) tag_key_name = findtext(tag, 'tagKeyName', TYPES_URN) value = findtext(tag, 'value', TYPES_URN) if asset_type is None: raise ValueError("assetType should not be empty") if asset_id is None: raise ValueError("assetId should not be empty") if tag_key_name is None: raise ValueError("tagKeyName should not be empty") if value is None: raise ValueError("value should not be empty") body = self.fixtures.load( 'tag_applyTags.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags_NOVALUE(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}applyTags": raise InvalidRequestError(request.tag) asset_type = findtext(request, 'assetType', TYPES_URN) asset_id = findtext(request, 'assetId', TYPES_URN) tag = request.find(fixxpath('tag', TYPES_URN)) tag_key_name = findtext(tag, 'tagKeyName', TYPES_URN) value = findtext(tag, 'value', TYPES_URN) if asset_type is None: raise ValueError("assetType should not be empty") if asset_id is None: raise ValueError("assetId should not be empty") if tag_key_name is None: raise ValueError("tagKeyName should not be empty") if value is not None: raise ValueError("value should be empty") body = self.fixtures.load( 'tag_applyTags.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags_NOTAGKEY(self, method, url, body, headers): body = self.fixtures.load( 'tag_applyTags_BADREQUEST.xml' ) return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_removeTags(self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}removeTags": raise InvalidRequestError(request.tag) body = self.fixtures.load( 'tag_removeTag.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_removeTags_NOTAG(self, method, url, body, headers): body = self.fixtures.load( 'tag_removeTag_BADREQUEST.xml' ) return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tag(self, method, url, body, headers): body = self.fixtures.load( 'tag_tag_list.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tag_ALLPARAMS(self, method, url, body, headers): (_, params) = url.split('?') parameters = params.split('&') for parameter in parameters: (key, value) = parameter.split('=') if key == 'assetId': assert value == 'fake_asset_id' elif key == 'assetType': assert value == 'fake_asset_type' elif key == 'valueRequired': assert value == 'false' elif key == 'displayOnReport': assert value == 'false' elif key == 'pageSize': assert value == '250' elif key == 'datacenterId': assert value == 'fake_location' elif key == 'value': assert value == 'fake_value' elif key == 'tagKeyName': assert value == 'fake_tag_key_name' elif key == 'tagKeyId': assert value == 'fake_tag_key_id' else: raise ValueError("Could not find in url parameters {0}:{1}".format(key, value)) body = self.fixtures.load( 'tag_tag_list.xml' ) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_ipAddressList( self, method, url, body, headers): body = self.fixtures.load('ip_address_lists.xml') return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_ipAddressList_FILTERBYNAME( self, method, url, body, headers): body = self.fixtures.load('ip_address_lists_FILTERBYNAME.xml') return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createIpAddressList( self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}" \ "createIpAddressList": raise InvalidRequestError(request.tag) net_domain = findtext(request, 'networkDomainId', TYPES_URN) if net_domain is None: raise ValueError("Network Domain should not be empty") name = findtext(request, 'name', TYPES_URN) if name is None: raise ValueError("Name should not be empty") ip_version = findtext(request, 'ipVersion', TYPES_URN) if ip_version is None: raise ValueError("IP Version should not be empty") ip_address_col_required = findall(request, 'ipAddress', TYPES_URN) child_ip_address_required = findall(request, 'childIpAddressListId', TYPES_URN) if 0 == len(ip_address_col_required) and \ 0 == len(child_ip_address_required): raise ValueError("At least one ipAddress element or " "one childIpAddressListId element must be " "provided.") if ip_address_col_required[0].get('begin') is None: raise ValueError("IP Address should not be empty") body = self.fixtures.load( 'ip_address_list_create.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editIpAddressList( self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}" \ "editIpAddressList": raise InvalidRequestError(request.tag) ip_address_list = request.get('id') if ip_address_list is None: raise 
ValueError("IpAddressList ID should not be empty") name = findtext(request, 'name', TYPES_URN) if name is not None: raise ValueError("Name should not exists in request") ip_version = findtext(request, 'ipVersion', TYPES_URN) if ip_version is not None: raise ValueError("IP Version should not exists in request") ip_address_col_required = findall(request, 'ipAddress', TYPES_URN) child_ip_address_required = findall(request, 'childIpAddressListId', TYPES_URN) if 0 == len(ip_address_col_required) and \ 0 == len(child_ip_address_required): raise ValueError("At least one ipAddress element or " "one childIpAddressListId element must be " "provided.") if ip_address_col_required[0].get('begin') is None: raise ValueError("IP Address should not be empty") body = self.fixtures.load( 'ip_address_list_edit.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteIpAddressList( self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}" \ "deleteIpAddressList": raise InvalidRequestError(request.tag) ip_address_list = request.get('id') if ip_address_list is None: raise ValueError("IpAddressList ID should not be empty") body = self.fixtures.load( 'ip_address_list_delete.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_portList( self, method, url, body, headers): body = self.fixtures.load( 'port_list_lists.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_portList_c8c92ea3_2da8_4d51_8153_f39bec794d69( self, method, url, body, headers): body = self.fixtures.load( 'port_list_get.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createPortList( self, method, url, body, headers): request = ET.fromstring(body) if request.tag != 
"{urn:didata.com:api:cloud:types}" \ "createPortList": raise InvalidRequestError(request.tag) net_domain = findtext(request, 'networkDomainId', TYPES_URN) if net_domain is None: raise ValueError("Network Domain should not be empty") ports_required = findall(request, 'port', TYPES_URN) child_port_list_required = findall(request, 'childPortListId', TYPES_URN) if 0 == len(ports_required) and \ 0 == len(child_port_list_required): raise ValueError("At least one port element or one " "childPortListId element must be provided") if ports_required[0].get('begin') is None: raise ValueError("PORT begin value should not be empty") body = self.fixtures.load( 'port_list_create.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editPortList( self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}" \ "editPortList": raise InvalidRequestError(request.tag) ports_required = findall(request, 'port', TYPES_URN) child_port_list_required = findall(request, 'childPortListId', TYPES_URN) if 0 == len(ports_required) and \ 0 == len(child_port_list_required): raise ValueError("At least one port element or one " "childPortListId element must be provided") if ports_required[0].get('begin') is None: raise ValueError("PORT begin value should not be empty") body = self.fixtures.load( 'port_list_edit.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deletePortList( self, method, url, body, headers): request = ET.fromstring(body) if request.tag != "{urn:didata.com:api:cloud:types}" \ "deletePortList": raise InvalidRequestError(request.tag) port_list = request.get('id') if port_list is None: raise ValueError("Port List ID should not be empty") body = self.fixtures.load( 'ip_address_list_delete.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def 
_caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_cloneServer( self, method, url, body, headers): body = self.fixtures.load( '2.4/server_clone_response.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_importImage( self, method, url, body, headers): body = self.fixtures.load( '2.4/import_image_response.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_exchangeNicVlans( self, method, url, body, headers): body = self.fixtures.load( '2.4/exchange_nic_vlans_response.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_changeNetworkAdapter( self, method, url, body, headers): body = self.fixtures.load( '2.4/change_nic_networkadapter_response.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployUncustomizedServer( self, method, url, body, headers): body = self.fixtures.load( '2.4/deploy_customised_server.xml' ) return httplib.OK, body, {}, httplib.responses[httplib.OK] if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_azure_arm.py0000664000175000017500000004240613160302554024251 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import libcloud
import json
import sys
import functools
from datetime import datetime

import mock

from libcloud.compute.base import (NodeLocation, NodeSize, VolumeSnapshot,
                                   StorageVolume)
from libcloud.compute.drivers.azure_arm import AzureImage, NodeAuthPassword
from libcloud.compute.providers import get_driver
from libcloud.compute.types import (NodeState, Provider, StorageVolumeState,
                                    VolumeSnapshotState)
from libcloud.test import LibcloudTestCase, MockHttp
from libcloud.test import unittest
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.utils.iso8601 import UTC
from libcloud.utils.py3 import httplib


class AzureNodeDriverTests(LibcloudTestCase):
    """Tests for the Azure ARM compute driver, backed by AzureMockHttp."""

    TENANT_ID = '77777777-7777-7777-7777-777777777777'
    SUBSCRIPTION_ID = '99999999'
    APPLICATION_ID = '55555555-5555-5555-5555-555555555555'
    APPLICATION_PASS = 'p4ssw0rd'

    def setUp(self):
        # Route all driver HTTP traffic through the fixture-backed mock.
        Azure = get_driver(Provider.AZURE_ARM)
        Azure.connectionCls.conn_class = AzureMockHttp
        self.driver = Azure(self.TENANT_ID, self.SUBSCRIPTION_ID,
                            self.APPLICATION_ID, self.APPLICATION_PASS)

    def test_get_image(self):
        # Default storage suffix
        image = self.driver.get_image(
            image_id='http://www.example.com/foo/image_name')
        self.assertEqual(image.id,
                         'https://www.blob.core.windows.net/foo/image_name')
        self.assertEqual(image.name, 'image_name')

        # Custom storage suffix
        self.driver.connection.storage_suffix = '.core.chinacloudapi.cn'
        image = self.driver.get_image(
            image_id='http://www.example.com/foo/image_name')
        self.assertEqual(
            image.id,
            'https://www.blob.core.chinacloudapi.cn/foo/image_name')
        self.assertEqual(image.name, 'image_name')

    def test_locations_returned_successfully(self):
        locations = self.driver.list_locations()
        self.assertEqual([l.name for l in locations],
                         ["East US", "East US 2", "West US", "Central US",
                          "South Central US", "North Europe", "West Europe",
                          "East Asia", "Southeast Asia", "Japan East",
                          "Japan West"])

    def test_sizes_returned_successfully(self):
        location = self.driver.list_locations()[0]
        sizes = self.driver.list_sizes(location=location)
        self.assertEqual([l.name for l in sizes],
                         ["Standard_A0", "Standard_A1", "Standard_A2"])

    def test_ex_get_ratecard(self):
        ratecard = self.driver.ex_get_ratecard('0026P')
        self.assertEqual(set(ratecard.keys()),
                         set(['Currency', 'Locale', 'IsTaxIncluded',
                              'OfferTerms', 'Meters']))

    def test_create_node(self):
        location = NodeLocation('any_location', '', '', self.driver)
        size = NodeSize('any_size', '', 0, 0, 0, 0, driver=self.driver)
        image = AzureImage('1', '1', 'ubuntu', 'pub', location.id,
                           self.driver)
        auth = NodeAuthPassword('any_password')

        node = self.driver.create_node(
            'test-node-1', size, image, auth,
            location=location,
            ex_resource_group='000000',
            ex_storage_account='000000',
            ex_user_name='any_user',
            ex_network='000000',
            ex_subnet='000000',
            ex_use_managed_disks=True
        )
        hardware_profile = node.extra['properties']['hardwareProfile']
        os_profile = node.extra['properties']['osProfile']
        storage_profile = node.extra['properties']['storageProfile']

        self.assertEqual(node.name, 'test-node-1')
        self.assertEqual(node.state, NodeState.UPDATING)
        self.assertEqual(node.private_ips, ['10.0.0.1'])
        self.assertEqual(node.public_ips, [])
        self.assertEqual(node.extra['location'], location.id)
        self.assertEqual(hardware_profile['vmSize'], size.id)
        self.assertEqual(os_profile['adminUsername'], 'any_user')
        self.assertEqual(os_profile['adminPassword'], 'any_password')
        self.assertTrue('managedDisk' in storage_profile['osDisk'])
        # Bug fix: this was assertTrue(x, y), which only checks that x is
        # truthy (y becomes the failure message) and never compares the two.
        self.assertEqual(storage_profile['imageReference'], {
            'publisher': image.publisher,
            'offer': image.offer,
            'sku': image.sku,
            'version': image.version
        })

    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].name, 'test-node-1')
        self.assertEqual(nodes[0].state, NodeState.UPDATING)
        self.assertEqual(nodes[0].private_ips, ['10.0.0.1'])
        self.assertEqual(nodes[0].public_ips, [])

    def test_create_volume(self):
        location = self.driver.list_locations()[-1]
        volume = self.driver.create_volume(
            2, 'test-disk-1', location,
            ex_resource_group='000000',
            ex_tags={'description': 'MyVolume'}
        )
        self.assertEqual(volume.size, 2)
        self.assertEqual(volume.name, 'test-disk-1')
        self.assertEqual(volume.extra['name'], 'test-disk-1')
        self.assertEqual(volume.extra['tags'], {'description': 'MyVolume'})
        self.assertEqual(volume.extra['location'], location.id)
        self.assertEqual(
            volume.extra['properties']['creationData']['createOption'],
            'Empty')
        self.assertEqual(
            volume.extra['properties']['provisioningState'],
            'Succeeded')
        self.assertEqual(
            volume.extra['properties']['diskState'],
            'Attached')
        self.assertEqual(volume.state, StorageVolumeState.INUSE)

    def test_create_volume__with_snapshot(self):
        location = self.driver.list_locations()[0]
        snap_id = (
            '/subscriptions/99999999-9999-9999-9999-999999999999'
            '/resourceGroups/000000/providers/Microsoft.Compute'
            '/snapshots/test-snap-1'
        )
        snapshot = VolumeSnapshot(id=snap_id, size=2, driver=self.driver)

        volume = self.driver.create_volume(
            2, 'test-disk-1', location,
            snapshot=snapshot,
            ex_resource_group='000000',
            ex_tags={'description': 'MyVolume'}
        )
        self.assertEqual(
            volume.extra['properties']['creationData']['createOption'],
            'Copy')
        self.assertEqual(
            volume.extra['properties']['creationData']['sourceUri'],
            snap_id)

    def test_create_volume__required_kw(self):
        location = self.driver.list_locations()[0]
        fn = functools.partial(self.driver.create_volume, 2, 'test-disk-1')

        self.assertRaises(ValueError, fn)
        self.assertRaises(ValueError, fn, location=location)
        self.assertRaises(ValueError, fn, ex_resource_group='000000')

        ret_value = fn(ex_resource_group='000000', location=location)
        self.assertTrue(isinstance(ret_value, StorageVolume))
ret_value = fn(ex_resource_group='000000', location=location) self.assertTrue(isinstance(ret_value, StorageVolume)) def test_list_volumes(self): volumes = self.driver.list_volumes() self.assertEqual(len(volumes), 3) self.assertEqual(volumes[0].name, 'test-disk-1') self.assertEqual(volumes[0].size, 31) self.assertEqual( volumes[0].extra['properties']['provisioningState'], 'Succeeded') self.assertEqual( volumes[0].extra['properties']['diskState'], 'Attached') self.assertEqual(volumes[0].state, StorageVolumeState.INUSE) self.assertEqual(volumes[1].name, 'test-disk-2') self.assertEqual(volumes[1].size, 31) self.assertEqual( volumes[1].extra['properties']['provisioningState'], 'Updating') self.assertEqual( volumes[1].extra['properties']['diskState'], 'Unattached') self.assertEqual(volumes[1].state, StorageVolumeState.UPDATING) self.assertEqual(volumes[2].name, 'test-disk-3') self.assertEqual(volumes[2].size, 10) self.assertEqual( volumes[2].extra['properties']['provisioningState'], 'Succeeded') self.assertEqual( volumes[2].extra['properties']['diskState'], 'Unattached') self.assertEqual(StorageVolumeState.AVAILABLE, volumes[2].state) def test_list_volumes__with_resource_group(self): volumes = self.driver.list_volumes(ex_resource_group='111111') self.assertEqual(len(volumes), 1) self.assertEqual(volumes[0].name, 'test-disk-3') self.assertEqual(volumes[0].size, 10) self.assertEqual( volumes[0].extra['properties']['provisioningState'], 'Succeeded') self.assertEqual( volumes[0].extra['properties']['diskState'], 'Unattached') self.assertEqual(volumes[0].state, StorageVolumeState.AVAILABLE) def test_attach_volume(self): volumes = self.driver.list_volumes() node = self.driver.list_nodes()[0] self.driver.attach_volume(node, volumes[0], ex_lun=0) self.driver.attach_volume(node, volumes[1], ex_lun=15) self.driver.attach_volume(node, volumes[2]) data_disks = node.extra['properties']['storageProfile']['dataDisks'] luns = [disk['lun'] for disk in data_disks] 
self.assertTrue(len(data_disks), len(volumes)) self.assertTrue(set(luns), set([0, 1, 15])) def test_detach_volume(self): volumes = self.driver.list_volumes() node = self.driver.list_nodes()[0] for volume in volumes: self.driver.attach_volume(node, volume) data_disks = node.extra['properties']['storageProfile']['dataDisks'] self.assertEqual(len(data_disks), len(volumes)) for volume in volumes: self.driver.detach_volume(volume, ex_node=node) data_disks = node.extra['properties']['storageProfile']['dataDisks'] self.assertEqual(len(data_disks), 0) def test_destroy_volume(self): volume = self.driver.list_volumes()[0] ret_value = self.driver.destroy_volume(volume) self.assertTrue(ret_value) def test_create_volume_snapshot(self): location = self.driver.list_locations()[-1] volume = self.driver.list_volumes()[0] snap = self.driver.create_volume_snapshot( volume, 'test-snap-1', location=location, ex_resource_group='000000' ) self.assertEqual(snap.name, 'test-snap-1') self.assertEqual(snap.extra['name'], 'test-snap-1') self.assertEqual(snap.size, 1) self.assertEqual(snap.extra['source_id'], volume.id) self.assertEqual(snap.state, VolumeSnapshotState.CREATING) self.assertEqual(snap.extra['location'], location.id) self.assertEqual( snap.extra['properties']['provisioningState'], 'Creating') self.assertEqual( snap.extra['properties']['diskState'], 'Unattached') # 2017-03-09T14:28:27.8655868+00:00" self.assertEqual( datetime(2017, 3, 9, 14, 28, 27, 865586, tzinfo=UTC), snap.created) def test_create_volume_snapshot__required_kw(self): location = self.driver.list_locations()[0] volume = self.driver.list_volumes()[0] fn = functools.partial(self.driver.create_volume_snapshot, volume) self.assertRaises(ValueError, fn) self.assertRaises(ValueError, fn, name='test-snap-1') self.assertRaises(ValueError, fn, location=location) self.assertRaises(ValueError, fn, ex_resource_group='000000') ret_value = fn( name='test-snap-1', ex_resource_group='000000', location=location ) 
self.assertTrue(isinstance(ret_value, VolumeSnapshot)) def test_list_snapshots(self): snaps = self.driver.list_snapshots() self.assertEqual(len(snaps), 4) self.assertEqual(snaps[0].name, 'test-snap-1') self.assertEqual(snaps[0].extra['name'], 'test-snap-1') self.assertEqual(snaps[0].state, VolumeSnapshotState.CREATING) self.assertEqual( snaps[0].extra['source_id'], '/subscriptions/99999999-9999-9999-9999-999999999999' '/resourceGroups/000000/providers/Microsoft.Compute' '/disks/test-disk-1') self.assertEqual(snaps[0].size, 1) self.assertEqual(snaps[0].extra['tags']['test_snap'], 'test') self.assertTrue(isinstance(snaps[3].created, datetime)) self.assertEqual(snaps[3].name, 'test-snap-4') self.assertEqual(snaps[3].extra['name'], 'test-snap-4') self.assertEqual(snaps[3].state, VolumeSnapshotState.ERROR) self.assertEqual( snaps[3].extra['source_id'], '/subscriptions/99999999-9999-9999-9999-999999999999' '/resourceGroups/111111/providers/Microsoft.Compute' '/disks/test-disk-4') self.assertEqual(snaps[3].size, 2) self.assertTrue(isinstance(snaps[3].created, datetime)) def test_list_snapshots_in_resource_group(self): snaps = self.driver.list_snapshots(ex_resource_group='111111') self.assertEqual(len(snaps), 2) self.assertEqual(snaps[0].name, 'test-snap-3') self.assertEqual(snaps[0].extra['name'], 'test-snap-3') self.assertEqual(snaps[0].state, VolumeSnapshotState.ERROR) self.assertEqual( snaps[0].extra['source_id'], '/subscriptions/99999999-9999-9999-9999-999999999999' '/resourceGroups/111111/providers/Microsoft.Compute' '/disks/test-disk-3') self.assertEqual(snaps[0].size, 2) self.assertTrue(isinstance(snaps[0].created, datetime)) def test_list_volume_snapshots(self): volume = self.driver.list_volumes()[0] self.assertTrue(volume.name == 'test-disk-1') snapshots = self.driver.list_volume_snapshots(volume) self.assertEqual(len(snapshots), 1) self.assertEqual(snapshots[0].name, 'test-snap-1') self.assertEqual(volume.id, snapshots[0].extra['source_id']) def 
test_destroy_volume_snapshot(self): snapshot = self.driver.list_snapshots()[0] res_value = snapshot.destroy() self.assertTrue(res_value) def test_get_instance_vhd(self): with mock.patch.object(self.driver, '_ex_delete_old_vhd'): # Default storage suffix vhd_url = self.driver._get_instance_vhd(name='test1', ex_resource_group='000000', ex_storage_account='sga1') self.assertEqual(vhd_url, 'https://sga1.blob.core.windows.net/vhds/test1-os_0.vhd') # Custom storage suffix self.driver.connection.storage_suffix = '.core.chinacloudapi.cn' vhd_url = self.driver._get_instance_vhd(name='test1', ex_resource_group='000000', ex_storage_account='sga1') self.assertEqual(vhd_url, 'https://sga1.blob.core.chinacloudapi.cn/vhds/test1-os_0.vhd') class AzureMockHttp(MockHttp): fixtures = ComputeFileFixtures('azure_arm') def _update(self, fixture, body): for key, value in body.items(): if isinstance(value, dict): fixture[key] = self._update(fixture.get(key, {}), value) else: fixture[key] = body[key] return fixture def __getattr__(self, n): def fn(method, url, body, headers): # Note: We use shorter fixture name so we don't exceed 143 # character limit for file names file_name = n.replace('99999999_9999_9999_9999_999999999999', AzureNodeDriverTests.SUBSCRIPTION_ID) fixture = self.fixtures.load(file_name + ".json") if method in ('POST', 'PUT'): try: body = json.loads(body) fixture_tmp = json.loads(fixture) fixture_tmp = self._update(fixture_tmp, body) fixture = json.dumps(fixture_tmp) except ValueError: pass return (httplib.OK, fixture, headers, httplib.responses[httplib.OK]) return fn if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_base.py0000664000175000017500000000775113153541406023205 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.common.base import Connection, ConnectionKey, ConnectionUserAndKey from libcloud.common.types import LibcloudError from libcloud.compute.base import Node, NodeSize, NodeImage, NodeDriver, StorageVolume from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword from libcloud.compute.types import StorageVolumeState class FakeDriver(object): type = 0 class BaseTests(unittest.TestCase): def test_base_node(self): Node(id=0, name=0, state=0, public_ips=0, private_ips=0, driver=FakeDriver()) def test_base_node_size(self): NodeSize(id=0, name=0, ram=0, disk=0, bandwidth=0, price=0, driver=FakeDriver()) def test_base_node_image(self): NodeImage(id=0, name=0, driver=FakeDriver()) def test_base_storage_volume(self): StorageVolume(id="0", name="0", size=10, driver=FakeDriver(), state=StorageVolumeState.AVAILABLE) def test_base_node_driver(self): NodeDriver('foo') def test_base_connection_key(self): ConnectionKey('foo') def test_base_connection_userkey(self): ConnectionUserAndKey('foo', 'bar') def test_base_connection_timeout(self): Connection(timeout=10) class TestValidateAuth(unittest.TestCase): def test_get_auth_ssh(self): n = NodeDriver('foo') n.features = {'create_node': ['ssh_key']} auth = NodeAuthSSHKey('pubkey...') self.assertEqual(auth, n._get_and_check_auth(auth)) def test_get_auth_ssh_but_given_password(self): n = NodeDriver('foo') n.features = {'create_node': 
['ssh_key']} auth = NodeAuthPassword('password') self.assertRaises(LibcloudError, n._get_and_check_auth, auth) def test_get_auth_password(self): n = NodeDriver('foo') n.features = {'create_node': ['password']} auth = NodeAuthPassword('password') self.assertEqual(auth, n._get_and_check_auth(auth)) def test_get_auth_password_but_given_ssh_key(self): n = NodeDriver('foo') n.features = {'create_node': ['password']} auth = NodeAuthSSHKey('publickey') self.assertRaises(LibcloudError, n._get_and_check_auth, auth) def test_get_auth_default_ssh_key(self): n = NodeDriver('foo') n.features = {'create_node': ['ssh_key']} self.assertEqual(None, n._get_and_check_auth(None)) def test_get_auth_default_password(self): n = NodeDriver('foo') n.features = {'create_node': ['password']} auth = n._get_and_check_auth(None) self.assertTrue(isinstance(auth, NodeAuthPassword)) def test_get_auth_default_no_feature(self): n = NodeDriver('foo') self.assertEqual(None, n._get_and_check_auth(None)) def test_get_auth_generates_password_but_given_nonsense(self): n = NodeDriver('foo') n.features = {'create_node': ['generates_password']} auth = "nonsense" self.assertRaises(LibcloudError, n._get_and_check_auth, auth) def test_get_auth_no_features_but_given_nonsense(self): n = NodeDriver('foo') auth = "nonsense" self.assertRaises(LibcloudError, n._get_and_check_auth, auth) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_vcloud.py0000664000175000017500000010766713153541406023576 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import httplib, b from libcloud.utils.py3 import ET from libcloud.compute.drivers.vcloud import TerremarkDriver, VCloudNodeDriver, Subject from libcloud.compute.drivers.vcloud import VCloud_1_5_NodeDriver, ControlAccess from libcloud.compute.drivers.vcloud import VCloud_5_1_NodeDriver from libcloud.compute.drivers.vcloud import VCloud_5_5_NodeDriver from libcloud.compute.drivers.vcloud import Vdc from libcloud.compute.base import Node, NodeImage from libcloud.compute.types import NodeState from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import VCLOUD_PARAMS class TerremarkTests(unittest.TestCase, TestCaseMixin): def setUp(self): VCloudNodeDriver.connectionCls.host = "test" VCloudNodeDriver.connectionCls.conn_class = TerremarkMockHttp TerremarkMockHttp.type = None self.driver = TerremarkDriver(*VCLOUD_PARAMS) def test_list_images(self): ret = self.driver.list_images() self.assertEqual( ret[0].id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vAppTemplate/5') def test_list_sizes(self): ret = self.driver.list_sizes() self.assertEqual(ret[0].ram, 512) def test_create_node(self): image = self.driver.list_images()[0] size = self.driver.list_sizes()[0] node = self.driver.create_node( name='testerpart2', image=image, size=size, vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224', network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725', cpus=2, ) 
self.assertTrue(isinstance(node, Node)) self.assertEqual( node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031') self.assertEqual(node.name, 'testerpart2') def test_list_nodes(self): ret = self.driver.list_nodes() node = ret[0] self.assertEqual( node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031') self.assertEqual(node.name, 'testerpart2') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.public_ips, []) self.assertEqual(node.private_ips, ['10.112.78.69']) def test_reboot_node(self): node = self.driver.list_nodes()[0] ret = self.driver.reboot_node(node) self.assertTrue(ret) def test_destroy_node(self): node = self.driver.list_nodes()[0] ret = self.driver.destroy_node(node) self.assertTrue(ret) class VCloud_1_5_Tests(unittest.TestCase, TestCaseMixin): def setUp(self): VCloudNodeDriver.connectionCls.host = 'test' VCloudNodeDriver.connectionCls.conn_class = VCloud_1_5_MockHttp VCloud_1_5_MockHttp.type = None self.driver = VCloud_1_5_NodeDriver(*VCLOUD_PARAMS) def test_list_images(self): ret = self.driver.list_images() self.assertEqual( 'https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id) def test_list_sizes(self): ret = self.driver.list_sizes() self.assertEqual(ret[0].ram, 512) def test_networks(self): ret = self.driver.networks self.assertEqual( ret[0].get('href'), 'https://vm-vcloud/api/network/dca8b667-6c8f-4c3e-be57-7a9425dba4f4') def test_create_node(self): image = self.driver.list_images()[0] size = self.driver.list_sizes()[0] node = self.driver.create_node( name='testNode', image=image, size=size, ex_vdc='MyVdc', ex_network='vCloud - Default', cpus=2, ) self.assertTrue(isinstance(node, Node)) self.assertEqual( 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', node.id) self.assertEqual('testNode', node.name) def test_create_node_clone(self): image = self.driver.list_nodes()[0] node = self.driver.create_node(name='testNode', 
image=image) self.assertTrue(isinstance(node, Node)) self.assertEqual( 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', node.id) self.assertEqual('testNode', node.name) def test_list_nodes(self): ret = self.driver.list_nodes() node = ret[0] self.assertEqual( node.id, 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a') self.assertEqual(node.name, 'testNode') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.public_ips, ['65.41.67.2']) self.assertEqual(node.private_ips, ['65.41.67.2']) self.assertEqual(node.extra, {'vdc': 'MyVdc', 'vms': [{ 'id': 'https://vm-vcloud/api/vApp/vm-dd75d1d3-5b7b-48f0-aff3-69622ab7e045', 'name': 'testVm', 'state': NodeState.RUNNING, 'public_ips': ['65.41.67.2'], 'private_ips': ['65.41.67.2'], 'os_type': 'rhel5_64Guest' }]}) node = ret[1] self.assertEqual( node.id, 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b') self.assertEqual(node.name, 'testNode2') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.public_ips, ['192.168.0.103']) self.assertEqual(node.private_ips, ['192.168.0.100']) self.assertEqual(node.extra, {'vdc': 'MyVdc', 'vms': [{ 'id': 'https://vm-vcloud/api/vApp/vm-dd75d1d3-5b7b-48f0-aff3-69622ab7e046', 'name': 'testVm2', 'state': NodeState.RUNNING, 'public_ips': ['192.168.0.103'], 'private_ips': ['192.168.0.100'], 'os_type': 'rhel5_64Guest' }]}) def test_reboot_node(self): node = self.driver.list_nodes()[0] ret = self.driver.reboot_node(node) self.assertTrue(ret) def test_destroy_node(self): node = self.driver.list_nodes()[0] ret = self.driver.destroy_node(node) self.assertTrue(ret) def test_validate_vm_names(self): # valid inputs self.driver._validate_vm_names(['host-n-ame-name']) self.driver._validate_vm_names(['tc-mybuild-b1']) self.driver._validate_vm_names(None) # invalid inputs self.assertRaises( ValueError, self.driver._validate_vm_names, ['invalid.host']) self.assertRaises( ValueError, 
self.driver._validate_vm_names, ['inv-alid.host']) self.assertRaises( ValueError, self.driver._validate_vm_names, ['hostnametoooolong']) self.assertRaises( ValueError, self.driver._validate_vm_names, ['host$name']) def test_change_vm_names(self): self.driver._change_vm_names( '/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', ['changed1', 'changed2']) def test_change_vm_admin_password(self): self.driver.ex_change_vm_admin_password( '/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', "12345678") def test_is_node(self): self.assertTrue(self.driver._is_node( Node('testId', 'testNode', state=0, public_ips=[], private_ips=[], driver=self.driver))) self.assertFalse(self.driver._is_node( NodeImage('testId', 'testNode', driver=self.driver))) def test_ex_deploy(self): node = self.driver.ex_deploy_node( Node('/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', 'testNode', state=0, public_ips=[], private_ips=[], driver=self.driver), ex_force_customization=False) self.assertEqual(node.state, NodeState.RUNNING) def test_ex_undeploy(self): node = self.driver.ex_undeploy_node( Node('https://test/api/vApp/undeployTest', 'testNode', state=0, public_ips=[], private_ips=[], driver=self.driver)) self.assertEqual(node.state, NodeState.STOPPED) def test_ex_undeploy_with_error(self): node = self.driver.ex_undeploy_node( Node('https://test/api/vApp/undeployErrorTest', 'testNode', state=0, public_ips=[], private_ips=[], driver=self.driver)) self.assertEqual(node.state, NodeState.STOPPED) def test_ex_find_node(self): node = self.driver.ex_find_node('testNode') self.assertEqual(node.name, "testNode") node = self.driver.ex_find_node('testNode', self.driver.vdcs[0]) self.assertEqual(node.name, "testNode") node = self.driver.ex_find_node('testNonExisting', self.driver.vdcs[0]) self.assertEqual(node, None) def test_ex_add_vm_disk__with_invalid_values(self): self.assertRaises( ValueError, self.driver.ex_add_vm_disk, 'dummy', 'invalid value') self.assertRaises( ValueError, 
self.driver.ex_add_vm_disk, 'dummy', '-1') def test_ex_add_vm_disk(self): self.driver.ex_add_vm_disk('https://test/api/vApp/vm-test', '20') def test_ex_set_vm_cpu__with_invalid_values(self): self.assertRaises(ValueError, self.driver.ex_set_vm_cpu, 'dummy', 50) self.assertRaises(ValueError, self.driver.ex_set_vm_cpu, 'dummy', -1) def test_ex_set_vm_cpu(self): self.driver.ex_set_vm_cpu('https://test/api/vApp/vm-test', 4) def test_ex_set_vm_memory__with_invalid_values(self): self.assertRaises( ValueError, self.driver.ex_set_vm_memory, 'dummy', 777) self.assertRaises( ValueError, self.driver.ex_set_vm_memory, 'dummy', -1024) def test_ex_set_vm_memory(self): self.driver.ex_set_vm_memory('https://test/api/vApp/vm-test', 1024) def test_vdcs(self): vdcs = self.driver.vdcs self.assertEqual(len(vdcs), 1) self.assertEqual( vdcs[0].id, 'https://vm-vcloud/api/vdc/3d9ae28c-1de9-4307-8107-9356ff8ba6d0') self.assertEqual(vdcs[0].name, 'MyVdc') self.assertEqual(vdcs[0].allocation_model, 'AllocationPool') self.assertEqual(vdcs[0].storage.limit, 5120000) self.assertEqual(vdcs[0].storage.used, 1984512) self.assertEqual(vdcs[0].storage.units, 'MB') self.assertEqual(vdcs[0].cpu.limit, 160000) self.assertEqual(vdcs[0].cpu.used, 0) self.assertEqual(vdcs[0].cpu.units, 'MHz') self.assertEqual(vdcs[0].memory.limit, 527360) self.assertEqual(vdcs[0].memory.used, 130752) self.assertEqual(vdcs[0].memory.units, 'MB') def test_ex_list_nodes(self): self.assertEqual( len(self.driver.ex_list_nodes()), len(self.driver.list_nodes())) def test_ex_list_nodes__masked_exception(self): """ Test that we don't mask other exceptions. 
""" brokenVdc = Vdc('/api/vdc/brokenVdc', 'brokenVdc', self.driver) self.assertRaises(AnotherError, self.driver.ex_list_nodes, (brokenVdc)) def test_ex_power_off(self): node = Node( 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) self.driver.ex_power_off_node(node) def test_ex_query(self): results = self.driver.ex_query( 'user', filter='name==jrambo', page=2, page_size=30, sort_desc='startDate') self.assertEqual(len(results), 1) self.assertEqual(results[0]['type'], 'UserRecord') self.assertEqual(results[0]['name'], 'jrambo') self.assertEqual(results[0]['isLdapUser'], 'true') def test_ex_get_control_access(self): node = Node( 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) control_access = self.driver.ex_get_control_access(node) self.assertEqual( control_access.everyone_access_level, ControlAccess.AccessLevel.READ_ONLY) self.assertEqual(len(control_access.subjects), 1) self.assertEqual(control_access.subjects[0].type, 'group') self.assertEqual(control_access.subjects[0].name, 'MyGroup') self.assertEqual(control_access.subjects[ 0].id, 'https://vm-vcloud/api/admin/group/b8202c48-7151-4e61-9a6c-155474c7d413') self.assertEqual(control_access.subjects[ 0].access_level, ControlAccess.AccessLevel.FULL_CONTROL) def test_ex_set_control_access(self): node = Node( 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) control_access = ControlAccess(node, None, [Subject( name='MyGroup', type='group', access_level=ControlAccess.AccessLevel.FULL_CONTROL)]) self.driver.ex_set_control_access(node, control_access) def test_ex_get_metadata(self): node = Node( 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) metadata = self.driver.ex_get_metadata(node) self.assertEqual(metadata, {'owners': 
'msamia@netsuite.com'}) def test_ex_set_metadata_entry(self): node = Node( 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) self.driver.ex_set_metadata_entry(node, 'foo', 'bar') class VCloud_5_1_Tests(unittest.TestCase, TestCaseMixin): def setUp(self): VCloudNodeDriver.connectionCls.host = 'test' VCloudNodeDriver.connectionCls.conn_class = VCloud_1_5_MockHttp VCloud_1_5_MockHttp.type = None self.driver = VCloudNodeDriver( *VCLOUD_PARAMS, **{'api_version': '5.1'}) self.assertTrue(isinstance(self.driver, VCloud_5_1_NodeDriver)) def _test_create_node_valid_ex_vm_memory(self): # TODO: Hook up the fixture values = [4, 1024, 4096] image = self.driver.list_images()[0] size = self.driver.list_sizes()[0] for value in values: self.driver.create_node( name='testerpart2', image=image, size=size, vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224', network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725', cpus=2, ex_vm_memory=value ) def test_create_node_invalid_ex_vm_memory(self): values = [1, 3, 7] image = self.driver.list_images()[0] size = self.driver.list_sizes()[0] for value in values: try: self.driver.create_node( name='testerpart2', image=image, size=size, vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224', network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725', cpus=2, ex_vm_memory=value ) except ValueError: pass else: self.fail('Exception was not thrown') def test_list_images(self): ret = self.driver.list_images() self.assertEqual( 'https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id) class VCloud_5_5_Tests(unittest.TestCase, TestCaseMixin): def setUp(self): VCloudNodeDriver.connectionCls.host = 'test' VCloudNodeDriver.connectionCls.conn_class = VCloud_5_5_MockHttp VCloud_5_5_MockHttp.type = None self.driver = VCloudNodeDriver( *VCLOUD_PARAMS, **{'api_version': '5.5'}) 
self.assertTrue(isinstance(self.driver, VCloud_5_5_NodeDriver)) def test_ex_create_snapshot(self): node = Node( 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) self.driver.ex_create_snapshot(node) def test_ex_remove_snapshots(self): node = Node( 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) self.driver.ex_remove_snapshots(node) def test_ex_revert_to_snapshot(self): node = Node( 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) self.driver.ex_revert_to_snapshot(node) def test_ex_acquire_mks_ticket(self): node = self.driver.ex_find_node('testNode') self.driver.ex_acquire_mks_ticket(node.id) class TerremarkMockHttp(MockHttp): fixtures = ComputeFileFixtures('terremark') def _api_v0_8_login(self, method, url, body, headers): headers['set-cookie'] = 'vcloud-token=testtoken' body = self.fixtures.load('api_v0_8_login.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _api_v0_8_org_240(self, method, url, body, headers): body = self.fixtures.load('api_v0_8_org_240.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _api_v0_8_vdc_224(self, method, url, body, headers): body = self.fixtures.load('api_v0_8_vdc_224.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _api_v0_8_vdc_224_catalog(self, method, url, body, headers): body = self.fixtures.load('api_v0_8_vdc_224_catalog.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _api_v0_8_catalogItem_5(self, method, url, body, headers): body = self.fixtures.load('api_v0_8_catalogItem_5.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _api_v0_8_vdc_224_action_instantiateVAppTemplate(self, method, url, body, headers): body = self.fixtures.load( 
'api_v0_8_vdc_224_action_instantiateVAppTemplate.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _api_v0_8_vapp_14031_action_deploy(self, method, url, body, headers): body = self.fixtures.load('api_v0_8_vapp_14031_action_deploy.xml') return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _api_v0_8_task_10496(self, method, url, body, headers): body = self.fixtures.load('api_v0_8_task_10496.xml') return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _api_v0_8_vapp_14031_power_action_powerOn(self, method, url, body, headers): body = self.fixtures.load( 'api_v0_8_vapp_14031_power_action_powerOn.xml') return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _api_v0_8_vapp_14031(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('api_v0_8_vapp_14031_get.xml') elif method == 'DELETE': body = '' return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _api_v0_8_vapp_14031_power_action_reset(self, method, url, body, headers): body = self.fixtures.load('api_v0_8_vapp_14031_power_action_reset.xml') return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _api_v0_8_vapp_14031_power_action_poweroff(self, method, url, body, headers): body = self.fixtures.load( 'api_v0_8_vapp_14031_power_action_poweroff.xml') return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _api_v0_8_task_11001(self, method, url, body, headers): body = self.fixtures.load('api_v0_8_task_11001.xml') return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) class AnotherErrorMember(Exception): """ helper class for the synthetic exception """ def __init__(self): self.tag = 'Error' def get(self, foo): return 'ACCESS_TO_RESOURCE_IS_FORBIDDEN_1' class AnotherError(Exception): pass class VCloud_1_5_MockHttp(MockHttp, unittest.TestCase): fixtures = ComputeFileFixtures('vcloud_1_5') def 
request(self, method, url, body=None, headers=None, raw=False, stream=False): self.assertTrue(url.startswith('/api/'), ('"%s" is invalid. Needs to ' 'start with "/api". The passed URL should be just ' 'the path, not full URL.', url)) super(VCloud_1_5_MockHttp, self).request(method, url, body, headers, raw) def _api_sessions(self, method, url, body, headers): headers['x-vcloud-authorization'] = 'testtoken' body = self.fixtures.load('api_sessions.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_org(self, method, url, body, headers): body = self.fixtures.load('api_org.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a(self, method, url, body, headers): body = self.fixtures.load( 'api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4(self, method, url, body, headers): body = self.fixtures.load( 'api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0(self, method, url, body, headers): body = self.fixtures.load( 'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vdc_brokenVdc(self, method, url, body, headers): body = self.fixtures.load('api_vdc_brokenVdc.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vapp_errorRaiser(self, method, url, body, headers): m = AnotherErrorMember() raise AnotherError(m) def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate(self, method, url, body, headers): body = self.fixtures.load( 'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] def 
_api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn(self, method, url, body, headers): return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers) # Clone def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp(self, method, url, body, headers): body = self.fixtures.load( 'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_networkConnectionSection(self, method, url, body, headers): body = self.fixtures.load( 'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a(self, method, url, body, headers): status = httplib.OK if method == 'GET': body = self.fixtures.load( 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml') status = httplib.OK elif method == 'DELETE': body = self.fixtures.load( 'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml') status = httplib.ACCEPTED return status, body, headers, httplib.responses[status] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b(self, method, url, body, headers): body = self.fixtures.load( 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c(self, method, url, body, headers): body = self.fixtures.load( 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045(self, method, url, body, headers): body = self.fixtures.load( 'put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] def 
_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load( 'get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml') status = httplib.OK else: body = self.fixtures.load( 'put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml') status = httplib.ACCEPTED return status, body, headers, httplib.responses[status] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset(self, method, url, body, headers): return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers) def _api_task_b034df55_fe81_4798_bc81_1f0fd0ead450(self, method, url, body, headers): body = self.fixtures.load( 'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4(self, method, url, body, headers): body = self.fixtures.load( 'api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_catalogItem_3132e037_759b_4627_9056_ca66466fa607(self, method, url, body, headers): body = self.fixtures.load( 'api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_deployTest(self, method, url, body, headers): body = self.fixtures.load('api_task_deploy.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_action_deploy(self, method, url, body, headers): body = self.fixtures.load('api_task_deploy.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] def _api_task_deploy(self, method, url, body, headers): body = self.fixtures.load('api_task_deploy.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] def 
_api_vApp_undeployTest(self, method, url, body, headers): body = self.fixtures.load('api_vApp_undeployTest.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_undeployTest_action_undeploy(self, method, url, body, headers): body = self.fixtures.load('api_task_undeploy.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] def _api_task_undeploy(self, method, url, body, headers): body = self.fixtures.load('api_task_undeploy.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_undeployErrorTest(self, method, url, body, headers): body = self.fixtures.load('api_vApp_undeployTest.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_undeployErrorTest_action_undeploy(self, method, url, body, headers): if b('shutdown') in b(body): body = self.fixtures.load('api_task_undeploy_error.xml') else: body = self.fixtures.load('api_task_undeploy.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] def _api_task_undeployError(self, method, url, body, headers): body = self.fixtures.load('api_task_undeploy_error.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vapp_access_to_resource_forbidden(self, method, url, body, headers): raise Exception( ET.fromstring(self.fixtures.load('api_vApp_vapp_access_to_resource_forbidden.xml'))) def _api_vApp_vm_test(self, method, url, body, headers): body = self.fixtures.load('api_vApp_vm_test.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vm_test_virtualHardwareSection_disks(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load( 'get_api_vApp_vm_test_virtualHardwareSection_disks.xml') status = httplib.OK else: body = self.fixtures.load( 'put_api_vApp_vm_test_virtualHardwareSection_disks.xml') status = httplib.ACCEPTED return status, body, headers, httplib.responses[status] def 
_api_vApp_vm_test_virtualHardwareSection_cpu(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load( 'get_api_vApp_vm_test_virtualHardwareSection_cpu.xml') status = httplib.OK else: body = self.fixtures.load( 'put_api_vApp_vm_test_virtualHardwareSection_cpu.xml') status = httplib.ACCEPTED return status, body, headers, httplib.responses[status] def _api_vApp_vm_test_virtualHardwareSection_memory(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load( 'get_api_vApp_vm_test_virtualHardwareSection_memory.xml') status = httplib.OK else: body = self.fixtures.load( 'put_api_vApp_vm_test_virtualHardwareSection_memory.xml') status = httplib.ACCEPTED return status, body, headers, httplib.responses[status] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_powerOff(self, method, url, body, headers): return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers) def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(self, method, url, body, headers): assert method == 'POST' body = self.fixtures.load( 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] def _api_query(self, method, url, body, headers): assert method == 'GET' if 'type=user' in url: self.assertTrue('page=2' in url) self.assertTrue('filter=(name==jrambo)' in url) self.assertTrue('sortDesc=startDate') body = self.fixtures.load('api_query_user.xml') elif 'type=group' in url: body = self.fixtures.load('api_query_group.xml') else: raise AssertionError('Unexpected query type') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_metadata(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('api_vapp_post_metadata.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] 
else: body = self.fixtures.load('api_vapp_get_metadata.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_controlAccess(self, method, url, body, headers): body = self.fixtures.load( 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_controlAccess(self, method, url, body, headers): body = str(body) self.assertTrue(method == 'POST') self.assertTrue( 'false' in body) self.assertTrue( '' in body) self.assertTrue('FullControl' in body) body = self.fixtures.load( 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413(self, method, url, body, headers): body = self.fixtures.load( 'api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] class VCloud_5_5_MockHttp(VCloud_1_5_MockHttp): # TODO: Move 5.5 fixtures to their own folder def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_createSnapshot(self, method, url, body, headers): assert method == 'POST' body = self.fixtures.load( 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_create_snapshot.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_task_fab4b26f_4f2e_4d49_ad01_ae9324bbfe48(self, method, url, body, headers): body = self.fixtures.load( 'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_removeAllSnapshots(self, method, url, body, headers): assert method == 'POST' body = self.fixtures.load( 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_remove_snapshots.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] 
def _api_task_2518935e_b315_4d8e_9e99_9275f751877c(self, method, url, body, headers): body = self.fixtures.load( 'api_task_2518935e_b315_4d8e_9e99_9275f751877c.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_revertToCurrentSnapshot(self, method, url, body, headers): assert method == 'POST' body = self.fixtures.load( 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_revert_snapshot.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] def _api_task_fe75d3af_f5a3_44a5_b016_ae0bdadfc32b(self, method, url, body, headers): body = self.fixtures.load( 'api_task_fe75d3af_f5a3_44a5_b016_ae0bdadfc32b.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_nephoscale.py0000664000175000017500000001632213153541406024406 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Created by Markos Gogoulos (https://mist.io) # import sys import unittest from libcloud.utils.py3 import httplib from libcloud.compute.drivers.nephoscale import NephoscaleNodeDriver from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures class NephoScaleTest(unittest.TestCase, TestCaseMixin): def setUp(self): NephoscaleNodeDriver.connectionCls.conn_class = NephoscaleMockHttp self.driver = NephoscaleNodeDriver('user', 'password') def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 13) for size in sizes: self.assertEqual(type(size.disk), int) self.assertEqual(type(size.ram), int) def test_list_images(self): images = self.driver.list_images() self.assertEqual(len(images), 18) for image in images: arch = image.extra.get('architecture') self.assertTrue(arch.startswith('x86')) def test_list_locations(self): locations = self.driver.list_locations() self.assertEqual(len(locations), 2) self.assertEqual(locations[0].name, "SJC-1") def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 2) self.assertEqual(nodes[0].extra.get('zone'), 'RIC-1') self.assertEqual(nodes[0].name, 'mongodb-staging') self.assertEqual(nodes[0].extra.get('service_type'), 'CS05 - 0.5GB, 1Core, 25GB') def test_list_keys(self): keys = self.driver.ex_list_keypairs() self.assertEqual(len(keys), 2) self.assertEqual(keys[0].name, 'mistio-ssh') def test_list_ssh_keys(self): ssh_keys = self.driver.ex_list_keypairs(ssh=True) self.assertEqual(len(ssh_keys), 1) self.assertTrue(ssh_keys[0].public_key.startswith('ssh-rsa')) def test_list_password_keys(self): password_keys = self.driver.ex_list_keypairs(password=True) self.assertEqual(len(password_keys), 1) self.assertEqual(password_keys[0].password, '23d493j5') def test_reboot_node(self): node = self.driver.list_nodes()[0] result = self.driver.reboot_node(node) self.assertTrue(result) def 
test_destroy_node(self): node = self.driver.list_nodes()[0] result = self.driver.destroy_node(node) self.assertTrue(result) def test_stop_node(self): node = self.driver.list_nodes()[0] result = self.driver.ex_stop_node(node) self.assertTrue(result) def test_start_node(self): node = self.driver.list_nodes()[0] result = self.driver.ex_start_node(node) self.assertTrue(result) def test_rename_node(self): node = self.driver.list_nodes()[0] result = self.driver.rename_node(node, 'new-name') self.assertTrue(result) def test_create_node(self): name = 'mongodb-staging' size = self.driver.list_sizes()[0] image = self.driver.list_images()[3] node = self.driver.create_node(name=name, size=size, nowait=True, image=image) self.assertEqual(node.name, 'mongodb-staging') def test_create_node_no_name(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[3] self.assertRaises(TypeError, self.driver.create_node, size=size, image=image) def test_delete_ssh_keys(self): self.assertTrue(self.driver.ex_delete_keypair(key_id=72209, ssh=True)) def test_delete_password_keys(self): self.assertTrue(self.driver.ex_delete_keypair(key_id=72211)) class NephoscaleMockHttp(MockHttp): fixtures = ComputeFileFixtures('nephoscale') def _server_type_cloud(self, method, url, body, headers): body = self.fixtures.load('list_sizes.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _server_cloud(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('success_action.json') else: body = self.fixtures.load('list_nodes.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _image_server(self, method, url, body, headers): body = self.fixtures.load('list_images.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _datacenter_zone(self, method, url, body, headers): body = self.fixtures.load('list_locations.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _key(self, method, url, body, 
headers): body = self.fixtures.load('list_keys.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _key_sshrsa(self, method, url, body, headers): body = self.fixtures.load('list_ssh_keys.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _key_password(self, method, url, body, headers): body = self.fixtures.load('list_password_keys.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _server_cloud_88241(self, method, url, body, headers): body = self.fixtures.load('success_action.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _server_cloud_88241_initiator_restart(self, method, url, body, headers): body = self.fixtures.load('success_action.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _server_cloud_88241_initiator_start(self, method, url, body, headers): body = self.fixtures.load('success_action.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _server_cloud_88241_initiator_stop(self, method, url, body, headers): body = self.fixtures.load('success_action.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _key_password_72211(self, method, url, body, headers): body = self.fixtures.load('success_action.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _key_sshrsa_72209(self, method, url, body, headers): body = self.fixtures.load('success_action.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_profitbricks.py0000664000175000017500000036622713153541406025002 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from libcloud.utils.py3 import httplib from libcloud.test import MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.compute.types import Provider from libcloud.compute.types import NodeState from libcloud.compute.providers import get_driver from libcloud.test import unittest from libcloud.test.secrets import PROFIT_BRICKS_PARAMS class ProfitBricksTests(unittest.TestCase): def setUp(self): ProfitBricks = get_driver(Provider.PROFIT_BRICKS) ProfitBricks.connectionCls.conn_class = ProfitBricksMockHttp self.driver = ProfitBricks(*PROFIT_BRICKS_PARAMS) ''' Function tests for listing items ''' def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 7) def test_list_images(self): ''' Fetch all images and then fetch with filters ''' all_images = self.driver.list_images() hdd_images = self.driver.list_images('HDD') cdd_images = self.driver.list_images('CDROM') private_images = self.driver.list_images(is_public=False) self.assertEqual(len(all_images), 4) self.assertEqual(len(hdd_images), 2) self.assertEqual(len(cdd_images), 2) self.assertEqual(len(private_images), 2) image = all_images[0] extra = image.extra ''' Standard properties ''' self.assertEqual(image.id, 'img-1') self.assertEqual(image.name, 'Test-Image-Two-CDROM') ''' Extra metadata ''' self.assertEqual(extra['created_date'], '2014-11-14T15:22:19Z') self.assertEqual(extra['created_by'], 
'System') self.assertEqual(extra['etag'], '957e0eac7456fa7554e73bf0d18860eb') self.assertEqual(extra['last_modified_date'], '2014-11-14T15:22:19Z') self.assertEqual(extra['last_modified_by'], 'System') ''' Extra properties ''' self.assertEqual(extra['name'], 'Test-Image-Two-CDROM') self.assertEqual(extra['description'], '') self.assertEqual(extra['location'], 'us/las') self.assertEqual(extra['size'], 4) self.assertEqual(extra['cpu_hot_plug'], False) self.assertEqual(extra['cpu_hot_unplug'], False) self.assertEqual(extra['ram_hot_plug'], False) self.assertEqual(extra['ram_hot_unplug'], False) self.assertEqual(extra['nic_hot_plug'], False) self.assertEqual(extra['nic_hot_unplug'], False) self.assertEqual(extra['disc_virtio_hot_plug'], False) self.assertEqual(extra['disc_virtio_hot_unplug'], False) self.assertEqual(extra['disc_scsi_hot_plug'], False) self.assertEqual(extra['disc_scsi_hot_unplug'], False) self.assertEqual(extra['licence_type'], 'OTHER') self.assertEqual(extra['image_type'], 'CDROM') self.assertEqual(extra['public'], True) def test_list_locations(self): locations = self.driver.list_locations() self.assertEqual(len(locations), 3) ''' Standard properties ''' location = locations[0] self.assertEqual(location.id, 'de/fkb') self.assertEqual(location.name, 'karlsruhe') self.assertEqual(location.country, 'de') def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 2) node = nodes[0] extra = node.extra ''' Standard properties ''' self.assertEqual( node.id, 'srv-1' ) self.assertEqual( node.name, 'Test Node.' 
) self.assertEqual( node.state, NodeState.RUNNING ) self.assertEqual( node.public_ips, [] ) self.assertEqual( node.private_ips, [] ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-18T07:28:05Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'e7cf186125f51f3d9511754a40dcd12c' ) self.assertEqual( extra['last_modified_date'], '2016-10-18T07:28:05Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['availability_zone'], 'AUTO' ) self.assertEqual( extra['boot_cdrom'], None ) self.assertEqual( extra['boot_volume']['id'], 'bvol-1' ) self.assertEqual( extra['boot_volume']['href'], ( '/cloudapi/v3/datacenters/dc-1' '/volumes/bvol-1' ) ) self.assertEqual( extra['boot_volume']['properties']['name'], 'Test Node Volume' ) self.assertEqual( extra['boot_volume']['properties']['type'], 'HDD' ) self.assertEqual( extra['boot_volume']['properties']['size'], 10 ) self.assertEqual( extra['boot_volume']['properties']['image'], 'bvol-img' ) self.assertEqual( extra['cpu_family'], 'AMD_OPTERON' ) ''' Other miscellaneous ''' self.assertEqual( len(extra['entities']), 3 ) self.assertNotIn( 'status_url', extra ) def test_ex_list_availability_zones(self): zones = self.driver.ex_list_availability_zones() self.assertEqual(len(zones), 3) zones_sorted = sorted(list(a.name for a in zones)) zones_expected = ['AUTO', 'ZONE_1', 'ZONE_2'] self.assertEqual(zones_sorted, zones_expected) def test_list_volumes(self): volumes = self.driver.list_volumes() self.assertEqual(len(volumes), 3) volume = volumes[0] extra = volume.extra ''' Standard properties ''' self.assertEqual( volume.id, 'bvol-1' ) self.assertEqual( volume.name, 'Test Volume' ) self.assertEqual( volume.size, 10 ) ''' Extra ''' self.assertEqual( extra['provisioning_state'], NodeState.RUNNING) ''' Extra metadata ''' self.assertEqual( extra['created_by'], 
'test@test.test' ) self.assertEqual( extra['created_date'], '2016-10-18T07:20:41Z' ) self.assertEqual( extra['etag'], '33f6b8d506e7ad756e8554b915f29c61' ) self.assertEqual( extra['last_modified_date'], '2016-10-18T07:20:41Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.test' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test Volume' ) self.assertEqual( extra['type'], 'HDD' ) self.assertEqual( extra['size'], 10 ) self.assertEqual( extra['availability_zone'], 'AUTO' ) self.assertEqual( extra['image'], 'bvol-img' ) self.assertEqual( extra['image_password'], None ) self.assertEqual( extra['ssh_keys'], None ) self.assertEqual( extra['bus'], 'VIRTIO' ) self.assertEqual( extra['licence_type'], 'LINUX' ) self.assertEqual( extra['cpu_hot_plug'], True ) self.assertEqual( extra['cpu_hot_unplug'], False ) self.assertEqual( extra['ram_hot_plug'], True ) self.assertEqual( extra['ram_hot_unplug'], False ) self.assertEqual( extra['nic_hot_plug'], True ) self.assertEqual( extra['nic_hot_unplug'], True ) self.assertEqual( extra['disc_virtio_hot_plug'], True ) self.assertEqual( extra['disc_virtio_hot_unplug'], True ) self.assertEqual( extra['disc_scsi_hot_plug'], False ) self.assertEqual( extra['disc_scsi_hot_unplug'], False ) self.assertEqual( extra['device_number'], 1 ) def test_ex_list_datacenters(self): datacenters = self.driver.ex_list_datacenters() self.assertEqual(len(datacenters), 1) datacenter = datacenters[0] extra = datacenter.extra ''' Standard properties ''' self.assertEqual( datacenter.id, 'dc-1' ) self.assertEqual( datacenter.href, '/cloudapi/v3/datacenters/dc-1' ) self.assertEqual( datacenter.name, 'Test One.' ) self.assertEqual( datacenter.version, 3 ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test One.' 
) self.assertEqual( extra['description'], 'A test data center' ) self.assertEqual( extra['location'], 'de/fra' ) self.assertEqual( extra['version'], 3 ) self.assertEqual( extra['features'], ['SSD', 'MULTIPLE_CPU'] ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-14T07:24:59Z' ) self.assertEqual( extra['created_by'], 'test@test.test' ) self.assertEqual( extra['etag'], 'bdddec2287cb7723e86ac088bf644606' ) self.assertEqual( extra['last_modified_date'], '2016-10-17T15:27:25Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.test' ) self.assertEqual( extra['state'], 'AVAILABLE' ) self.assertEqual( extra['provisioning_state'], NodeState.RUNNING ) self.assertEqual( len(extra['entities']), 4 ) self.assertNotIn( 'status_url', extra ) def test_list_snapshots(self): volume_snapshots = self.driver.list_snapshots() self.assertEqual(len(volume_snapshots), 1) snapshot = volume_snapshots[0] ''' Standard properties ''' self.assertEqual( snapshot.id, 'sshot' ) self.assertEqual( snapshot.size, 10 ) self.assertEqual( snapshot.created, '2016-10-26T11:38:45Z' ) self.assertEqual( snapshot.state, NodeState.RUNNING) self.assertEqual( snapshot.name, 'Balancer Testing 1 Storage-Snapshot-10/26/2016' ) ''' Extra properties ''' self.assertEqual( snapshot.extra['name'], 'Balancer Testing 1 Storage-Snapshot-10/26/2016' ) self.assertEqual( snapshot.extra['description'], ( 'Created from \"Balancer Testing 1' ' Storage\" in Data Center \"Snapshot\"' ) ) self.assertEqual( snapshot.extra['location'], 'us/las' ) self.assertEqual( snapshot.extra['size'], 10 ) self.assertEqual( snapshot.extra['cpu_hot_plug'], True ) self.assertEqual( snapshot.extra['cpu_hot_unplug'], False ) self.assertEqual( snapshot.extra['ram_hot_plug'], True ) self.assertEqual( snapshot.extra['ram_hot_unplug'], False ) self.assertEqual( snapshot.extra['nic_hot_plug'], True ) self.assertEqual( snapshot.extra['nic_hot_unplug'], True ) self.assertEqual( snapshot.extra['disc_virtio_hot_plug'], True ) 
self.assertEqual( snapshot.extra['disc_virtio_hot_unplug'], True ) self.assertEqual( snapshot.extra['disc_scsi_hot_plug'], False ) self.assertEqual( snapshot.extra['disc_scsi_hot_unplug'], False ) self.assertEqual( snapshot.extra['licence_type'], 'LINUX' ) ''' Extra metadata ''' self.assertEqual( snapshot.extra['created_date'], '2016-10-26T11:38:45Z' ) self.assertEqual( snapshot.extra['created_by'], 'test@test.te' ) self.assertEqual( snapshot.extra['etag'], '01873262ac042b5f44ed33b4241225b4' ) self.assertEqual( snapshot.extra['last_modified_date'], '2016-10-26T11:38:45Z' ) self.assertEqual( snapshot.extra['last_modified_by'], 'test@test.te' ) self.assertEqual( snapshot.extra['state'], 'AVAILABLE' ) ''' Function tests for operations on volume snapshots ''' def test_create_volume_snapshot(self): volume = self.driver.ex_describe_volume( ( '/cloudapi/v3/datacenters/' 'dc-1/' 'volumes/vol-2' ) ) snapshot = self.driver.create_volume_snapshot(volume=volume) ''' Standard properties ''' self.assertEqual( snapshot.id, 'sshot' ) self.assertEqual( snapshot.size, 10 ) self.assertEqual( snapshot.created, '2016-10-26T11:38:45Z' ) self.assertEqual( snapshot.state, NodeState.PENDING ) self.assertEqual( snapshot.name, 'Test Created Snapshot' ) ''' Extra properties ''' self.assertEqual( snapshot.extra['name'], 'Test Created Snapshot' ) self.assertEqual( snapshot.extra['description'], 'Test Created Snapshot' ) self.assertEqual( snapshot.extra['location'], 'us/las' ) self.assertEqual( snapshot.extra['size'], 10 ) self.assertEqual( snapshot.extra['cpu_hot_plug'], True ) self.assertEqual( snapshot.extra['cpu_hot_unplug'], False ) self.assertEqual( snapshot.extra['ram_hot_plug'], True ) self.assertEqual( snapshot.extra['ram_hot_unplug'], False ) self.assertEqual( snapshot.extra['nic_hot_plug'], True ) self.assertEqual( snapshot.extra['nic_hot_unplug'], True ) self.assertEqual( snapshot.extra['disc_virtio_hot_plug'], True ) self.assertEqual( snapshot.extra['disc_virtio_hot_unplug'], True ) 
self.assertEqual( snapshot.extra['disc_scsi_hot_plug'], False ) self.assertEqual( snapshot.extra['disc_scsi_hot_unplug'], False ) self.assertEqual( snapshot.extra['licence_type'], 'LINUX' ) ''' Extra metadata ''' self.assertEqual( snapshot.extra['created_date'], '2016-10-26T11:38:45Z' ) self.assertEqual( snapshot.extra['created_by'], 'test@test.te' ) self.assertEqual( snapshot.extra['etag'], '01873262ac042b5f44ed33b4241225b4' ) self.assertEqual( snapshot.extra['last_modified_date'], '2016-10-26T11:38:45Z' ) self.assertEqual( snapshot.extra['last_modified_by'], 'test@test.te' ) self.assertEqual( snapshot.extra['state'], 'BUSY' ) def test_ex_describe_snapshot(self): snapshot_w_href = self.driver.ex_describe_snapshot( ex_href='/cloudapi/v3/snapshots/sshot' ) snapshot_w_id = self.driver.ex_describe_snapshot( ex_snapshot_id='sshot' ) self._verify_snapshot(snapshot=snapshot_w_href) self._verify_snapshot(snapshot=snapshot_w_id) def _verify_snapshot(self, snapshot): ''' Standard properties ''' self.assertEqual( snapshot.id, 'sshot' ) self.assertEqual( snapshot.size, 10 ) self.assertEqual( snapshot.created, '2016-10-26T11:38:45Z' ) self.assertEqual( snapshot.state, NodeState.RUNNING ) self.assertEqual( snapshot.name, 'Test Snapshot' ) ''' Extra properties ''' self.assertEqual( snapshot.extra['name'], 'Test Snapshot' ) self.assertEqual( snapshot.extra['description'], 'Test Snapshot' ) self.assertEqual( snapshot.extra['location'], 'us/las' ) self.assertEqual( snapshot.extra['size'], 10 ) self.assertEqual( snapshot.extra['cpu_hot_plug'], True ) self.assertEqual( snapshot.extra['cpu_hot_unplug'], False ) self.assertEqual( snapshot.extra['ram_hot_plug'], True ) self.assertEqual( snapshot.extra['ram_hot_unplug'], False ) self.assertEqual( snapshot.extra['nic_hot_plug'], True ) self.assertEqual( snapshot.extra['nic_hot_unplug'], True ) self.assertEqual( snapshot.extra['disc_virtio_hot_plug'], True ) self.assertEqual( snapshot.extra['disc_virtio_hot_unplug'], True ) 
self.assertEqual( snapshot.extra['disc_scsi_hot_plug'], False ) self.assertEqual( snapshot.extra['disc_scsi_hot_unplug'], False ) self.assertEqual( snapshot.extra['licence_type'], 'LINUX' ) ''' Extra metadata ''' self.assertEqual( snapshot.extra['created_date'], '2016-10-26T11:38:45Z' ) self.assertEqual( snapshot.extra['created_by'], 'test@test.te' ) self.assertEqual( snapshot.extra['etag'], '01873262ac042b5f44ed33b4241225b4' ) self.assertEqual( snapshot.extra['last_modified_date'], '2016-10-26T11:38:45Z' ) self.assertEqual( snapshot.extra['last_modified_by'], 'test@test.te' ) self.assertEqual( snapshot.extra['state'], 'AVAILABLE' ) def test_ex_update_snapshot(self): snapshot = self.driver.ex_describe_snapshot( ex_href='/cloudapi/v3/snapshots/sshot' ) updated = self.driver.ex_update_snapshot( snapshot=snapshot, name='Updated snapshot', description='Upated snapshot', cpu_hot_unplug=True ) self.assertEqual( updated.name, 'Updated snapshot' ) self.assertEqual( updated.extra['description'], 'Updated snapshot' ) self.assertEqual( updated.extra['cpu_hot_unplug'], True ) def test_destroy_volume_snapshot(self): snapshot = self.driver.ex_describe_snapshot( ex_href='/cloudapi/v3/snapshots/sshot' ) destroyed = self.driver.destroy_volume_snapshot(snapshot) self.assertTrue(destroyed) ''' Function tests for operations on nodes (servers) ''' def test_reboot_node(self): node = self.driver.ex_describe_node( ex_href=( '/cloudapi/v3/datacenters/dc-1' '/servers/srv-1' ) ) rebooted = self.driver.reboot_node(node=node) self.assertTrue(rebooted) def test_create_node(self): image = self.driver.ex_describe_image( ex_href='/cloudapi/v3/images/img-2' ) datacenter = self.driver.ex_describe_datacenter( ex_href='/cloudapi/v3/datacenters/dc-1' ) sizes = self.driver.list_sizes() with self.assertRaises(ValueError): 'Raises value error if no size or ex_ram' self.driver.create_node( name='Test', image=image, ex_disk=40, ex_cores=1 ) with self.assertRaises(ValueError): 'Raises value error if no size 
or ex_cores' self.driver.create_node( name='Test', image=image, ex_disk=40, ex_ram=1024 ) with self.assertRaises(ValueError): 'Raises value error if no size or ex_disk' self.driver.create_node( name='Test', image=image, ex_cores=2, ex_ram=1024 ) with self.assertRaises(ValueError): 'Raises value error if no ssh keys or password' self.driver.create_node( name='Test', image=image, size=sizes[1], datacenter=datacenter ) node = self.driver.create_node( name='Test', image=image, size=sizes[1], ex_password='dummy1234', datacenter=datacenter ) extra = node.extra ''' Standard properties ''' self.assertEqual( node.id, 'srv-2' ) self.assertEqual( node.name, 'Test' ) self.assertEqual( node.state, NodeState.UNKNOWN ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-19T13:25:19Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], '9bea2412ac556b402a07260fc0d1603f' ) self.assertEqual( extra['last_modified_date'], '2016-10-19T13:25:19Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test' ) self.assertEqual( extra['cores'], 1 ) self.assertEqual( extra['ram'], 512 ) self.assertEqual( extra['availability_zone'], 'ZONE_1' ) self.assertEqual( extra['vm_state'], None ) self.assertEqual( extra['boot_cdrom'], None ) self.assertEqual( extra['boot_volume'], None ) self.assertEqual( extra['cpu_family'], 'INTEL_XEON' ) ''' Extra entities ''' self.assertEqual( len(extra['entities']['volumes']['items']), 1 ) def test_destroy_node(self): node = self.driver.ex_describe_node( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/srv-1' ) ) destroyed = self.driver.destroy_node( node=node, ex_remove_attached_disks=False ) self.assertTrue(destroyed) def test_ex_list_attached_volumes(self): node = self.driver.ex_describe_node( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/servers/' 'srv-1' ) ) attached_volumes = 
self.driver.ex_list_attached_volumes(node) self.assertEqual(len(attached_volumes), 3) def test_attach_volume(self): node = self.driver.ex_describe_node( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/srv-1' ) ) volume = self.driver.ex_describe_volume( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'volumes/vol-2' ) ) attached = self.driver.attach_volume(node=node, volume=volume) extra = attached.extra ''' Standard properties ''' self.assertEqual( attached.id, 'vol-2' ) self.assertEqual( attached.name, 'Updated storage name' ) self.assertEqual( attached.size, 40 ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T13:13:36Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'c1800ce349033f9cd2c095ea1ea4976a' ) self.assertEqual( extra['last_modified_date'], '2016-10-17T13:47:52Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Updated storage name' ) self.assertEqual( extra['type'], 'HDD' ) self.assertEqual( extra['size'], 40 ) self.assertEqual( extra['image'], 'bvol-img' ) self.assertEqual( extra['image_password'], None ) self.assertEqual( extra['ssh_keys'], None ) self.assertEqual( extra['bus'], 'VIRTIO' ) self.assertEqual( extra['licence_type'], 'LINUX' ) self.assertEqual( extra['cpu_hot_plug'], True ) self.assertEqual( extra['cpu_hot_unplug'], False ) self.assertEqual( extra['ram_hot_plug'], True ) self.assertEqual( extra['ram_hot_unplug'], False ) self.assertEqual( extra['nic_hot_plug'], True ) self.assertEqual( extra['nic_hot_unplug'], True ) self.assertEqual( extra['disc_virtio_hot_plug'], True ) self.assertEqual( extra['disc_virtio_hot_unplug'], True ) self.assertEqual( extra['disc_scsi_hot_plug'], False ) self.assertEqual( extra['disc_scsi_hot_unplug'], False ) self.assertEqual( extra['device_number'], 3 ) self.assertNotIn( 'availability_zone', extra ) def 
test_detach_volume(self): node = self.driver.ex_describe_node( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/srv-1' ) ) volume = self.driver.ex_describe_volume( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'volumes/vol-2' ) ) detached = self.driver.detach_volume( node=node, volume=volume ) self.assertTrue(detached) def test_ex_stop_node(self): node = self.driver.ex_describe_node( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/srv-1' ) ) stopped = self.driver.ex_stop_node(node) self.assertTrue(stopped) def test_ex_start_node(self): node = self.driver.ex_describe_node( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/srv-1' ) ) started = self.driver.ex_start_node(node) self.assertTrue(started) def test_ex_describe_node(self): node_w_href = self.driver.ex_describe_node( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/srv-1' ) ) node_w_id = self.driver.ex_describe_node( ex_datacenter_id='dc-1', ex_node_id='srv-1' ) self._verify_node(node=node_w_href) self._verify_node(node=node_w_id) def _verify_node(self, node): extra = node.extra ''' Standard properties ''' self.assertEqual( node.id, 'srv-1' ) self.assertEqual( node.name, 'A test node' ) self.assertEqual( node.state, NodeState.RUNNING ) self.assertEqual( node.public_ips, [] ) self.assertEqual( node.private_ips, [] ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-18T07:28:05Z' ) self.assertEqual( extra['created_by'], 'test@test.test' ) self.assertEqual( extra['etag'], 'e7cf186125f51f3d9511754a40dcd12c') self.assertEqual( extra['last_modified_date'], '2016-10-18T07:28:05Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.test' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['availability_zone'], 'AUTO' ) self.assertEqual( extra['boot_cdrom'], None ) self.assertEqual( extra['boot_volume']['id'], 'bvol-1' ) self.assertEqual( extra['boot_volume']['href'], ( '/cloudapi/v3/datacenters/' 'dc-1/' 
'volumes/bvol-1' ) ) self.assertEqual( extra['boot_volume']['properties']['name'], 'A test node boot volume' ) self.assertEqual( extra['boot_volume']['properties']['type'], 'HDD' ) self.assertEqual( extra['boot_volume']['properties']['size'], 10 ) self.assertEqual( extra['boot_volume']['properties']['image'], 'bvol-img' ) self.assertEqual( extra['cpu_family'], 'AMD_OPTERON' ) ''' Other miscellaneous ''' self.assertEqual( len(extra['entities']), 3 ) self.assertNotIn( 'status_url', extra ) def test_ex_update_node(self): node = self.driver.ex_describe_node( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/srv-1' ) ) zones = self.driver.ex_list_availability_zones() updated = self.driver.ex_update_node( node=node, name='Test update', cores=4, ram=4096, availability_zone=zones[0], ex_cpu_family='INTEL_XEON' ) extra = updated.extra ''' Standard properties ''' self.assertEqual( updated.id, 'srv-1' ) self.assertEqual( updated.name, 'A test node' ) self.assertEqual( updated.state, NodeState.RUNNING ) self.assertEqual( updated.public_ips, [] ) self.assertEqual( updated.private_ips, [] ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-18T07:28:05Z' ) self.assertEqual( extra['created_by'], 'test@test.test' ) self.assertEqual( extra['etag'], 'e7cf186125f51f3d9511754a40dcd12c' ) self.assertEqual( extra['last_modified_date'], '2016-10-18T07:28:05Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.test' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['availability_zone'], 'AUTO' ) self.assertEqual( extra['boot_cdrom'], None ) self.assertEqual( extra['boot_volume']['id'], 'bvol-1' ) self.assertEqual( extra['boot_volume']['href'], ( '/cloudapi/v3/datacenters/' 'dc-1/' 'volumes/bvol-1' ) ) self.assertEqual( extra['cpu_family'], 'AMD_OPTERON' ) ''' Function tests for operations on volumes ''' def test_create_volume(self): datacenter = self.driver.ex_describe_datacenter( ex_href=( 
'/cloudapi/v3/datacenters/' 'dc-1' ) ) image = self.driver.ex_describe_image( ex_href='/cloudapi/v3/images/img-2' ) created = self.driver.create_volume( size=30, name='Test volume', ex_type='HDD', ex_bus_type='IDE', ex_datacenter=datacenter, image=image, ex_password='dummyP8ssw0rdl33t' ) self.assertTrue(created) def test_destroy_volume(self): volume = self.driver.ex_describe_volume( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/volumes/' 'vol-2' ) ) destroyed = self.driver.destroy_volume(volume=volume) self.assertTrue(destroyed) def test_ex_update_volume(self): volume = self.driver.ex_describe_volume( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'volumes/vol-2' ) ) updated = self.driver.ex_update_volume( volume=volume, ex_storage_name='Updated volume', size=48, ex_bus_type='VIRTIO' ) extra = updated.extra ''' Standard properties ''' self.assertEqual( updated.id, 'vol-2' ) self.assertEqual( updated.name, 'Updated storage name' ) self.assertEqual( updated.size, 40 ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T13:13:36Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'c1800ce349033f9cd2c095ea1ea4976a' ) self.assertEqual( extra['last_modified_date'], '2016-10-17T13:47:52Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Updated storage name' ) self.assertEqual( extra['type'], 'HDD' ) self.assertEqual( extra['size'], 40 ) self.assertEqual( extra['availability_zone'], 'AUTO' ) self.assertEqual( extra['image'], 'bvol-img' ) self.assertEqual( extra['image_password'], None ) self.assertEqual( extra['ssh_keys'], None ) self.assertEqual( extra['bus'], 'VIRTIO' ) self.assertEqual( extra['licence_type'], 'LINUX' ) self.assertEqual( extra['cpu_hot_plug'], True ) self.assertEqual( extra['cpu_hot_unplug'], False ) self.assertEqual( extra['ram_hot_plug'], True ) self.assertEqual( 
extra['ram_hot_unplug'], False ) self.assertEqual( extra['nic_hot_plug'], True ) self.assertEqual( extra['nic_hot_unplug'], True ) self.assertEqual( extra['disc_virtio_hot_plug'], True ) self.assertEqual( extra['disc_virtio_hot_unplug'], True ) self.assertEqual( extra['disc_scsi_hot_plug'], False ) self.assertEqual( extra['disc_scsi_hot_unplug'], False ) self.assertEqual( extra['device_number'], 3 ) return {} def test_ex_describe_volume(self): volume_w_href = self.driver.ex_describe_volume( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'volumes/vol-2' ) ) volume_w_id = self.driver.ex_describe_volume( ex_datacenter_id='dc-1', ex_volume_id='vol-2' ) self._verify_volume(volume=volume_w_href) self._verify_volume(volume=volume_w_id) def _verify_volume(self, volume): extra = volume.extra ''' Standard properties ''' self.assertEqual( volume.id, 'vol-2' ) self.assertEqual( volume.name, 'Updated storage name' ) self.assertEqual( volume.size, 40 ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T13:13:36Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'c1800ce349033f9cd2c095ea1ea4976a' ) self.assertEqual( extra['last_modified_date'], '2016-10-17T13:47:52Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Updated storage name' ) self.assertEqual( extra['type'], 'HDD' ) self.assertEqual( extra['size'], 40 ) self.assertEqual( extra['availability_zone'], 'AUTO' ) self.assertEqual( extra['image'], 'bvol-img' ) self.assertEqual( extra['image_password'], None ) self.assertEqual( extra['ssh_keys'], None ) self.assertEqual( extra['bus'], 'VIRTIO' ) self.assertEqual( extra['licence_type'], 'LINUX' ) self.assertEqual( extra['cpu_hot_plug'], True ) self.assertEqual( extra['cpu_hot_unplug'], False ) self.assertEqual( extra['ram_hot_plug'], True ) self.assertEqual( extra['ram_hot_unplug'], False ) 
self.assertEqual( extra['nic_hot_plug'], True ) self.assertEqual( extra['nic_hot_unplug'], True ) self.assertEqual( extra['disc_virtio_hot_plug'], True ) self.assertEqual( extra['disc_virtio_hot_unplug'], True ) self.assertEqual( extra['disc_scsi_hot_plug'], False ) self.assertEqual( extra['disc_scsi_hot_unplug'], False ) self.assertEqual( extra['device_number'], 3 ) self.assertNotIn( 'status_url', extra ) ''' Function tests for operations on data centers ''' def test_ex_create_datacenter(self): location = self.driver.ex_describe_location(ex_location_id='de/fkb') datacenter = self.driver.ex_create_datacenter( name='Test Data Center', location=location, description='Test Data Center.' ) extra = datacenter.extra ''' Standard properties ''' self.assertEqual( datacenter.id, 'dc-1' ) self.assertEqual( datacenter.href, '/cloudapi/v3/datacenters/dc-1' ) self.assertEqual( datacenter.name, 'Test Data Center' ) self.assertEqual( datacenter.version, None ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-18T17:20:56Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'c2d3d4d9bbdc0fff7d3c5c3864a68a46' ) self.assertEqual( extra['last_modified_date'], '2016-10-18T17:20:56Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test Data Center' ) self.assertEqual( extra['description'], 'Test Data Center.' 
) self.assertEqual( extra['location'], 'us/las' ) self.assertEqual( extra['version'], None ) self.assertEqual( extra['features'], [] ) ''' Miscellaneous properties ''' self.assertNotIn( 'entities', extra ) self.assertEqual( extra['provisioning_state'], NodeState.PENDING ) def test_ex_destroy_datacenter(self): datacenter = self.driver.ex_describe_datacenter( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1' ) ) destroyed = self.driver.ex_destroy_datacenter( datacenter=datacenter ) self.assertTrue(destroyed) def test_ex_describe_datacenter(self): datacenter_w_href = self.driver.ex_describe_datacenter( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1' ) ) datacenter_w_id = self.driver.ex_describe_datacenter( ex_datacenter_id='dc-1' ) self._verify_datacenter(datacenter=datacenter_w_href) self._verify_datacenter(datacenter=datacenter_w_id) def _verify_datacenter(self, datacenter): extra = datacenter.extra ''' Standard properties ''' self.assertEqual( datacenter.id, 'dc-1' ) self.assertEqual( datacenter.href, '/cloudapi/v3/datacenters/dc-1' ) self.assertEqual( datacenter.name, 'Test Data Center' ) self.assertEqual( datacenter.version, 35 ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T11:33:11Z' ) self.assertEqual( extra['created_by'], 'test@test.test' ) self.assertEqual( extra['etag'], '53b215b8ec0356a649955dab019845a4' ) self.assertEqual( extra['last_modified_date'], '2016-10-18T15:13:44Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.test' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test Data Center' ) self.assertEqual( extra['description'], 'This is a test data center.' 
) self.assertEqual( extra['location'], 'de/fkb' ) self.assertEqual( extra['version'], 35 ) self.assertEqual( extra['features'], ['SSD', 'MULTIPLE_CPU'] ) self.assertNotIn( 'status_url', extra ) self.assertEqual( extra['provisioning_state'], NodeState.RUNNING ) self.assertEqual( len(extra['entities']), 4 ) def test_ex_rename_datacenter(self): datacenter = self.driver.ex_describe_datacenter( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1' ) ) renamed = self.driver.ex_rename_datacenter( datacenter=datacenter, name='Renamed data center' ) extra = renamed.extra ''' Standard properties ''' self.assertEqual( renamed.id, 'dc-1' ) self.assertEqual( renamed.href, '/cloudapi/v3/datacenters/dc-1' ) self.assertEqual( renamed.name, 'Test Data Center' ) self.assertEqual( renamed.version, 35 ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T11:33:11Z' ) self.assertEqual( extra['created_by'], 'test@test.test' ) self.assertEqual( extra['etag'], '53b215b8ec0356a649955dab019845a4' ) self.assertEqual( extra['last_modified_date'], '2016-10-18T15:13:44Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.test' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test Data Center' ) self.assertEqual( extra['description'], 'This is a test data center.' 
) self.assertEqual( extra['location'], 'de/fkb' ) self.assertEqual( extra['version'], 35 ) self.assertEqual( extra['features'], ['SSD', 'MULTIPLE_CPU'] ) self.assertNotIn( 'status_url', extra ) self.assertEqual( extra['provisioning_state'], NodeState.PENDING ) self.assertEqual( len(extra['entities']), 4 ) ''' Function tests for operations on images ''' def test_ex_describe_image(self): image_w_href = self.driver.ex_describe_image( ex_href=( '/cloudapi/v3/images/' 'img-2' ) ) image_w_id = self.driver.ex_describe_image( ex_image_id='img-2' ) self._verify_image(image=image_w_href) self._verify_image(image=image_w_id) def _verify_image(self, image): extra = image.extra ''' Standard properties ''' self.assertEqual( image.id, 'img-2' ) self.assertEqual( image.name, 'vivid-server-cloudimg-amd64-disk1.img' ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2015-10-09T12:06:34Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'bbf76112358af2fc5dd1bf21de8988db' ) self.assertEqual( extra['last_modified_date'], '2015-11-11T15:23:20Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'vivid-server-cloudimg-amd64-disk1.img' ) self.assertEqual( extra['description'], None ) self.assertEqual( extra['location'], 'de/fkb' ) self.assertEqual( extra['size'], 2 ) self.assertEqual( extra['cpu_hot_plug'], False ) self.assertEqual( extra['cpu_hot_unplug'], False ) self.assertEqual( extra['ram_hot_plug'], False ) self.assertEqual( extra['ram_hot_unplug'], False ) self.assertEqual( extra['nic_hot_plug'], False ) self.assertEqual( extra['nic_hot_unplug'], False ) self.assertEqual( extra['disc_virtio_hot_plug'], False ) self.assertEqual( extra['disc_virtio_hot_unplug'], False ) self.assertEqual( extra['disc_scsi_hot_plug'], False ) self.assertEqual( extra['disc_scsi_hot_unplug'], False ) self.assertEqual( 
extra['licence_type'], 'UNKNOWN' ) self.assertEqual( extra['image_type'], 'HDD' ) self.assertEqual( extra['public'], False ) self.assertEqual( extra['href'], '/cloudapi/v3/images/img-2' ) def test_ex_update_image(self): image = self.driver.ex_describe_image( ex_href=( '/cloudapi/v3/images/' 'img-2' ) ) updated = self.driver.ex_update_image( image=image, name='my-updated-image.img' ) extra = updated.extra self.assertEqual( updated.name, 'my-updated-image.img' ) self.assertEqual( extra['last_modified_date'], '2016-11-11T15:23:20Z' ) def test_ex_delete_image(self): image = self.driver.ex_describe_image( ex_href=( '/cloudapi/v3/images/' 'img-2' ) ) deleted = self.driver.ex_delete_image(image) self.assertTrue(deleted) ''' Function tests for operations on network interfaces ''' def test_ex_list_network_interfaces(self): network_interfaces = self.driver.ex_list_network_interfaces() self.assertEqual( len(network_interfaces), 4 ) network_interface = network_interfaces[0] extra = network_interface.extra ''' Standard properties ''' self.assertEqual( network_interface.id, 'nic-1' ) self.assertEqual( network_interface.name, 'Test network interface' ) self.assertEqual( network_interface.href, ( '/cloudapi/v3/datacenters/' 'dc-1/servers/' 's-3/nics/' 'nic-1' ) ) self.assertEqual( network_interface.state, NodeState.RUNNING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T15:46:38Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'dbd8216137cf0ec9951170f93fa8fa53' ) self.assertEqual( extra['last_modified_date'], '2016-10-17T18:19:43Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test network interface' ) self.assertEqual( extra['mac'], '02:01:0b:9d:4d:ce' ) self.assertEqual( extra['ips'], ['10.15.124.11'] ) self.assertEqual( extra['dhcp'], False ) self.assertEqual( extra['lan'], 2 ) 
self.assertEqual( extra['firewall_active'], True ) self.assertEqual( extra['nat'], False ) def test_ex_describe_network_interface(self): nic_w_href = self.driver.ex_describe_network_interface( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3' '/nics/nic-2' ) ) nic_w_id = self.driver.ex_describe_network_interface( ex_datacenter_id='dc-1', ex_server_id='s-3', ex_nic_id='nic-2' ) self._verify_network_interface(network_interface=nic_w_href) self._verify_network_interface(network_interface=nic_w_id) def _verify_network_interface(self, network_interface): extra = network_interface.extra ''' Standard properties ''' self.assertEqual( network_interface.id, 'nic-2' ) self.assertEqual( network_interface.name, 'Updated from LibCloud' ) self.assertEqual( network_interface.href, ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-2' ) ) self.assertEqual( network_interface.state, NodeState.RUNNING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T15:46:38Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'dbd8216137cf0ec9951170f93fa8fa53' ) self.assertEqual( extra['last_modified_date'], '2016-10-17T18:19:43Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Updated from LibCloud' ) self.assertEqual( extra['mac'], '02:01:0b:9d:4d:ce' ) self.assertEqual( extra['ips'], ['10.15.124.11'] ) self.assertEqual( extra['dhcp'], False ) self.assertEqual( extra['lan'], 2 ) self.assertEqual( extra['firewall_active'], True ) self.assertEqual( extra['nat'], False ) ''' Miscellaneous ''' self.assertTrue( len(extra['entities']), 1 ) def test_ex_create_network_interface(self): node = self.driver.ex_describe_node( ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/srv-1' ) ) network_interface = self.driver.ex_create_network_interface( node=node, lan_id=1, dhcp_active=True, nic_name='Creating a 
test network interface.' ) extra = network_interface.extra ''' Standard properties ''' self.assertEqual( network_interface.id, 'nic-2' ) self.assertEqual( network_interface.name, 'Creating a test network interface.' ) self.assertEqual( network_interface.href, ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/srv-1' '/nics/nic-2' ) ) self.assertEqual( network_interface.state, NodeState.PENDING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-19T08:18:50Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], '8679142b0b1b70c8b8c09a8b31e6ded9' ) self.assertEqual( extra['last_modified_date'], '2016-10-19T08:18:50Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Creating a test network interface.' ) self.assertEqual( extra['mac'], None ) self.assertEqual( extra['ips'], [] ) self.assertEqual( extra['dhcp'], True ) self.assertEqual( extra['lan'], 1 ) self.assertEqual( extra['firewall_active'], None ) self.assertEqual( extra['nat'], None ) def test_ex_update_network_interface(self): network_interface = self.driver.ex_describe_network_interface( ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3' '/nics/nic-2' ) ) updated = self.driver.ex_update_network_interface( network_interface=network_interface, name='New network interface', dhcp_active=False ) extra = updated.extra ''' Standard properties ''' self.assertEqual( updated.id, 'nic-2' ) self.assertEqual( updated.name, 'Updated from LibCloud' ) self.assertEqual( updated.href, ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-2' ) ) self.assertEqual( updated.state, NodeState.PENDING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-19T08:18:55Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], '56f8d8bbdc84faad4188f647a49a565b' ) self.assertEqual( 
extra['last_modified_date'], '2016-10-19T09:44:59Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Updated from LibCloud' ) self.assertEqual( extra['mac'], '02:01:68:c1:e8:88' ) self.assertEqual( extra['ips'], ['11.12.13.14'] ) self.assertEqual( extra['dhcp'], True ) self.assertEqual( extra['lan'], 1 ) self.assertEqual( extra['firewall_active'], False ) self.assertEqual( extra['nat'], False ) self.assertTrue(updated) def test_ex_destroy_network_interface(self): network_interface = self.driver.ex_describe_network_interface( ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3' '/nics/nic-2' ) ) destroyed = self.driver.ex_destroy_network_interface( network_interface=network_interface ) self.assertTrue(destroyed) def test_ex_set_inet_access(self): network_interface = self.driver.ex_describe_network_interface( ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3' '/nics/nic-2' ) ) updated = self.driver.ex_set_inet_access( network_interface=network_interface, internet_access=False) self.assertTrue(updated) return {} ''' Function tests for operations on locations ''' def test_ex_describe_location(self): location_w_href = self.driver.ex_describe_location( ex_href=( '/cloudapi/v3/locations/de/fkb' ) ) location_w_id = self.driver.ex_describe_location( ex_location_id='de/fkb' ) self._verify_location(location=location_w_href) self._verify_location(location=location_w_id) def _verify_location(self, location): self.assertEqual( location.id, 'de/fkb' ) self.assertEqual( location.name, 'karlsruhe' ) self.assertEqual( location.country, 'de' ) ''' Function tests for operations on firewall rules ''' def test_ex_list_firewall_rules(self): network_interface = self.driver.ex_describe_network_interface( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-2' ) ) firewall_rules = self.driver.ex_list_firewall_rules(network_interface) self.assertEqual( 
len(firewall_rules), 3 ) firewall_rule = firewall_rules[0] extra = firewall_rule.extra ''' Standard properties ''' self.assertEqual( firewall_rule.id, 'fwr-1' ) self.assertEqual( firewall_rule.name, 'Test updated firewall rule' ) self.assertEqual( firewall_rule.href, ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-2/' 'firewallrules/fwr-1' ) ) self.assertEqual( firewall_rule.state, NodeState.RUNNING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-19T11:08:10Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'b91a2e082a7422dafb79d84a07fb2a28' ) self.assertEqual( extra['last_modified_date'], '2016-10-19T11:19:04Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test updated firewall rule' ) self.assertEqual( extra['protocol'], 'TCP' ) self.assertEqual( extra['source_mac'], None ) self.assertEqual( extra['source_ip'], None ) self.assertEqual( extra['target_ip'], None ) self.assertEqual( extra['icmp_code'], None ) self.assertEqual( extra['icmp_type'], None ) self.assertEqual( extra['port_range_start'], 80 ) self.assertEqual( extra['port_range_end'], 80 ) def test_ex_describe_firewall_rule(self): firewall_rule_w_href = self.driver.ex_describe_firewall_rule( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/servers/' 's-3/nics/' 'nic-2/firewallrules' '/fw2' ) ) firewall_rule_w_id = self.driver.ex_describe_firewall_rule( ex_datacenter_id='dc-1', ex_server_id='s-3', ex_nic_id='nic-2', ex_firewall_rule_id='fw2' ) self._verify_firewall_rule(firewall_rule=firewall_rule_w_href) self._verify_firewall_rule(firewall_rule=firewall_rule_w_id) def _verify_firewall_rule(self, firewall_rule): extra = firewall_rule.extra ''' Standard properties ''' self.assertEqual( firewall_rule.id, 'fw2' ) self.assertEqual( firewall_rule.name, 'HTTPs (SSL)' ) self.assertEqual( firewall_rule.href, ( 
'/cloudapi/v3/datacenters/' 'dc-1/servers/' 's-3/nics/' 'nic-2/' 'firewallrules/fw2' ) ) self.assertEqual( firewall_rule.state, NodeState.RUNNING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-19T09:55:10Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], '00bb5b86562db1ed19ca38697e485160' ) self.assertEqual( extra['last_modified_date'], '2016-10-19T09:55:10Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'HTTPs (SSL)' ) self.assertEqual( extra['protocol'], 'TCP' ) self.assertEqual( extra['source_mac'], None ) self.assertEqual( extra['source_ip'], None ) self.assertEqual( extra['target_ip'], None ) self.assertEqual( extra['icmp_code'], None ) self.assertEqual( extra['icmp_type'], None ) self.assertEqual( extra['port_range_start'], 443 ) self.assertEqual( extra['port_range_end'], 443 ) def test_ex_create_firewall_rule(self): network_interface = self.driver.ex_describe_network_interface( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-2' ) ) firewall_rule = self.driver.ex_create_firewall_rule( network_interface=network_interface, protocol='TCP', name='Test created firewall rule', port_range_start=80, port_range_end=80 ) extra = firewall_rule.extra ''' Standard properties ''' self.assertEqual( firewall_rule.id, 'fwr-1' ) self.assertEqual( firewall_rule.name, 'Test created firewall rule' ) self.assertEqual( firewall_rule.href, ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-2/' 'firewallrules/fwr-1' ) ) self.assertEqual( firewall_rule.state, NodeState.PENDING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-19T11:08:04Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], '2a21551ba4adf85d9fb04b05a6938bcc' ) self.assertEqual( extra['last_modified_date'], '2016-10-19T11:08:04Z' 
) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test created firewall rule' ) self.assertEqual( extra['protocol'], 'TCP' ) self.assertEqual( extra['source_mac'], None ) self.assertEqual( extra['source_ip'], None ) self.assertEqual( extra['target_ip'], None ) self.assertEqual( extra['icmp_code'], None ) self.assertEqual( extra['icmp_type'], None ) self.assertEqual( extra['port_range_start'], 80 ) self.assertEqual( extra['port_range_end'], 80 ) def test_ex_update_firewall_rule(self): firewall_rule = self.driver.ex_describe_firewall_rule( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-2/' 'firewallrules/fw2' ) ) updated = self.driver.ex_update_firewall_rule( firewall_rule=firewall_rule, name='Test updated firewall rule', port_range_start=8080, port_range_end=8080 ) extra = updated.extra ''' Standard properties ''' self.assertEqual( updated.id, 'fw2' ) self.assertEqual( updated.name, 'HTTPs (SSL)' ) self.assertEqual( updated.href, ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3' '/nics/nic-2/' 'firewallrules/fw2' ) ) self.assertEqual( updated.state, NodeState.PENDING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-19T09:55:10Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], '00bb5b86562db1ed19ca38697e485160' ) self.assertEqual( extra['last_modified_date'], '2016-10-19T09:55:10Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'HTTPs (SSL)' ) self.assertEqual( extra['protocol'], 'TCP' ) self.assertEqual( extra['source_mac'], None ) self.assertEqual( extra['source_ip'], None ) self.assertEqual( extra['target_ip'], None ) self.assertEqual( extra['icmp_code'], None ) self.assertEqual( extra['icmp_type'], None ) self.assertEqual( 
extra['port_range_start'], 443 ) self.assertEqual( extra['port_range_end'], 443 ) def test_ex_delete_firewall_rule(self): firewall_rule = self.driver.ex_describe_firewall_rule( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-2/' 'firewallrules/fw2' ) ) deleted = self.driver.ex_delete_firewall_rule(firewall_rule) self.assertTrue(deleted) ''' Function tests for operations on lans ''' def test_ex_list_lans(self): datacenter = self.driver.ex_describe_datacenter( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1' ) ) lans = self.driver.ex_list_lans( datacenter=datacenter ) lan = lans[0] extra = lan.extra self.assertEqual( len(lans), 1 ) ''' Standard properties ''' self.assertEqual( lan.id, '1' ) self.assertEqual( lan.href, ( '/cloudapi/v3/datacenters/' 'dc-1/lans/1' ) ) self.assertEqual( lan.name, 'Switch for LAN 1' ) self.assertEqual( lan.is_public, False ) self.assertEqual( lan.state, NodeState.RUNNING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-24T08:03:22Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['last_modified_date'], '2016-10-24T08:03:22Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Switch for LAN 1' ) self.assertEqual( extra['is_public'], False ) ''' Miscellaneous ''' self.assertEqual( len(extra['entities']), 1 ) def test_ex_create_lan(self): datacenter = self.driver.ex_describe_datacenter( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1' ) ) lan = self.driver.ex_create_lan( datacenter=datacenter, is_public=True ) extra = lan.extra ''' Standard properties ''' self.assertEqual( lan.id, '10' ) self.assertEqual( lan.href, ( '/cloudapi/v3/datacenters/' 'dc-1/lans/10' ) ) self.assertEqual( lan.name, 'Test Created Lan' ) self.assertEqual( lan.is_public, True ) self.assertEqual( lan.state, NodeState.PENDING ) ''' Extra metadata ''' self.assertEqual( 
extra['created_date'], '2016-10-17T11:33:11Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], '53b215b8ec0356a649955dab019845a4' ) self.assertEqual( extra['last_modified_date'], '2016-10-18T15:13:44Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test Created Lan' ) self.assertEqual( extra['is_public'], True ) def test_ex_describe_lan(self): lan_w_href = self.driver.ex_describe_lan( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/lans/10' ) ) lan_w_id = self.driver.ex_describe_lan( ex_datacenter_id='dc-1', ex_lan_id='10' ) self._verify_lan(lan=lan_w_href) self._verify_lan(lan=lan_w_id) def _verify_lan(self, lan): extra = lan.extra ''' Standard properties ''' self.assertEqual( lan.id, '10' ) self.assertEqual( lan.href, ( '/cloudapi/v3/datacenters/' 'dc-1/lans/10' ) ) self.assertEqual( lan.name, 'Test Created Lan' ) self.assertEqual( lan.is_public, True ) self.assertEqual( lan.state, NodeState.PENDING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T11:33:11Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], '53b215b8ec0356a649955dab019845a4' ) self.assertEqual( extra['last_modified_date'], '2016-10-18T15:13:44Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test Created Lan' ) self.assertEqual( extra['is_public'], True ) def test_ex_update_lan(self): lan = self.driver.ex_describe_lan( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/lans/10' ) ) updated = self.driver.ex_update_lan( lan=lan, is_public=True, name='Updated Lan' ) extra = updated.extra ''' Standard properties ''' self.assertEqual( updated.id, '10' ) self.assertEqual( updated.href, ( '/cloudapi/v3/datacenters/' 'dc-1/lans/10' ) ) self.assertEqual( updated.name, 
'Test Updated Lan' ) self.assertEqual( updated.is_public, True ) self.assertEqual( updated.state, NodeState.PENDING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T11:33:11Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], '53b215b8ec0356a649955dab019845a4' ) self.assertEqual( extra['last_modified_date'], '2016-10-18T15:13:44Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test Updated Lan' ) self.assertEqual( extra['is_public'], True ) def test_ex_delete_lan(self): lan = self.driver.ex_describe_lan( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/lans/10' ) ) deleted = self.driver.ex_delete_lan(lan) self.assertTrue(deleted) ''' Function tests for operations on load balancers ''' def test_ex_list_load_balancers(self): load_balancers = self.driver.ex_list_load_balancers() self.assertEqual( len(load_balancers), 2 ) balancer = load_balancers[0] ''' Standard properties ''' self.assertEqual( balancer.id, 'bal-1' ) self.assertEqual( balancer.href, ( '/cloudapi/v3/datacenters/' 'dc-2/' 'loadbalancers/bal-1' ) ) self.assertEqual( balancer.name, 'Test One' ) self.assertEqual( balancer.state, NodeState.RUNNING ) ''' Extra metadata ''' self.assertEqual( balancer.extra['created_date'], '2016-10-26T13:02:33Z' ) self.assertEqual( balancer.extra['created_by'], 'test@test.te' ) self.assertEqual( balancer.extra['etag'], '71e8df57a58615b9e15400ede4138b41' ) self.assertEqual( balancer.extra['last_modified_date'], '2016-10-26T13:02:33Z' ) self.assertEqual( balancer.extra['last_modified_by'], 'test@test.te' ) self.assertEqual( balancer.extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( balancer.extra['name'], 'Test One' ) self.assertEqual( balancer.extra['ip'], '111.112.113.114' ) self.assertEqual( balancer.extra['dhcp'], True ) def test_ex_describe_load_balancer(self): 
load_balancer_w_href = self.driver.ex_describe_load_balancer( ex_href=( '/cloudapi/v3/datacenters/' 'dc-2/' 'loadbalancers/bal-1' ) ) load_balancer_w_id = self.driver.ex_describe_load_balancer( ex_datacenter_id='dc-2', ex_load_balancer_id='bal-1' ) self._verify_load_balancer(load_balancer=load_balancer_w_href) self._verify_load_balancer(load_balancer=load_balancer_w_id) def _verify_load_balancer(self, load_balancer): ''' Standard properties ''' self.assertEqual( load_balancer.id, 'bal-1' ) self.assertEqual( load_balancer.href, ( '/cloudapi/v3/datacenters/' 'dc-2/' 'loadbalancers/bal-1' ) ) self.assertEqual( load_balancer.name, 'Test One' ) self.assertEqual( load_balancer.state, NodeState.RUNNING ) ''' Extra metadata ''' self.assertEqual( load_balancer.extra['created_date'], '2016-10-26T13:02:33Z' ) self.assertEqual( load_balancer.extra['created_by'], 'test@test.te' ) self.assertEqual( load_balancer.extra['etag'], '71e8df57a58615b9e15400ede4138b41' ) self.assertEqual( load_balancer.extra['last_modified_date'], '2016-10-26T13:02:33Z' ) self.assertEqual( load_balancer.extra['last_modified_by'], 'test@test.te' ) self.assertEqual( load_balancer.extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( load_balancer.extra['name'], 'Test One' ) self.assertEqual( load_balancer.extra['ip'], '111.112.113.114' ) self.assertEqual( load_balancer.extra['dhcp'], True ) def test_ex_create_load_balancer(self): datacenter = self.driver.ex_describe_datacenter( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1' ) ) created = self.driver.ex_create_load_balancer( datacenter=datacenter, name='Test load balancer', ip='10.11.12.13', dhcp=True ) ''' Standard properties ''' self.assertEqual( created.id, 'bal-1' ) self.assertEqual( created.href, ( '/cloudapi/v3/datacenters' '/dc-1' '/loadbalancers/bal-1' ) ) self.assertEqual( created.name, 'Test load balancer' ) self.assertEqual( created.state, NodeState.PENDING ) ''' Extra metadata ''' self.assertEqual( 
created.extra['created_date'], '2016-10-26T13:02:33Z' ) self.assertEqual( created.extra['created_by'], 'test@test.te' ) self.assertEqual( created.extra['etag'], '71e8df57a58615b9e15400ede4138b41' ) self.assertEqual( created.extra['last_modified_date'], '2016-10-26T13:02:33Z' ) self.assertEqual( created.extra['last_modified_by'], 'test@test.te' ) self.assertEqual( created.extra['state'], 'BUSY' ) ''' Extra properties ''' self.assertEqual( created.extra['name'], 'Test load balancer' ) self.assertEqual( created.extra['ip'], None ) self.assertEqual( created.extra['dhcp'], True ) def test_ex_update_load_balancer(self): load_balancer = self.driver.ex_describe_load_balancer( ex_href=( '/cloudapi/v3/datacenters/' 'dc-2/' 'loadbalancers/bal-1' ) ) updated = self.driver.ex_update_load_balancer( load_balancer=load_balancer, name='Updated Load Balancer', ip='123.124.125.126', dhcp=False ) self.assertEqual( updated.name, 'Updated Load Balancer' ) self.assertEqual( updated.extra['ip'], '123.124.125.126' ) self.assertEqual( updated.extra['dhcp'], False ) def test_ex_list_load_balanced_nics(self): load_balancer = self.driver.ex_describe_load_balancer( ex_href=( '/cloudapi/v3/datacenters/' 'dc-2/' 'loadbalancers/bal-1' ) ) network_interfaces = self.driver.ex_list_load_balanced_nics( load_balancer ) network_interface = network_interfaces[0] extra = network_interface.extra ''' Standard properties ''' self.assertEqual( network_interface.id, 'nic-1' ) self.assertEqual( network_interface.name, 'Test network interface' ) self.assertEqual( network_interface.href, ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-1' ) ) self.assertEqual( network_interface.state, NodeState.RUNNING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T15:46:38Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'dbd8216137cf0ec9951170f93fa8fa53' ) self.assertEqual( extra['last_modified_date'], '2016-10-17T18:19:43Z' ) 
self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Test network interface' ) self.assertEqual( extra['mac'], '02:01:0b:9d:4d:ce' ) self.assertEqual( extra['ips'], ['10.15.124.11'] ) self.assertEqual( extra['dhcp'], False ) self.assertEqual( extra['lan'], 2 ) self.assertEqual( extra['firewall_active'], True ) self.assertEqual( extra['nat'], False ) def test_ex_describe_load_balanced_nic(self): network_interface_w_href = self.driver.ex_describe_network_interface( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-2' ) ) network_interface_w_id = self.driver.ex_describe_network_interface( ex_datacenter_id='dc-1', ex_server_id='s-3', ex_nic_id='nic-2' ) self._verify_load_balanced_nic( network_interface=network_interface_w_href ) self._verify_load_balanced_nic( network_interface=network_interface_w_id ) def _verify_load_balanced_nic(self, network_interface): extra = network_interface.extra ''' Standard properties ''' self.assertEqual( network_interface.id, 'nic-2' ) self.assertEqual( network_interface.name, 'Updated from LibCloud' ) self.assertEqual( network_interface.href, ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3' '/nics/nic-2' ) ) self.assertEqual( network_interface.state, NodeState.RUNNING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-17T15:46:38Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'dbd8216137cf0ec9951170f93fa8fa53' ) self.assertEqual( extra['last_modified_date'], '2016-10-17T18:19:43Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) ''' Extra properties ''' self.assertEqual( extra['name'], 'Updated from LibCloud' ) self.assertEqual( extra['mac'], '02:01:0b:9d:4d:ce' ) self.assertEqual( extra['ips'], ['10.15.124.11'] ) self.assertEqual( extra['dhcp'], False ) 
self.assertEqual( extra['lan'], 2 ) self.assertEqual( extra['firewall_active'], True ) self.assertEqual( extra['nat'], False ) def test_ex_attach_nic_to_load_balancer(self): network_interface = self.driver.ex_describe_network_interface( ex_href=( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3' '/nics/nic-2' ) ) load_balancer = self.driver.ex_describe_load_balancer( ex_href=( '/cloudapi/v3/datacenters/' 'dc-2/' 'loadbalancers/bal-1' ) ) attached = self.driver.ex_attach_nic_to_load_balancer( load_balancer=load_balancer, network_interface=network_interface ) self.assertTrue(attached) def test_ex_remove_nic_from_load_balancer(self): network_interface = self.driver.ex_describe_network_interface( ex_href=( ( '/cloudapi/v3/datacenters/' 'dc-1/' 'servers/s-3/' 'nics/nic-2' ) ) ) load_balancer = self.driver.ex_describe_load_balancer( ex_href=( '/cloudapi/v3/datacenters/' 'dc-2/' 'loadbalancers/bal-1' ) ) detached = self.driver.ex_remove_nic_from_load_balancer( load_balancer=load_balancer, network_interface=network_interface ) self.assertTrue(detached) def test_ex_delete_load_balancer(self): load_balancer = self.driver.ex_describe_load_balancer( ex_href=( '/cloudapi/v3/datacenters/' 'dc-2/' 'loadbalancers/bal-1' ) ) deleted = self.driver.ex_delete_load_balancer(load_balancer) self.assertTrue(deleted) ''' Function tests for operations on IP blocks ''' def test_ex_list_ip_blocks(self): ip_blocks = self.driver.ex_list_ip_blocks() self.assertEqual( len(ip_blocks), 2 ) ip_block = ip_blocks[0] extra = ip_block.extra ''' Standard properties ''' self.assertEqual( ip_block.id, 'ipb-1' ) self.assertEqual( ip_block.name, 'Test IP Block One' ) self.assertEqual( ip_block.href, '/cloudapi/v3/ipblocks/ipb-1' ) self.assertEqual( ip_block.location, 'de/fkb' ) self.assertEqual( ip_block.size, 2 ) self.assertEqual( ip_block.ips, ['78.137.101.252', '78.137.101.251'] ) self.assertEqual( ip_block.state, NodeState.RUNNING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], 
'2016-10-26T15:05:36Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'acbf00bacf7ee48d4b8bc4e7413e1f30' ) self.assertEqual( extra['last_modified_date'], '2016-10-26T15:05:36Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'AVAILABLE' ) def test_ex_create_ip_block(self): location = self.driver.ex_describe_location(ex_location_id='de/fkb') created = self.driver.ex_create_ip_block( location=location, size=2, name='Test Created IP Block' ) extra = created.extra ''' Standard properties ''' self.assertEqual( created.id, 'ipb-1' ) self.assertEqual( created.name, 'Test Created IP Block' ) self.assertEqual( created.href, '/cloudapi/v3/ipblocks/ipb-1' ) self.assertEqual( created.location, 'de/fkb' ) self.assertEqual( created.size, 2 ) self.assertEqual( created.ips, ['11.12.13.14', '15.16.17.18'] ) self.assertEqual( created.state, NodeState.PENDING ) ''' Extra metadata ''' self.assertEqual( extra['created_date'], '2016-10-26T15:05:36Z' ) self.assertEqual( extra['created_by'], 'test@test.te' ) self.assertEqual( extra['etag'], 'acbf00bacf7ee48d4b8bc4e7413e1f30' ) self.assertEqual( extra['last_modified_date'], '2016-10-26T15:05:36Z' ) self.assertEqual( extra['last_modified_by'], 'test@test.te' ) self.assertEqual( extra['state'], 'BUSY' ) def test_ex_describe_ip_block(self): ip_block_w_href = self.driver.ex_describe_ip_block( ex_href=( '/cloudapi/v3/ipblocks/' 'ipb-2' ) ) ip_block_w_id = self.driver.ex_describe_ip_block( ex_ip_block_id='ipb-2' ) self._verify_ip_block(ip_block=ip_block_w_href) self._verify_ip_block(ip_block=ip_block_w_id) def _verify_ip_block(self, ip_block): extra = ip_block.extra ''' Standard properties ''' self.assertEqual( ip_block.id, 'ipb-2' ) self.assertEqual( ip_block.name, 'Test IP Block One' ) self.assertEqual( ip_block.href, ( '/cloudapi/v3/ipblocks/ipb-2' ) ) self.assertEqual( ip_block.location, 'de/fkb' ) self.assertEqual( ip_block.size, 1 ) 
        self.assertEqual(
            ip_block.ips,
            ['78.137.101.250']
        )
        self.assertEqual(
            ip_block.state,
            NodeState.RUNNING
        )

        '''
        Extra metadata
        '''
        self.assertEqual(
            extra['created_date'],
            '2016-10-26T15:05:12Z'
        )
        self.assertEqual(
            extra['created_by'],
            'test@test.te'
        )
        self.assertEqual(
            extra['etag'],
            '43e05b766899950bc8a5aeee0fd89b05'
        )
        self.assertEqual(
            extra['last_modified_date'],
            '2016-10-26T15:05:12Z'
        )
        self.assertEqual(
            extra['last_modified_by'],
            'test@test.te'
        )
        self.assertEqual(
            extra['state'],
            'AVAILABLE'
        )

    def test_ex_delete_ip_block(self):
        # Deleting an IP block returns True when the API accepts the request.
        ip_block = self.driver.ex_describe_ip_block(
            ex_href=(
                '/cloudapi/v3/ipblocks/'
                'ipb-2'
            )
        )
        deleted = self.driver.ex_delete_ip_block(ip_block)
        self.assertTrue(deleted)


class ProfitBricksMockHttp(MockHttp):
    # Serves canned JSON fixture responses for the ProfitBricks driver tests.
    # Handler method names are derived from the request path by MockHttp.
    fixtures = ComputeFileFixtures('profitbricks')

    '''
    Operations on images
    GET - fetches images
    '''
    def _cloudapi_v3_images(
        self,
        method, url, body, headers
    ):
        body = self.fixtures.load('list_images.json')
        return (
            httplib.OK,
            body,
            {},
            httplib.responses[httplib.OK]
        )

    '''
    Operations on locations
    GET - fetches locations
    '''
    def _cloudapi_v3_locations(
        self,
        method, url, body, headers
    ):
        body = self.fixtures.load('list_locations.json')
        return (
            httplib.OK,
            body,
            {},
            httplib.responses[httplib.OK]
        )

    '''
    Operations on data centers
    GET - fetches data centers
    POST - creates a data center
    '''
    def _cloudapi_v3_datacenters(
        self,
        method, url, body, headers
    ):
        if method == 'GET':
            body = self.fixtures.load('ex_list_datacenters.json')
            return (
                httplib.OK,
                body,
                {},
                httplib.responses[httplib.OK]
            )
        elif method == 'POST':
            body = self.fixtures.load('ex_create_datacenter.json')
            return (
                httplib.ACCEPTED,
                body,
                {},
                httplib.responses[httplib.ACCEPTED]
            )

    '''
    Operations on a data center
    GET - fetches a data center
    DELETE - destroys a data center
    PATCH - updates a data center
    '''
    def _cloudapi_v3_datacenters_dc_1(
        self,
        method, url, body, headers
    ):
        if method == 'GET':
            body = self.fixtures.load('ex_describe_datacenter.json')
            return (
                httplib.OK,
                body,
                {},
httplib.responses[httplib.OK] ) elif method == 'DELETE': return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) elif method == 'PATCH': body = self.fixtures.load('ex_rename_datacenter.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on data center nodes (servers) GET - fetches a list of nodes (servers) for a data center POST - creates a node (server) for a data center ''' def _cloudapi_v3_datacenters_dc_1_servers( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('list_nodes.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'POST': body = self.fixtures.load('create_node.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on data center volumes GET - fetches a list of volumes for a data center POST - creates a volume for a data center ''' def _cloudapi_v3_datacenters_dc_1_volumes( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('list_volumes.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'POST': body = self.fixtures.load('create_volume.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on a node (server) GET - fetches a node (server) DELETE - destroys a node (server) PATCH - updates a node ''' def _cloudapi_v3_datacenters_dc_1_servers_srv_1( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_describe_node.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'PATCH': body = self.fixtures.load('ex_update_node.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) elif method == 'DELETE': return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on a node (server) POST - reboots, then starts and stops a node ''' 'reboot a node' def 
    _cloudapi_v3_datacenters_dc_1_servers_srv_1_reboot(
        self,
        method, url, body, headers
    ):
        return (
            httplib.ACCEPTED,
            '',
            {},
            httplib.responses[httplib.ACCEPTED]
        )

    # NOTE(review): the original bare-string labels for the next two
    # handlers were swapped relative to the method names; corrected here.
    'stop a node'
    def _cloudapi_v3_datacenters_dc_1_servers_srv_1_stop(
        self,
        method, url, body, headers
    ):
        return (
            httplib.ACCEPTED,
            '',
            {},
            httplib.responses[httplib.ACCEPTED]
        )

    'start a node'
    def _cloudapi_v3_datacenters_dc_1_servers_srv_1_start(
        self,
        method, url, body, headers
    ):
        return (
            httplib.ACCEPTED,
            '',
            {},
            httplib.responses[httplib.ACCEPTED]
        )

    """
    Operations on an image
    GET - fetches an image
    DELETE - deletes an image
    PATCH - updates an image
    """
    def _cloudapi_v3_images_img_2(
        self,
        method, url, body, headers
    ):
        if method == 'GET':
            body = self.fixtures.load('ex_describe_image.json')
            return (
                httplib.OK,
                body,
                {},
                httplib.responses[httplib.OK]
            )
        elif method == 'DELETE':
            return (
                httplib.ACCEPTED,
                '',
                {},
                httplib.responses[httplib.ACCEPTED]
            )
        elif method == 'PATCH':
            body = self.fixtures.load('ex_update_image.json')
            return (
                httplib.ACCEPTED,
                body,
                {},
                httplib.responses[httplib.ACCEPTED]
            )

    '''
    Operations on a volume
    GET - fetches a volume
    DELETE - destroys a volume
    PATCH - updates a volume
    '''
    def _cloudapi_v3_datacenters_dc_1_volumes_vol_2(
        self,
        method, url, body, headers
    ):
        if method == 'GET':
            body = self.fixtures.load('ex_describe_volume.json')
            return (
                httplib.OK,
                body,
                {},
                httplib.responses[httplib.OK]
            )
        elif method == 'DELETE':
            return (
                httplib.ACCEPTED,
                '',
                {},
                httplib.responses[httplib.ACCEPTED]
            )
        elif method == 'PATCH':
            body = self.fixtures.load('ex_update_volume.json')
            return (
                httplib.ACCEPTED,
                body,
                {},
                httplib.responses[httplib.ACCEPTED]
            )

    '''
    Operations on a volume connected to a node (server)
    DELETE - destroys the link between a volume and a server
    but does not delete the volume.
''' def _cloudapi_v3_datacenters_dc_1_servers_srv_1_volumes_vol_2( self, method, url, body, headers ): return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on a location GET - fetches a location ''' def _cloudapi_v3_locations_de_fkb( self, method, url, body, headers ): body = self.fixtures.load('ex_describe_location.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) ''' Operations on volumes connected to nodes (servers) GET - fetch volumes connected to a server POST - attach a volume to a node (server) ''' def _cloudapi_v3_datacenters_dc_1_servers_srv_1_volumes( self, method, url, body, headers ): if(method == 'GET'): body = self.fixtures.load('ex_list_attached_volumes.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'POST': body = self.fixtures.load('attach_volume.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on network interfaces connected to a server GET - fetch network interfaces for a node (server) POST - create a network interface for a node (server) ''' def _cloudapi_v3_datacenters_dc_1_servers_srv_1_nics( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_list_network_interfaces.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'POST': body = self.fixtures.load('ex_create_network_interface.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on network interfaces GET - fetch a network interface DELETE - destroy a network interface PATCH - update a network interface ''' def _cloudapi_v3_datacenters_dc_1_servers_s_3_nics_nic_2( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_describe_network_interface.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'DELETE': return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) elif 
method == 'PATCH': body = self.fixtures.load('ex_set_inet_access.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on firewall rules GET - fetch a firewall rule DELETE - destroy a firewall rule PATCH - update a firewall rule ''' def _cloudapi_v3_datacenters_dc_1_servers_s_3_nics_nic_2_firewallrules_fw2( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_describe_firewall_rule.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'DELETE': return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) elif method == 'PATCH': body = self.fixtures.load('ex_update_firewall_rule.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on firewall rules connected to a network interface GET - fetch a list of firewall rules connected to a network interface POST - create a firewall rule for a network interface ''' def _cloudapi_v3_datacenters_dc_1_servers_s_3_nics_nic_2_firewallrules( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_list_firewall_rules.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'POST': body = self.fixtures.load('ex_create_firewall_rule.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on lans GET - fetch a list of lans POST - create a lan ''' def _cloudapi_v3_datacenters_dc_1_lans( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_list_lans.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'POST': body = self.fixtures.load('ex_create_lan.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on a single lan GET - fetch a lan DELETE - Destroy a lan PATCH - update a lan ''' def _cloudapi_v3_datacenters_dc_1_lans_10( self, method, url, body, headers ): if method == 
'GET': body = self.fixtures.load('ex_describe_lan.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'PATCH': body = self.fixtures.load('ex_update_lan.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) elif method == 'DELETE': return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on snapshots GET - fetch a list of snapshots ''' def _cloudapi_v3_snapshots( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('list_snapshots.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) ''' Operations on volume snapshots POST - create a volume snapshot ''' def _cloudapi_v3_datacenters_dc_1_volumes_vol_2_create_snapshot( self, method, url, body, headers ): if method == 'POST': body = self.fixtures.load('create_volume_snapshot.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on a single snapshot GET - get information on a snapshot DELETE - delete a snapshot PATCH - update a snapshot ''' def _cloudapi_v3_snapshots_sshot( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_describe_snapshot.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'PATCH': body = self.fixtures.load('ex_update_snapshot.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) elif method == 'DELETE': return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on load balancers GET - list load balancers POST - create a load balancer for this datacenter ''' def _cloudapi_v3_datacenters_dc_1_loadbalancers( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_list_load_balancers.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'POST': body = self.fixtures.load('ex_create_load_balancer.json') return ( httplib.ACCEPTED, body, {}, 
httplib.responses[httplib.ACCEPTED] ) ''' Operations on a single load balancer GET - get information on a load balancer DELETE - delete a load balancer PATCH - update a load balancer ''' def _cloudapi_v3_datacenters_dc_2_loadbalancers_bal_1( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_describe_load_balancer.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'DELETE': return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) elif method == 'PATCH': body = self.fixtures.load('ex_update_load_balancer.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on a load balancers nics GET - get load balanced nics ''' def _cloudapi_v3_datacenters_dc_2_loadbalancers_bal_1_balancednics( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_list_load_balanced_nics.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'POST': return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on a load balanced nic DELETE - remove the nic from a load balancer ''' def _cloudapi_v3_datacenters_dc_2_loadbalancers_bal_1_balancednics_nic_2( self, method, url, body, headers ): if method == 'DELETE': return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on IP blocks GET - list IP blocks POST - create an IP block ''' def _cloudapi_v3_ipblocks( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_list_ip_blocks.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'POST': body = self.fixtures.load('ex_create_ip_block.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) ''' Operations on a single IP block GET - fetch an IP block DELETE - delete an IP block ''' def _cloudapi_v3_ipblocks_ipb_2( self, method, url, body, headers ): if method == 'GET': body = 
self.fixtures.load('ex_describe_ip_block.json') return( httplib.OK, body, {}, httplib.responses[httplib.OK] ) elif method == 'DELETE': return ( httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED] ) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_cloudscale.py0000664000175000017500000001067413153541406024407 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys
import unittest

try:
    import simplejson as json
except ImportError:
    import json  # NOQA

from libcloud.utils.py3 import httplib

from libcloud.compute.drivers.cloudscale import CloudscaleNodeDriver

from libcloud.test import LibcloudTestCase, MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import CLOUDSCALE_PARAMS


class CloudscaleTests(LibcloudTestCase):
    """Unit tests for the cloudscale.ch compute driver.

    All HTTP traffic is served from canned JSON fixtures via
    CloudscaleMockHttp; no network access takes place.
    """

    def setUp(self):
        # Route the driver's connection through the mock HTTP class.
        CloudscaleNodeDriver.connectionCls.conn_class = \
            CloudscaleMockHttp
        self.driver = CloudscaleNodeDriver(*CLOUDSCALE_PARAMS)

    def test_list_images_success(self):
        images = self.driver.list_images()
        # The fixture contains exactly one image; tuple-unpack asserts that.
        image, = images
        self.assertTrue(image.id is not None)
        self.assertTrue(image.name is not None)

    def test_list_sizes_success(self):
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 2)

        size = sizes[0]
        self.assertTrue(size.id is not None)
        self.assertEqual(size.name, 'Flex-2')
        self.assertEqual(size.ram, 2048)

        size = sizes[1]
        self.assertTrue(size.id is not None)
        self.assertEqual(size.name, 'Flex-4')
        self.assertEqual(size.ram, 4096)

    def test_list_locations_not_existing(self):
        # list_locations() is expected to raise NotImplementedError.
        # assertRaises doesn't exist in Python 2.5?!
        try:
            self.driver.list_locations()
        except NotImplementedError:
            pass
        else:
            assert False, 'Did not raise the wished error.'

    def test_list_nodes_success(self):
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].id, '47cec963-fcd2-482f-bdb6-24461b2d47b1')
        # Fixture node exposes one IPv4 and one IPv6 public address.
        self.assertEqual(
            nodes[0].public_ips,
            ['185.98.122.176', '2a06:c01:1:1902::7ab0:176']
        )

    def test_reboot_node_success(self):
        node = self.driver.list_nodes()[0]
        result = self.driver.reboot_node(node)
        self.assertTrue(result)

    def test_create_node_success(self):
        test_size = self.driver.list_sizes()[0]
        test_image = self.driver.list_images()[0]
        created_node = self.driver.create_node('node-name', test_size,
                                               test_image)
        self.assertEqual(created_node.id,
                         "47cec963-fcd2-482f-bdb6-24461b2d47b1")

    def test_destroy_node_success(self):
        node = self.driver.list_nodes()[0]
        result = self.driver.destroy_node(node)
        self.assertTrue(result)


class CloudscaleMockHttp(MockHttp):
    # Serves canned JSON fixture responses; handler method names are derived
    # from the request path by MockHttp.
    fixtures = ComputeFileFixtures('cloudscale')

    def _v1_images(self, method, url, body, headers):
        body = self.fixtures.load('list_images.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_flavors(self, method, url, body, headers):
        body = self.fixtures.load('list_sizes.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_servers(self, method, url, body, headers):
        # GET lists servers; any other method (i.e. POST) creates one.
        if method == 'GET':
            body = self.fixtures.load('list_nodes.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            body = self.fixtures.load('create_node.json')
            response = httplib.responses[httplib.CREATED]
            return (httplib.CREATED, body, {}, response)

    def _v1_servers_47cec963_fcd2_482f_bdb6_24461b2d47b1(self, method, url,
                                                         body, headers):
        # Only DELETE (destroy_node) is expected on this endpoint.
        assert method == 'DELETE'
        return (httplib.NO_CONTENT, "", {},
                httplib.responses[httplib.NO_CONTENT])

    def _v1_servers_47cec963_fcd2_482f_bdb6_24461b2d47b1_reboot(self, method,
                                                                url, body,
                                                                headers):
        return (httplib.OK, "", {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-libcloud-2.2.1/libcloud/test/compute/test_cloudsigma_v2_0.py0000664000175000017500000006633213153541406025250 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys try: import simplejson as json except: import json from libcloud.utils.py3 import httplib from libcloud.common.types import InvalidCredsError from libcloud.compute.drivers.cloudsigma import CloudSigmaNodeDriver from libcloud.compute.drivers.cloudsigma import CloudSigma_2_0_NodeDriver from libcloud.compute.drivers.cloudsigma import CloudSigmaError from libcloud.compute.types import NodeState from libcloud.test import unittest from libcloud.test import MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures class CloudSigmaAPI20BaseTestCase(object): def setUp(self): self.driver_klass.connectionCls.conn_class = CloudSigmaMockHttp CloudSigmaMockHttp.type = None CloudSigmaMockHttp.use_param = 'do' self.driver = self.driver_klass(*self.driver_args, **self.driver_kwargs) self.driver.DRIVE_TRANSITION_SLEEP_INTERVAL = 0.1 self.driver.DRIVE_TRANSITION_TIMEOUT = 1 self.node = self.driver.list_nodes()[0] def test_invalid_api_versions(self): expected_msg = 'Unsupported API version: invalid' self.assertRaisesRegexp(NotImplementedError, expected_msg, 
CloudSigmaNodeDriver, 'username', 'password', api_version='invalid') def test_invalid_credentials(self): CloudSigmaMockHttp.type = 'INVALID_CREDS' self.assertRaises(InvalidCredsError, self.driver.list_nodes) def test_invalid_region(self): expected_msg = 'Invalid region:' self.assertRaisesRegexp(ValueError, expected_msg, CloudSigma_2_0_NodeDriver, 'foo', 'bar', region='invalid') def test_list_sizes(self): sizes = self.driver.list_sizes() size = sizes[0] self.assertEqual(size.id, 'micro-regular') def test_list_images(self): images = self.driver.list_images() image = images[0] self.assertEqual(image.name, 'ubuntu-10.04-toMP') self.assertEqual(image.extra['image_type'], 'preinst') self.assertEqual(image.extra['media'], 'disk') self.assertEqual(image.extra['os'], 'linux') def test_list_nodes(self): nodes = self.driver.list_nodes() node = nodes[0] self.assertEqual(len(nodes), 2) self.assertEqual(node.id, '9de75ed6-fd33-45e2-963f-d405f31fd911') self.assertEqual(node.name, 'test no drives') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.public_ips, ['185.12.5.181', '178.22.68.55']) self.assertEqual(node.private_ips, []) def test_create_node(self): image = self.driver.list_images()[0] size = self.driver.list_sizes()[0] metadata = {'foo': 'bar'} node = self.driver.create_node(name='test node', size=size, image=image, ex_metadata=metadata) self.assertEqual(node.name, 'test node') self.assertEqual(len(node.extra['nics']), 1) self.assertEqual(node.extra['nics'][0]['ip_v4_conf']['conf'], 'dhcp') def test_create_node_with_vlan(self): image = self.driver.list_images()[0] size = self.driver.list_sizes()[0] vlan_uuid = '39ae851d-433f-4ac2-a803-ffa24cb1fa3e' node = self.driver.create_node(name='test node vlan', size=size, image=image, ex_vlan=vlan_uuid) self.assertEqual(node.name, 'test node vlan') self.assertEqual(len(node.extra['nics']), 2) self.assertEqual(node.extra['nics'][0]['ip_v4_conf']['conf'], 'dhcp') 
self.assertEqual(node.extra['nics'][1]['vlan']['uuid'], vlan_uuid) def test_destroy_node(self): status = self.driver.destroy_node(node=self.node) self.assertTrue(status) def test_ex_start_node(self): status = self.driver.ex_start_node(node=self.node) self.assertTrue(status) def test_ex_start_node_avoid_mode(self): CloudSigmaMockHttp.type = 'AVOID_MODE' ex_avoid = ['1', '2'] status = self.driver.ex_start_node(node=self.node, ex_avoid=ex_avoid) self.assertTrue(status) def test_ex_start_node_already_started(self): CloudSigmaMockHttp.type = 'ALREADY_STARTED' expected_msg = 'Cannot start guest in state "started". Guest should ' \ 'be in state "stopped' self.assertRaisesRegexp(CloudSigmaError, expected_msg, self.driver.ex_start_node, node=self.node) def test_ex_stop_node(self): status = self.driver.ex_stop_node(node=self.node) self.assertTrue(status) def test_ex_stop_node_already_stopped(self): CloudSigmaMockHttp.type = 'ALREADY_STOPPED' expected_msg = 'Cannot stop guest in state "stopped"' self.assertRaisesRegexp(CloudSigmaError, expected_msg, self.driver.ex_stop_node, node=self.node) def test_ex_clone_node(self): node_to_clone = self.driver.list_nodes()[0] cloned_node = self.driver.ex_clone_node(node=node_to_clone, name='test cloned node') self.assertEqual(cloned_node.name, 'test cloned node') def test_ex_open_vnc_tunnel(self): node = self.driver.list_nodes()[0] vnc_url = self.driver.ex_open_vnc_tunnel(node=node) self.assertEqual(vnc_url, 'vnc://direct.lvs.cloudsigma.com:41111') def test_ex_close_vnc_tunnel(self): node = self.driver.list_nodes()[0] status = self.driver.ex_close_vnc_tunnel(node=node) self.assertTrue(status) def test_ex_list_library_drives(self): drives = self.driver.ex_list_library_drives() drive = drives[0] self.assertEqual(drive.name, 'IPCop 2.0.2') self.assertEqual(drive.size, 1000000000) self.assertEqual(drive.media, 'cdrom') self.assertEqual(drive.status, 'unmounted') def test_ex_list_user_drives(self): drives = self.driver.ex_list_user_drives() 
    # NOTE(review): the lines below are the tail of a drive-listing test whose
    # ``def`` line falls before this chunk — presumably it fetches node drives
    # and checks the first one; confirm against the full file.
        drive = drives[0]
        self.assertEqual(drive.name, 'test node 2-drive')
        self.assertEqual(drive.size, 13958643712)
        self.assertEqual(drive.media, 'disk')
        self.assertEqual(drive.status, 'unmounted')

    # --- drive management -------------------------------------------------

    def test_ex_create_drive(self):
        # Switch the mock transport to the CREATE fixture set.
        CloudSigmaMockHttp.type = 'CREATE'
        name = 'test drive 5'
        size = 2000 * 1024 * 1024  # size is expressed in bytes
        drive = self.driver.ex_create_drive(name=name, size=size,
                                            media='disk')
        self.assertEqual(drive.name, 'test drive 5')
        self.assertEqual(drive.media, 'disk')

    def test_ex_clone_drive(self):
        drive = self.driver.ex_list_user_drives()[0]
        cloned_drive = self.driver.ex_clone_drive(drive=drive,
                                                  name='cloned drive')
        self.assertEqual(cloned_drive.name, 'cloned drive')

    def test_ex_resize_drive(self):
        drive = self.driver.ex_list_user_drives()[0]
        size = 1111 * 1024 * 1024  # bytes
        resized_drive = self.driver.ex_resize_drive(drive=drive, size=size)
        self.assertEqual(resized_drive.name, 'test drive 5')
        self.assertEqual(resized_drive.media, 'disk')
        self.assertEqual(resized_drive.size, size)

    # --- firewall policies ------------------------------------------------

    def test_ex_list_firewall_policies(self):
        policies = self.driver.ex_list_firewall_policies()
        policy = policies[1]
        rule = policy.rules[0]
        self.assertEqual(policy.name, 'My awesome policy')
        self.assertEqual(rule.action, 'drop')
        self.assertEqual(rule.direction, 'out')
        self.assertEqual(rule.dst_ip, '23.0.0.0/32')
        self.assertEqual(rule.ip_proto, 'tcp')
        self.assertEqual(rule.dst_port, None)
        self.assertEqual(rule.src_ip, None)
        self.assertEqual(rule.src_port, None)
        self.assertEqual(rule.comment, 'Drop traffic from the VM to IP address 23.0.0.0/32')

    def test_ex_create_firewall_policy_no_rules(self):
        CloudSigmaMockHttp.type = 'CREATE_NO_RULES'
        policy = self.driver.ex_create_firewall_policy(name='test policy 1')
        self.assertEqual(policy.name, 'test policy 1')
        self.assertEqual(policy.rules, [])

    def test_ex_create_firewall_policy_with_rules(self):
        CloudSigmaMockHttp.type = 'CREATE_WITH_RULES'
        rules = [
            {
                'action': 'accept',
                'direction': 'out',
                'ip_proto': 'tcp',
                'src_ip': '127.0.0.1',
                'dst_ip': '127.0.0.1'
            }
        ]
        policy = self.driver.ex_create_firewall_policy(name='test policy 2',
                                                       rules=rules)
        rule = policy.rules[0]
        self.assertEqual(policy.name, 'test policy 2')
        self.assertEqual(len(policy.rules), 1)
        self.assertEqual(rule.action, 'accept')
        self.assertEqual(rule.direction, 'out')
        self.assertEqual(rule.ip_proto, 'tcp')

    def test_ex_attach_firewall_policy(self):
        policy = self.driver.ex_list_firewall_policies()[0]
        node = self.driver.list_nodes()[0]
        CloudSigmaMockHttp.type = 'ATTACH_POLICY'
        updated_node = self.driver.ex_attach_firewall_policy(policy=policy,
                                                             node=node)
        nic = updated_node.extra['nics'][0]
        self.assertEqual(nic['firewall_policy']['uuid'],
                         '461dfb8c-e641-43d7-a20e-32e2aa399086')

    def test_ex_attach_firewall_policy_inexistent_nic(self):
        policy = self.driver.ex_list_firewall_policies()[0]
        node = self.driver.list_nodes()[0]
        nic_mac = 'inexistent'
        expected_msg = 'Cannot find the NIC interface to attach a policy to'
        # NOTE(review): assertRaisesRegexp is the Python 2 compatible alias of
        # assertRaisesRegex (kept for py2 support in libcloud 2.x).
        self.assertRaisesRegexp(ValueError, expected_msg,
                                self.driver.ex_attach_firewall_policy,
                                policy=policy, node=node, nic_mac=nic_mac)

    def test_ex_delete_firewall_policy(self):
        policy = self.driver.ex_list_firewall_policies()[0]
        status = self.driver.ex_delete_firewall_policy(policy=policy)
        self.assertTrue(status)

    # --- tags -------------------------------------------------------------

    def test_ex_list_tags(self):
        tags = self.driver.ex_list_tags()
        tag = tags[0]
        self.assertEqual(tag.id, 'a010ec41-2ead-4630-a1d0-237fa77e4d4d')
        self.assertEqual(tag.name, 'test tag 2')

    def test_ex_get_tag(self):
        tag = self.driver.ex_get_tag(tag_id='a010ec41-2ead-4630-a1d0-237fa77e4d4d')
        self.assertEqual(tag.id, 'a010ec41-2ead-4630-a1d0-237fa77e4d4d')
        self.assertEqual(tag.name, 'test tag 2')

    def test_ex_create_tag(self):
        tag = self.driver.ex_create_tag(name='test tag 3')
        self.assertEqual(tag.name, 'test tag 3')

    def test_ex_create_tag_with_resources(self):
        CloudSigmaMockHttp.type = 'WITH_RESOURCES'
        resource_uuids = ['1']
        tag = self.driver.ex_create_tag(name='test tag 3',
                                        resource_uuids=resource_uuids)
        self.assertEqual(tag.name, 'test tag 3')
        self.assertEqual(tag.resources, resource_uuids)

    def test_ex_tag_resource(self):
        node = self.driver.list_nodes()[0]
        tag = self.driver.ex_list_tags()[0]
        updated_tag = self.driver.ex_tag_resource(resource=node, tag=tag)
        self.assertEqual(updated_tag.name, 'test tag 3')

    def test_ex_tag_resources(self):
        nodes = self.driver.list_nodes()
        tag = self.driver.ex_list_tags()[0]
        updated_tag = self.driver.ex_tag_resources(resources=nodes, tag=tag)
        self.assertEqual(updated_tag.name, 'test tag 3')

    def test_ex_tag_resource_invalid_resource_object(self):
        tag = self.driver.ex_list_tags()[0]
        expected_msg = 'Resource doesn\'t have id attribute'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                self.driver.ex_tag_resource,
                                tag=tag, resource={})

    def test_ex_delete_tag(self):
        tag = self.driver.ex_list_tags()[0]
        status = self.driver.ex_delete_tag(tag=tag)
        self.assertTrue(status)

    # --- billing / account ------------------------------------------------

    def test_ex_get_balance(self):
        balance = self.driver.ex_get_balance()
        self.assertEqual(balance['balance'], '10.00')
        self.assertEqual(balance['currency'], 'USD')

    def test_ex_get_pricing(self):
        pricing = self.driver.ex_get_pricing()
        self.assertTrue('current' in pricing)
        self.assertTrue('next' in pricing)
        self.assertTrue('objects' in pricing)

    def test_ex_get_usage(self):
        pricing = self.driver.ex_get_usage()
        self.assertTrue('balance' in pricing)
        self.assertTrue('usage' in pricing)

    # --- subscriptions ----------------------------------------------------

    def test_ex_list_subscriptions(self):
        subscriptions = self.driver.ex_list_subscriptions()
        subscription = subscriptions[0]
        self.assertEqual(len(subscriptions), 5)
        self.assertEqual(subscription.id, '7272')
        self.assertEqual(subscription.resource, 'vlan')
        self.assertEqual(subscription.amount, 1)
        self.assertEqual(subscription.period, '345 days, 0:00:00')
        self.assertEqual(subscription.status, 'active')
        self.assertEqual(subscription.price, '0E-20')

    def test_ex_create_subscription(self):
        CloudSigmaMockHttp.type = 'CREATE_SUBSCRIPTION'
        subscription = self.driver.ex_create_subscription(amount=1,
                                                          period='1 month',
                                                          resource='vlan')
        self.assertEqual(subscription.amount, 1)
        self.assertEqual(subscription.period, '1 month')
        self.assertEqual(subscription.resource, 'vlan')
        self.assertEqual(subscription.price, '10.26666666666666666666666667')
        self.assertEqual(subscription.auto_renew, False)
        self.assertEqual(subscription.subscribed_object,
                         '2494079f-8376-40bf-9b37-34d633b8a7b7')

    # NOTE(review): "filterting" below is a typo for "filtering"; kept as-is
    # because these are public test-method names.
    def test_ex_list_subscriptions_status_filterting(self):
        CloudSigmaMockHttp.type = 'STATUS_FILTER'
        # The STATUS_FILTER mock handler asserts on the query params itself.
        self.driver.ex_list_subscriptions(status='active')

    def test_ex_list_subscriptions_resource_filterting(self):
        CloudSigmaMockHttp.type = 'RESOURCE_FILTER'
        resources = ['cpu', 'mem']
        self.driver.ex_list_subscriptions(resources=resources)

    def test_ex_toggle_subscription_auto_renew(self):
        subscription = self.driver.ex_list_subscriptions()[0]
        status = self.driver.ex_toggle_subscription_auto_renew(
            subscription=subscription)
        self.assertTrue(status)

    # --- capabilities / availability groups -------------------------------

    def test_ex_list_capabilities(self):
        capabilities = self.driver.ex_list_capabilities()
        self.assertEqual(capabilities['servers']['cpu']['min'], 250)

    def test_ex_list_servers_availability_groups(self):
        groups = self.driver.ex_list_servers_availability_groups()
        self.assertEqual(len(groups), 3)
        self.assertEqual(len(groups[0]), 2)
        self.assertEqual(len(groups[2]), 1)

    def test_ex_list_drives_availability_groups(self):
        groups = self.driver.ex_list_drives_availability_groups()
        self.assertEqual(len(groups), 1)
        self.assertEqual(len(groups[0]), 11)

    # --- internal polling helper ------------------------------------------

    def test_wait_for_drive_state_transition_timeout(self):
        drive = self.driver.ex_list_user_drives()[0]
        state = 'timeout'
        expected_msg = 'Timed out while waiting for drive transition'
        self.assertRaisesRegexp(Exception, expected_msg,
                                self.driver._wait_for_drive_state_transition,
                                drive=drive, state=state, timeout=0.5)

    def test_wait_for_drive_state_transition_success(self):
        drive = self.driver.ex_list_user_drives()[0]
        state = 'unmounted'
        drive = self.driver._wait_for_drive_state_transition(drive=drive,
                                                             state=state,
                                                             timeout=0.5)
        self.assertEqual(drive.status, state)
class CloudSigmaAPI20DirectTestCase(CloudSigmaAPI20BaseTestCase,
                                    unittest.TestCase):
    """Run the shared API 2.0 tests against the 2.0 driver class directly."""
    driver_klass = CloudSigma_2_0_NodeDriver
    driver_args = ('foo', 'bar')
    driver_kwargs = {}


class CloudSigmaAPI20IndirectTestCase(CloudSigmaAPI20BaseTestCase,
                                      unittest.TestCase):
    """Run the shared tests via the dispatching driver + api_version kwarg."""
    driver_klass = CloudSigmaNodeDriver
    driver_args = ('foo', 'bar')
    driver_kwargs = {'api_version': '2.0'}


class CloudSigmaMockHttp(MockHttp, unittest.TestCase):
    """
    Mock HTTP transport for the CloudSigma API 2.0 tests.

    Method names are derived from the request path (slashes and dashes become
    underscores) plus the active ``type`` suffix; each returns a canned
    fixture response tuple of (status, body, headers, reason).
    """
    fixtures = ComputeFileFixtures('cloudsigma_2_0')

    def _api_2_0_servers_detail_INVALID_CREDS(self, method, url, body,
                                              headers):
        body = self.fixtures.load('libdrives.json')
        return (httplib.UNAUTHORIZED, body, {},
                httplib.responses[httplib.UNAUTHORIZED])

    def _api_2_0_libdrives(self, method, url, body, headers):
        body = self.fixtures.load('libdrives.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_servers_detail(self, method, url, body, headers):
        body = self.fixtures.load('servers_detail_mixed_state.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911(self, method,
                                                              url, body,
                                                              headers):
        # destroy_node; DELETE returns an empty 204 body.
        body = ''
        return (httplib.NO_CONTENT, body, {},
                httplib.responses[httplib.NO_CONTENT])

    def _api_2_0_servers(self, method, url, body, headers):
        if method == 'POST':
            # create_node
            parsed = json.loads(body)
            if 'vlan' in parsed['name']:
                # A VLAN request must carry two NICs (public + VLAN).
                self.assertEqual(len(parsed['nics']), 2)
                body = self.fixtures.load('servers_create_with_vlan.json')
            else:
                body = self.fixtures.load('servers_create.json')
        return (httplib.CREATED, body, {},
                httplib.responses[httplib.CREATED])

    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_start(
            self, method, url, body, headers):
        body = self.fixtures.load('start_success.json')
        return (httplib.ACCEPTED, body, {},
                httplib.responses[httplib.ACCEPTED])

    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_AVOID_MODE_start(
            self, method, url, body, headers):
        # ex_start_node with an "avoid" host list must pass it as a query arg.
        self.assertUrlContainsQueryParams(url, {'avoid': '1,2'})
        body = self.fixtures.load('start_success.json')
        return (httplib.ACCEPTED, body, {},
                httplib.responses[httplib.ACCEPTED])

    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_ALREADY_STARTED_start(
            self, method, url, body, headers):
        body = self.fixtures.load('start_already_started.json')
        return (httplib.FORBIDDEN, body, {},
                httplib.responses[httplib.FORBIDDEN])

    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_stop(
            self, method, url, body, headers):
        body = self.fixtures.load('stop_success.json')
        return (httplib.ACCEPTED, body, {},
                httplib.responses[httplib.ACCEPTED])

    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_ALREADY_STOPPED_stop(
            self, method, url, body, headers):
        body = self.fixtures.load('stop_already_stopped.json')
        return (httplib.FORBIDDEN, body, {},
                httplib.responses[httplib.FORBIDDEN])

    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_clone(
            self, method, url, body, headers):
        body = self.fixtures.load('servers_clone.json')
        return (httplib.ACCEPTED, body, {},
                httplib.responses[httplib.ACCEPTED])

    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_open_vnc(
            self, method, url, body, headers):
        body = self.fixtures.load('servers_open_vnc.json')
        return (httplib.ACCEPTED, body, {},
                httplib.responses[httplib.ACCEPTED])

    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_close_vnc(
            self, method, url, body, headers):
        body = self.fixtures.load('servers_close_vnc.json')
        return (httplib.ACCEPTED, body, {},
                httplib.responses[httplib.ACCEPTED])

    def _api_2_0_drives_detail(self, method, url, body, headers):
        body = self.fixtures.load('drives_detail.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_drives_b02311e2_a83c_4c12_af10_b30d51c86913(self, method,
                                                             url, body,
                                                             headers):
        body = self.fixtures.load('drives_get.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809(self, method,
                                                             url, body,
                                                             headers):
        body = self.fixtures.load('drives_get.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_drives_CREATE(self, method, url, body, headers):
        body = self.fixtures.load('drives_create.json')
        return (httplib.CREATED, body, {},
                httplib.responses[httplib.CREATED])

    def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809_action_clone(
            self, method, url, body, headers):
        body = self.fixtures.load('drives_clone.json')
        return (httplib.ACCEPTED, body, {},
                httplib.responses[httplib.ACCEPTED])

    def _api_2_0_drives_5236b9ee_f735_42fd_a236_17558f9e12d3_action_clone(
            self, method, url, body, headers):
        body = self.fixtures.load('drives_clone.json')
        return (httplib.ACCEPTED, body, {},
                httplib.responses[httplib.ACCEPTED])

    def _api_2_0_drives_b02311e2_a83c_4c12_af10_b30d51c86913_action_resize(
            self, method, url, body, headers):
        body = self.fixtures.load('drives_resize.json')
        return (httplib.ACCEPTED, body, {},
                httplib.responses[httplib.ACCEPTED])

    def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809_action_resize(
            self, method, url, body, headers):
        body = self.fixtures.load('drives_resize.json')
        return (httplib.ACCEPTED, body, {},
                httplib.responses[httplib.ACCEPTED])

    def _api_2_0_fwpolicies_detail(self, method, url, body, headers):
        body = self.fixtures.load('fwpolicies_detail.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_fwpolicies_CREATE_NO_RULES(self, method, url, body,
                                            headers):
        body = self.fixtures.load('fwpolicies_create_no_rules.json')
        return (httplib.CREATED, body, {},
                httplib.responses[httplib.CREATED])

    def _api_2_0_fwpolicies_CREATE_WITH_RULES(self, method, url, body,
                                              headers):
        body = self.fixtures.load('fwpolicies_create_with_rules.json')
        return (httplib.CREATED, body, {},
                httplib.responses[httplib.CREATED])

    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_ATTACH_POLICY(
            self, method, url, body, headers):
        body = self.fixtures.load('servers_attach_policy.json')
        return (httplib.CREATED, body, {},
                httplib.responses[httplib.CREATED])

    def _api_2_0_fwpolicies_0e339282_0cb5_41ac_a9db_727fb62ff2dc(self,
                                                                 method, url,
                                                                 body,
                                                                 headers):
        if method == 'DELETE':
            body = ''
            return (httplib.NO_CONTENT, body, {},
                    httplib.responses[httplib.NO_CONTENT])

    def _api_2_0_tags_detail(self, method, url, body, headers):
        body = self.fixtures.load('tags_detail.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_tags(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('tags_create.json')
            return (httplib.CREATED, body, {},
                    httplib.responses[httplib.CREATED])

    def _api_2_0_tags_WITH_RESOURCES(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('tags_create_with_resources.json')
            return (httplib.CREATED, body, {},
                    httplib.responses[httplib.CREATED])

    def _api_2_0_tags_a010ec41_2ead_4630_a1d0_237fa77e4d4d(self, method, url,
                                                           body, headers):
        if method == 'GET':
            # ex_get_tag
            body = self.fixtures.load('tags_get.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        elif method == 'PUT':
            # ex_tag_resource
            body = self.fixtures.load('tags_update.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        elif method == 'DELETE':
            # ex_delete_tag
            body = ''
            return (httplib.NO_CONTENT, body, {},
                    httplib.responses[httplib.NO_CONTENT])

    def _api_2_0_balance(self, method, url, body, headers):
        body = self.fixtures.load('balance.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_pricing(self, method, url, body, headers):
        body = self.fixtures.load('pricing.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_currentusage(self, method, url, body, headers):
        body = self.fixtures.load('currentusage.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_subscriptions(self, method, url, body, headers):
        body = self.fixtures.load('subscriptions.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_subscriptions_STATUS_FILTER(self, method, url, body,
                                             headers):
        # Verifies the status filter reaches the server as a query param.
        self.assertUrlContainsQueryParams(url, {'status': 'active'})
        body = self.fixtures.load('subscriptions.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_subscriptions_RESOURCE_FILTER(self, method, url, body,
                                               headers):
        # Multiple resources are joined with a comma; status defaults to all.
        expected_params = {'resource': 'cpu,mem', 'status': 'all'}
        self.assertUrlContainsQueryParams(url, expected_params)
        body = self.fixtures.load('subscriptions.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_subscriptions_7272_action_auto_renew(self, method, url,
                                                      body, headers):
        body = ''
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_subscriptions_CREATE_SUBSCRIPTION(self, method, url, body,
                                                   headers):
        body = self.fixtures.load('create_subscription.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_capabilities(self, method, url, body, headers):
        body = self.fixtures.load('capabilities.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_servers_availability_groups(self, method, url, body,
                                             headers):
        body = self.fixtures.load('servers_avail_groups.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_2_0_drives_availability_groups(self, method, url, body,
                                            headers):
        body = self.fixtures.load('drives_avail_groups.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-libcloud-2.2.1/libcloud/test/compute/test_types.py0000664000175000017500000000366112701023453023426 0ustar kamikami00000000000000
from unittest import TestCase

from libcloud.compute.types import Provider, NodeState, StorageVolumeState, \
    VolumeSnapshotState, Type


class TestType(Type):
    # Concrete Type subclass used only to exercise to/fromstring helpers.
    INUSE = "inuse"


class TestTestType(TestCase):
    model = TestType
    attribute = TestType.INUSE

    def test_provider_tostring(self):
        self.assertEqual(Provider.tostring(TestType.INUSE), "INUSE")

    def
test_provider_fromstring(self):
        self.assertEqual(TestType.fromstring("inuse"), TestType.INUSE)

    def test_provider_fromstring_caseinsensitive(self):
        # fromstring must accept any casing of the value.
        self.assertEqual(TestType.fromstring("INUSE"), TestType.INUSE)


class TestProvider(TestCase):
    def test_provider_tostring(self):
        self.assertEqual(Provider.tostring(Provider.RACKSPACE), "RACKSPACE")

    def test_provider_fromstring(self):
        self.assertEqual(Provider.fromstring("rackspace"),
                         Provider.RACKSPACE)


class TestNodeState(TestCase):
    def test_nodestate_tostring(self):
        self.assertEqual(NodeState.tostring(NodeState.RUNNING), "RUNNING")

    def test_nodestate_fromstring(self):
        self.assertEqual(NodeState.fromstring("running"), NodeState.RUNNING)


class TestStorageVolumeState(TestCase):
    def test_storagevolumestate_tostring(self):
        self.assertEqual(
            StorageVolumeState.tostring(StorageVolumeState.AVAILABLE),
            "AVAILABLE"
        )

    def test_storagevolumestate_fromstring(self):
        self.assertEqual(
            StorageVolumeState.fromstring("available"),
            StorageVolumeState.AVAILABLE
        )


class TestVolumeSnapshotState(TestCase):
    def test_volumesnapshotstate_tostring(self):
        self.assertEqual(
            VolumeSnapshotState.tostring(VolumeSnapshotState.AVAILABLE),
            "AVAILABLE"
        )

    def test_volumesnapshotstate_fromstring(self):
        self.assertEqual(
            VolumeSnapshotState.fromstring("available"),
            VolumeSnapshotState.AVAILABLE
        )
apache-libcloud-2.2.1/libcloud/test/compute/test_onapp.py0000664000175000017500000001242113153541406023376 0ustar kamikami00000000000000
import unittest
import sys

from libcloud.compute.base import Node
from libcloud.compute.drivers.onapp import OnAppNodeDriver
from libcloud.test import MockHttp, LibcloudTestCase
from libcloud.test.secrets import ONAPP_PARAMS
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.utils.py3 import httplib


class OnAppNodeTestCase(LibcloudTestCase):
    """Fixture-driven tests for the OnApp compute driver."""
    driver_klass = OnAppNodeDriver

    def setUp(self):
        # Route all driver HTTP traffic through the mock transport.
        self.driver_klass.connectionCls.conn_class = OnAppMockHttp
        self.driver = OnAppNodeDriver(*ONAPP_PARAMS)

    def test_create_node(self):
        node = self.driver.create_node(
            name='onapp-new-fred',
            ex_memory=512,
            ex_cpus=4,
            ex_cpu_shares=4,
            ex_hostname='onapp-new-fred',
            ex_template_id='template_id',
            ex_primary_disk_size=100,
            ex_swap_disk_size=1,
            ex_required_virtual_machine_build=0,
            ex_required_ip_address_assignment=0
        )
        extra = node.extra
        self.assertEqual('onapp-new-fred', node.name)
        self.assertEqual('456789', node.id)
        self.assertEqual('456789', node.id)
        self.assertEqual('delivered', node.state)
        self.assertEqual(True, extra['booted'])
        self.assertEqual('passwd', extra['initial_root_password'])
        self.assertEqual('8.8.8.8', extra['local_remote_access_ip_address'])
        self.assertEqual(['192.168.15.73'], node.private_ips)
        self.assertEqual([], node.public_ips)

    def test_destroy_node(self):
        node = Node('identABC', 'testnode',
                    ['123.123.123.123'], [],
                    {'state': 'test', 'template_id': 88}, None)
        res = self.driver.destroy_node(node=node)
        self.assertTrue(res)

    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        extra = nodes[0].extra
        private_ips = nodes[0].private_ips
        self.assertEqual(1, len(nodes))
        self.assertEqual('onapp-fred', nodes[0].name)
        self.assertEqual('123456', nodes[0].id)
        self.assertEqual(True, extra['booted'])
        self.assertEqual('passwd', extra['initial_root_password'])
        self.assertEqual('9.9.9.9', extra['local_remote_access_ip_address'])
        self.assertEqual(1, len(private_ips))
        self.assertEqual('192.168.15.72', private_ips[0])

    def test_list_images(self):
        images = self.driver.list_images()
        extra = images[0].extra
        self.assertEqual(1, len(images))
        self.assertEqual('CentOS 5.11 x64', images[0].name)
        self.assertEqual('123456', images[0].id)
        self.assertEqual(True, extra['allowed_swap'])
        self.assertEqual(256, extra['min_memory_size'])
        self.assertEqual('rhel', extra['distribution'])

    def test_list_key_pairs(self):
        keys = self.driver.list_key_pairs()
        self.assertEqual(2, len(keys))
        # NOTE(review): key "names" here are integer ids from the fixture.
        self.assertEqual(1, keys[0].name)
        self.assertIsNotNone(keys[0].public_key)
        self.assertIsNotNone(keys[1].public_key)

    def test_get_key_pair(self):
        key = self.driver.get_key_pair(1)
        self.assertEqual(1, key.name)
        self.assertIsNotNone(key.public_key)

    def test_import_key_pair_from_string(self):
        key = self.driver.import_key_pair_from_string(
            'name',
            'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8uuUq')
        self.assertEqual(3, key.name)
        self.assertEqual(
            'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8uuUq',
            key.public_key)

    def test_delete_key_pair(self):
        key = self.driver.get_key_pair(1)
        response = self.driver.delete_key_pair(key)
        self.assertTrue(response)


class OnAppMockHttp(MockHttp):
    """Mock HTTP transport returning canned OnApp API fixtures."""
    fixtures = ComputeFileFixtures('onapp')

    def _virtual_machines_json(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('list_nodes.json')
        else:
            body = self.fixtures.load('create_node.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _virtual_machines_identABC_json(self, method, url, body, headers):
        return (
            httplib.NO_CONTENT,
            '',
            {},
            httplib.responses[httplib.NO_CONTENT]
        )

    def _templates_json(self, method, url, body, headers):
        body = self.fixtures.load('list_images.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _profile_json(self, method, url, body, headers):
        body = self.fixtures.load('profile.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _users_123_ssh_keys_json(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('list_key_pairs.json')
        else:
            body = self.fixtures.load('import_key_pair.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _users_123_ssh_keys_1_json(self, method, url, body, headers):
        body = self.fixtures.load('get_key_pair.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _settings_ssh_keys_1_json(self, method, url, body, headers):
        return (httplib.NO_CONTENT, '', {},
                httplib.responses[httplib.NO_CONTENT])


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-libcloud-2.2.1/libcloud/test/compute/test_oneandone.py0000664000175000017500000012315413153541406024235 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from libcloud.compute.base import NodeImage, NodeAuthPassword, NodeLocation from libcloud.utils.py3 import httplib from libcloud.test import unittest, MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.compute.types import Provider, NodeState from libcloud.test.secrets import ONEANDONE_PARAMS from libcloud.compute.providers import get_driver class OneAndOneTests(unittest.TestCase): def setUp(self): oneAndOne = get_driver(Provider.ONEANDONE) oneAndOne.connectionCls.conn_class = OneAndOneMockHttp self.driver = oneAndOne(ONEANDONE_PARAMS) ''' Function tests for listing items ''' def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 4) def test_list_locations(self): sizes = self.driver.list_locations() self.assertEqual(len(sizes), 4) def test_list_images(self): appliances = self.driver.list_images("IMAGE") self.assertEqual(len(appliances), 102) def test_get_image(self): appliance = self.driver.get_image('img_1') self.assertNotEqual(appliance, None) def test_list_nodes(self): nodes = 
self.driver.list_nodes() self.assertEqual(len(nodes), 5) counter = 0 for node in nodes: if counter == 0: self.assertEqual(node.id, '8A7D5122BDC173B6E52223878CEF2748') self.assertEqual(node.name, 'Docs Content Ubuntu 16.04-1') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.public_ips, ['50.21.182.126']) self.assertEqual(node.private_ips, []) if counter == 1: self.assertEqual(node.id, 'E7D36EC025C73796035BF4F171379025') self.assertEqual(node.name, 'Docs Content Test Server: CentOS 7-1') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.public_ips, ['62.151.179.99']) self.assertEqual(node.private_ips, []) if counter == 2: self.assertEqual(node.id, 'DDDC4CCA34AAB08132FA1E40F9FEAC25') self.assertEqual(node.name, 'App Dev Server 5') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.public_ips, ['70.35.206.196']) self.assertEqual(node.private_ips, []) if counter == 3: self.assertEqual(node.id, 'D5C5C1D01249DE9B88BE3DAE973AA090') self.assertEqual(node.name, 'Docs Test Server: CentOS 7-2') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.public_ips, ['74.208.88.88']) self.assertEqual(node.private_ips, []) if counter == 4: self.assertEqual(node.id, 'FB1765588A90364835782061CE48BA8E') self.assertEqual(node.name, 'Docs Content Test Server Ubuntu 16.04-2') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.public_ips, ['70.35.206.233']) self.assertEqual(node.private_ips, []) counter += 1 def test_create_node(self): node = self.driver.create_node(name="name", image=NodeImage( id="image_id", name=None, driver=self.driver), ex_fixed_instance_size_id="instance_id", location=NodeLocation( "datacenter_id", name="name", country="GB", driver=self.driver), auth=NodeAuthPassword("password"), ex_ip="1.1.1.1", ex_monitoring_policy_id="mp_id", ex_firewall_policy_id="fw_id", ex_loadbalancer_id="lb_id", ex_description="description", ex_power_on="SHUTDOWN") self.assertEqual(node.id, 
"E7D36EC025C73796035BF4F171379025") self.assertEqual(node.name, "Docs Content Test Server: CentOS 7-1") self.assertEqual(node.extra["description"], "My server description") self.assertEqual(node.extra["status"]["percent"], None) self.assertEqual(node.extra["status"]["state"], "POWERED_ON") self.assertEqual(node.extra["image"]["id"], "B5F778B85C041347BCDCFC3172AB3F3C") self.assertEqual(node.extra["image"]["name"], "centos7-64std") self.assertEqual(node.extra["datacenter"]["id"], "908DC2072407C94C8054610AD5A53B8C") self.assertEqual(node.extra["datacenter"]["country_code"], "US") self.assertEqual(node.extra["datacenter"]["location"], "United States of America") self.assertEqual(node.extra["hardware"]["fixed_instance_size_id"], "65929629F35BBFBA63022008F773F3EB") self.assertEqual(node.extra["hardware"]["vcore"], 1) self.assertEqual(node.extra["hardware"]["hdds"][0]["id"], "CDB278D95A92CB4C379A9CAAD6759F02") self.assertEqual(node.extra["hardware"]["hdds"][0]["size"], 40) self.assertEqual(node.extra["hardware"]["hdds"][0]["is_main"], True) self.assertEqual(node.extra["hardware"]["cores_per_processor"], 1) self.assertEqual(node.extra["hardware"]["vcore"], 1) self.assertEqual(node.extra["hardware"]["ram"], 1) self.assertEqual(node.extra["ips"][0]["id"], "FDBE99EDD57F8596CBF71B6B64BD0A92") self.assertEqual(node.extra["ips"][0]["ip"], "62.151.179.99") self.assertEqual(node.extra["monitoring_policy"], None) self.assertEqual(node.extra["alerts"], []) self.assertEqual(node.extra["snapshot"], None) self.assertEqual(node.extra["dvd"], None) self.assertEqual(node.extra["private_networks"], None) def test_ex_list_datacenters(self): datacenters = self.driver.ex_list_datacenters() self.assertEqual(len(datacenters), 4) def test_ex_shutdown_server(self): server = self.driver.ex_shutdown_server("srv_1") self.assertNotEqual(server, None) def test_reboot_node(self): node = self.driver.ex_get_server("srv_1") node = self.driver.reboot_node(node) self.assertNotEqual(node, None) def 
test_ex_get_server(self): server = self.driver.ex_get_server("srv_1") self.assertNotEqual(server, None) def test_destroy_node(self): server = self.driver.ex_get_server("srv_1") node = self.driver.destroy_node(server) self.assertNotEqual(node, None) def test_get_server_hardware(self): hardware = self.driver.ex_get_server_hardware("srv_1") self.assertNotEqual(hardware, None) self.assertEqual(hardware['vcore'], 1) self.assertEqual(hardware['cores_per_processor'], 1) self.assertEqual(hardware['ram'], 2) self.assertEqual(hardware['fixed_instance_size_id'], 0) self.assertNotEqual(hardware['hdds'], None) self.assertEqual(hardware['hdds'][0]['id'], '8C626C1A7005D0D1F527143C413D461E') self.assertEqual(hardware['hdds'][0]['is_main'], True) self.assertEqual(hardware['hdds'][0]['size'], 40) def test_rename_server(self): server = self.driver.ex_rename_server("srv_1", "name") self.assertNotEqual(server, None) def test_ex_modify_server_hardware(self): node = self.driver.ex_modify_server_hardware("srv_1", vcore=1) self.assertNotEqual(node, None) def test_add_hdd(self): node = self.driver.ex_add_hdd("srv_1", 1, True) self.assertNotEqual(node, None) def test_modify_hdd(self): node = self.driver.ex_modify_server_hardware("srv_1", "hdd_id", 50) self.assertNotEqual(node, None) def test_remove_hdd(self): node = self.driver.ex_remove_hdd("srv_1", "hdd_id") self.assertNotEqual(node, None) def test_ex_get_server_image(self): image = self.driver.ex_get_server_image("srv_1") self.assertNotEqual(image, None) self.assertEqual(image['id'], "76EBF29C1250167C8754B2B3D1C05F68") self.assertEqual(image['name'], "centos7-64std") def test_ex_reinstall_server_image(self): node = self.driver.ex_reinstall_server_image("srv_1", "img_id", "password") self.assertNotEqual(node, None) def test_ex_list_server_ips(self): ips = self.driver.ex_list_server_ips("srv_1") self.assertEqual(len(ips), 1) def test_ex_get_server_ip(self): ip = self.driver.ex_get_server_ip("srv_1", "ip_id") self.assertNotEqual(ip, None) 
def test_ex_assign_server(self): node = self.driver.ex_assign_server_ip("srv_1", "IPV$") self.assertNotEqual(node, None) def test_ex_remove_server_ip(self): node = self.driver.ex_remove_server_ip("srv_1", "ip_id", keep_ip=True) self.assertNotEqual(node, None) def test_ex_create_firewall_policy(self): rules = [ { "protocol": "TCP", "port_from": 80, "port_to": 80, "source": "0.0.0.0" }, { "protocol": "TCP", "port_from": 443, "port_to": 443, "source": "0.0.0.0" } ] firewall = self.driver \ .ex_create_firewall_policy("name", rules, description="desc") self.assertNotEqual(firewall, None) def test_ex_list_firewall_policies(self): firewall = self.driver.ex_list_firewall_policies() self.assertNotEqual(firewall, None) self.assertEqual(len(firewall), 2) def test_ex_get_firewall_policy(self): firewall = self.driver.ex_get_firewall_policy("fw_id") self.assertNotEqual(firewall, None) def test_ex_delete_firewall_policy(self): firewall = self.driver.ex_delete_firewall_policy("fw_id") self.assertNotEqual(firewall, None) def test_ex_get_server_firewall_policies(self): firewall = self.driver \ .ex_get_server_firewall_policies("srv_id", "ip_id") self.assertNotEqual(firewall, None) def test_ex_remove_server_firewall_policies(self): node = self.driver \ .ex_remove_server_firewall_policy("srv_id", "ip_id") self.assertNotEqual(node, None) def test_ex_add_server_firewall_policy(self): node = self.driver \ .ex_add_server_firewall_policy("srv_id", "ip_id", "fw_id") self.assertNotEqual(node, None) def test_ex_list_shared_storages(self): storages = self.driver.ex_list_shared_storages() self.assertEquals(len(storages), 3) def test_ex_get_shared_storage(self): storage = self.driver.ex_get_shared_storage('storage_1') self.assertNotEqual(storage, None) self.assertEqual(storage['id'], "6AD2F180B7B666539EF75A02FE227084") self.assertEqual(storage['size'], 200) self.assertEqual(storage['state'], 'ACTIVE') self.assertEqual(storage['description'], 'My shared storage test description') 
self.assertEqual(storage['datacenter']['id'], 'D0F6D8C8ED29D3036F94C27BBB7BAD36') self.assertEqual(storage['datacenter']['location'], 'USA') self.assertEqual(storage['datacenter']['country_code'], 'US') self.assertEqual(storage['cloudpanel_id'], 'vid35780') self.assertEqual(storage['size_used'], '0.00') self.assertEqual(storage["cifs_path"], "vid50995.nas1.lanvid50995") self.assertEqual(storage["nfs_path"], "vid50995.nas1.lan/:vid50995") self.assertEqual(storage["name"], "My shared storage test") self.assertEqual(storage["creation_date"], "2015-05-06T08:33:25+00:00") self.assertEqual(storage['servers'][0]['id'], '638ED28205B1AFD7ADEF569C725DD85F') self.assertEqual(storage['servers'][0]["name"], "My server 1") self.assertEqual(storage['servers'][0]["rights"], "RW") def test_ex_create_shared_storage(self): storage = self.driver.ex_create_shared_storage( name='TEST', size=2, datacenter_id='dc_id') self.assertNotEqual(storage, None) def test_ex_delete_shared_storage(self): storage = self.driver.ex_delete_shared_storage('storage_1') self.assertNotEqual(storage, None) def test_ex_attach_server_to_shared_storage(self): storage = self.driver.ex_attach_server_to_shared_storage( 'storage_1', 'srv_1', 'RW') self.assertNotEqual(storage, None) def test_ex_get_shared_storage_server(self): storage = self.driver.ex_get_shared_storage_server( 'storage_1', 'srv_1') self.assertNotEqual(storage, None) def test_ex_detach_server_from_shared_storage(self): storage = self.driver.ex_detach_server_from_shared_storage( 'storage_1', 'srv_1') self.assertNotEqual(storage, None) def test_ex_create_load_balancers(self): rules = [ { "protocol": "TCP", "port_balancer": 80, "port_server": 80, "source": "0.0.0.0" }, { "protocol": "TCP", "port_balancer": 9999, "port_server": 8888, "source": "0.0.0.0" } ] load_balancer = self.driver. 
\ ex_create_load_balancer(name='name', method='ROUNDROBIN', rules=rules, persistence=True, persistence_time=1) self.assertNotEqual(load_balancer, None) def test_ex_list_load_balancers(self): load_balancers = self.driver.ex_list_load_balancers() self.assertEqual(len(load_balancers), 2) def test_update_load_balancer(self): load_balancer = self.driver. \ ex_update_load_balancer("lb_1", name='new name') self.assertNotEqual(load_balancer, None) def test_ex_add_servers_to_load_balancer(self): load_balancer = self.driver. \ ex_add_servers_to_load_balancer('lb_1', server_ips=["1.1.1.1"]) self.assertNotEqual(load_balancer, None) def test_ex_remove_server_from_load_balancer(self): load_balancer = self.driver. \ ex_remove_server_from_load_balancer('lb_1', server_ip="srv_1") self.assertNotEqual(load_balancer, None) def test_ex_add_load_balancer_rule(self): load_balancer = self.driver. \ ex_add_load_balancer_rule('lb_1', protocol='TCP', port_balancer=82, port_server=81, source='0.0.0.0') self.assertNotEqual(load_balancer, None) def test_ex_remove_load_balancer_rule(self): load_balancer = self.driver. \ ex_remove_load_balancer_rule('lb_1', 'rule_1') self.assertNotEqual(load_balancer, None) def test_ex_get_load_balancer(self): load_balancer = self.driver. \ ex_get_load_balancer('lb_1') self.assertNotEqual(load_balancer, None) def test_ex_get_load_balancer_server_ip(self): server_ip = self.driver. \ ex_get_load_balancer_server_ip('lb_1', 'srv_1') self.assertNotEqual(server_ip, None) def test_ex_list_load_balancer_rules(self): rules = self.driver. \ ex_list_load_balancer_rules('lb_1') self.assertNotEqual(rules, None) self.assertEqual(len(rules), 2) def test_ex_get_load_balancer_rule(self): rule = self.driver. \ ex_get_load_balancer_rule('lb_1', 'rule_1') self.assertNotEqual(rule, None) def test_ex_delete_load_balancer(self): load_balancer = self.driver. 
\ ex_delete_load_balancer('lb_1') self.assertNotEqual(load_balancer, None) def test_ex_list_public_ips(self): ips = self.driver.ex_list_public_ips() self.assertNotEqual(ips, None) self.assertEqual(len(ips), 3) def test_ex_create_public_ip(self): ip = self.driver.ex_create_public_ip('IPv4') self.assertNotEqual(ip, None) def test_ex_get_public_ip(self): ip = self.driver.ex_get_public_ip('ip_1') self.assertNotEqual(ip, None) def test_ex_delete_public_ip(self): ip = self.driver.ex_delete_public_ip('ip_1') self.assertNotEqual(ip, None) def test_ex_update_public_ip(self): ip = self.driver.ex_update_public_ip('ip_1', "reverse.dns") self.assertNotEqual(ip, None) def test_ex_create_monitoring_policy(self): thresholds = { "cpu": { "warning": { "value": 90, "alert": False }, "critical": { "value": 95, "alert": False } }, "ram": { "warning": { "value": 90, "alert": False }, "critical": { "value": 95, "alert": False } }, "disk": { "warning": { "value": 80, "alert": False }, "critical": { "value": 90, "alert": False } }, "transfer": { "warning": { "value": 1000, "alert": False }, "critical": { "value": 2000, "alert": False } }, "internal_ping": { "warning": { "value": 50, "alert": False }, "critical": { "value": 100, "alert": False } } } ports = [ { "protocol": "TCP", "port": "22", "alert_if": "RESPONDING", "email_notification": True } ] processes = [ { "process": "test", "alert_if": "NOT_RUNNING", "email_notification": True } ] policy = self.driver. 
\ ex_create_monitoring_policy(name='test_name', thresholds=thresholds, ports=ports, processes=processes, description='description', email='test@domain.com', agent=True) self.assertNotEqual(policy, None) def test_ex_list_monitoring_policies(self): policies = self.driver.ex_list_monitoring_policies() self.assertNotEqual(policies, None) self.assertEqual(len(policies), 2) def test_ex_get_monitoring_policy(self): policy = self.driver.ex_get_monitoring_policy('pol_1') self.assertNotEqual(policy, None) def test_ex_update_monitoring_policy(self): thresholds = { "cpu": { "warning": { "value": 90, "alert": False }, "critical": { "value": 95, "alert": False } }, "ram": { "warning": { "value": 90, "alert": False }, "critical": { "value": 95, "alert": False } }, "disk": { "warning": { "value": 80, "alert": False }, "critical": { "value": 90, "alert": False } }, "transfer": { "warning": { "value": 1000, "alert": False }, "critical": { "value": 2000, "alert": False } }, "internal_ping": { "warning": { "value": 50, "alert": False }, "critical": { "value": 100, "alert": False } } } policy = self.driver. \ ex_update_monitoring_policy('pol_1', email='test@domain.com', thresholds=thresholds, name='new name', description='new description') self.assertNotEqual(policy, None) def test_ex_get_monitoring_policy_ports(self): ports = self.driver. \ ex_get_monitoring_policy_ports('pol_1') self.assertNotEqual(ports, None) self.assertEqual(len(ports), 2) def test_ex_get_monitoring_policy_port(self): port = self.driver. \ ex_get_monitoring_policy_port('pol_1', 'port_1') self.assertNotEqual(port, None) def test_ex_remove_monitoring_policy_port(self): port = self.driver. \ ex_remove_monitoring_policy_port('pol_1', 'port_1') self.assertNotEqual(port, None) def test_ex_add_monitoring_policy_ports(self): new_ports = [ { "protocol": "TCP", "port": "80", "alert_if": "RESPONDING", "email_notification": True } ] ports = self.driver. 
\ ex_add_monitoring_policy_ports('pol_1', new_ports) self.assertNotEqual(ports, None) self.assertEqual(len(ports), 2) def test_ex_get_monitoring_policy_processes(self): processes = self.driver. \ ex_get_monitoring_policy_processes('pol_1') self.assertNotEqual(processes, None) def test_ex_get_monitoring_policy_process(self): process = self.driver. \ ex_get_monitoring_policy_process('pol_1', 'proc_1') self.assertNotEqual(process, None) def test_ex_remove_monitoring_policy_process(self): policy = self.driver. \ ex_remove_monitoring_policy_process('pol_1', 'proc_1') self.assertNotEqual(policy, None) def test_ex_add_monitoring_policy_processes(self): processes = { "processes": [ { "process": "taskmmgr", "alert_if": "RUNNING", "email_notification": True } ] } processes = self.driver. \ ex_add_monitoring_policy_processes(policy_id='pol_1', processes=processes) self.assertNotEqual(processes, None) self.assertEqual(len(processes), 2) def test_ex_list_monitoring_policy_servers(self): servers = self.driver.ex_list_monitoring_policy_servers('pol_1') self.assertNotEqual(servers, None) self.assertEqual(len(servers), 2) def test_ex_add_servers_to_monitoring_policy(self): servers = self.driver. \ ex_add_servers_to_monitoring_policy('pol_1', 'serv_1') self.assertNotEqual(servers, None) self.assertEqual(len(servers), 2) def test_ex_remove_server_from_monitoring_policy(self): policy = self.driver. 
\ ex_remove_server_from_monitoring_policy('pol_1', 'serv_1') self.assertNotEqual(policy, None) class OneAndOneMockHttp(MockHttp): fixtures = ComputeFileFixtures('oneandone') ''' Operation on Server Appliances GET - Fetches Server Appliances ''' def _v1_server_appliances(self, method, url, body, headers): body = self.fixtures.load('list_images.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_server_appliances_img_1(self, method, url, body, headers): body = self.fixtures.load('get_image.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_servers(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('list_servers.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == "POST": body = self.fixtures.load('create_node.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) def _v1_create_node(self, method, url, body_headers): body = self.fixtures.load('list_servers.json') return ( httplib.ACCEPTED, {}, body, httplib.responses[httplib.ACCEPTED] ) def _v1_datacenters( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('ex_list_datacenters.json') return ( httplib.OK, body, {'content-type': 'application/json'}, httplib.responses[httplib.OK] ) def _v1_servers_srv_1( self, method, url, body, headers ): pass if method == 'PUT': body = self.fixtures.load('describe_server.json') return ( httplib.OK, body, {'content-type': 'application/json'}, httplib.responses[httplib.OK] ) if method == 'GET': body = self.fixtures.load('describe_server.json') return ( httplib.OK, body, {'content-type': 'application/json'}, httplib.responses[httplib.OK] ) if method == 'DELETE': body = self.fixtures.load('describe_server.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_servers_srv_1_status_action(self, method, url, body_headers, id): body = self.fixtures.load('describe_server.json') return ( 
httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) def _v1_servers_srv_1_hardware( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('server_hardware.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'PUT': body = self.fixtures.load('describe_server.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) def _v1_servers_srv_1_hardware_hdds( self, method, url, body, headers ): if method == 'POST': body = self.fixtures.load('describe_server.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'PUT': body = self.fixtures.load('describe_server.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) def _v1_servers_srv_1_hardware_hdds_hdd_id( self, method, url, body, headers ): if method == 'DELETE': body = self.fixtures.load('describe_server.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'PUT': body = self.fixtures.load('describe_server.json') return ( httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED] ) def _v1_servers_srv_1_image( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('get_server_image.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'PUT': body = self.fixtures.load('describe_server.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_servers_srv_1_ips( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('server_ips.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'POST': body = self.fixtures.load('describe_server.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_servers_srv_1_ips_ip_id( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('server_ip.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 
'DELETE': body = self.fixtures.load('describe_server.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_firewall_policies( self, method, url, body, headers ): if method == 'POST': body = self.fixtures.load('describe_firewall_policy.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'GET': body = self.fixtures.load('list_firewall_policies.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_firewall_policy_fw_id( self, method, url, body, headers ): if method == 'GET': body = self.fixtures.load('describe_firewall_policy.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'DELETE': body = self.fixtures.load('describe_firewall_policy.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_servers_srv_id_ips_ip_id_firewall_policy( self, method, url, body, header ): if method == 'GET': body = self.fixtures.load('describe_id_firewall_policy.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'DELETE': body = self.fixtures.load('describe_server.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'POST': body = self.fixtures.load('describe_server.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_shared_storages( self, method, url, body, header ): if method == 'GET' or method == 'POST': body = self.fixtures.load('list_shared_storages.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_shared_storages_storage_1( self, method, url, body, header ): if method == 'GET' or method == 'DELETE': body = self.fixtures.load('shared_storage.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_shared_storages_storage_1_servers( self, method, url, body, header ): if method == 'POST' or method == 'DELETE': body = self.fixtures.load('shared_storage.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) 
def _v1_shared_storages_storage_1_servers_srv_1( self, method, url, body, header ): if method == 'GET' or method == 'DELETE': body = self.fixtures.load('shared_storage.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_load_balancers( self, method, url, body, header ): if method == 'POST': body = self.fixtures.load('load_balancer.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'GET': body = self.fixtures.load('list_load_balancer.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_load_balancers_lb_1( self, method, url, body, header ): body = self.fixtures.load('load_balancer.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_load_balancers_lb_1_server_ips( self, method, url, body, header ): if method == 'GET': body = self.fixtures.load('load_balancer_server_ips.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'POST': body = self.fixtures.load('load_balancer.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_load_balancers_lb_1_rules( self, method, url, body, header ): if method == 'POST': body = self.fixtures.load('load_balancer.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'GET': body = self.fixtures.load('load_balancer_rules.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_load_balancers_lb_1_server_ips_srv_1( self, method, url, body, header ): if method == 'DELETE': body = self.fixtures.load('load_balancer.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'GET': body = self.fixtures.load('load_balancer_server_ip.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_load_balancers_lb_1_rules_rule_1( self, method, url, body, header ): if method == 'DELETE': body = self.fixtures.load('load_balancer.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if 
method == 'GET': body = self.fixtures.load('load_balancer_rule.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_public_ips( self, method, url, body, header ): if method == 'GET': body = self.fixtures.load('list_public_ips.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'POST': body = self.fixtures.load('public_ip.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_public_ips_ip_1( self, method, url, body, header ): body = self.fixtures.load('public_ip.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_monitoring_policies( self, method, url, body, header ): if method == 'POST': body = self.fixtures.load('monitoring_policy.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if method == 'GET': body = self.fixtures.load('list_monitoring_policies.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_monitoring_policies_pol_1( self, method, url, body, header ): body = self.fixtures.load('monitoring_policy.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_monitoring_policies_pol_1_ports( self, method, url, body, header ): body = self.fixtures.load('monitoring_policy_ports.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_monitoring_policies_pol_1_ports_port_1( self, method, url, body, header ): if method == 'GET': body = self.fixtures.load('monitoring_policy_port.json') elif method == 'POST': body = self.fixtures.load('monitoring_policy.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_monitoring_policies_pol_1_processes( self, method, url, body, header ): body = self.fixtures.load('monitoring_policy_processes.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_monitoring_policies_pol_1_processes_proc_1( self, method, url, body, header ): if method == 'GET': body = 
self.fixtures.load('monitoring_policy_process.json') elif method == 'POST': body = self.fixtures.load('monitoring_policy.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_monitoring_policies_pol_1_servers( self, method, url, body, header ): body = self.fixtures.load('monitoring_policy_servers.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_monitoring_policies_pol_1_servers_serv_1( self, method, url, body, header ): if method == 'GET': body = self.fixtures.load('monitoring_policy_servers.json') elif method == 'POST': body = self.fixtures.load('monitoring_policy.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) def _v1_servers_fixed_instance_sizes( self, method, url, body, header ): body = self.fixtures.load('fixed_instance_sizes.json') return ( httplib.OK, body, {}, httplib.responses[httplib.OK] ) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_gandi.py0000664000175000017500000003450113153541406023346 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import unittest import sys import random import string from libcloud.utils.py3 import httplib from libcloud.compute.drivers.gandi import GandiNodeDriver from libcloud.common.gandi import GandiException from libcloud.compute.types import NodeState from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import GANDI_PARAMS from libcloud.test.common.test_gandi import BaseGandiMockHttp class GandiTests(unittest.TestCase): node_name = 'test2' def setUp(self): GandiNodeDriver.connectionCls.conn_class = GandiMockHttp GandiMockHttp.type = None self.driver = GandiNodeDriver(*GANDI_PARAMS) def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertTrue(len(nodes) > 0) self.assertTrue(len(nodes[0].public_ips) > 1) def test_list_locations(self): loc = list(filter(lambda x: 'france' in x.country.lower(), self.driver.list_locations()))[0] self.assertEqual(loc.country, 'France') def test_list_images(self): loc = list(filter(lambda x: 'france' in x.country.lower(), self.driver.list_locations()))[0] images = self.driver.list_images(loc) self.assertTrue(len(images) > 2) def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertTrue(len(sizes) >= 1) def test_destroy_node_running(self): nodes = self.driver.list_nodes() test_node = list(filter(lambda x: x.state == NodeState.RUNNING, nodes))[0] self.assertTrue(self.driver.destroy_node(test_node)) def test_destroy_node_halted(self): nodes = self.driver.list_nodes() test_node = list(filter(lambda x: x.state == NodeState.TERMINATED, nodes))[0] self.assertTrue(self.driver.destroy_node(test_node)) def test_reboot_node(self): nodes = self.driver.list_nodes() test_node = list(filter(lambda x: x.state == NodeState.RUNNING, nodes))[0] self.assertTrue(self.driver.reboot_node(test_node)) def test_create_node(self): login = 'libcloud' passwd = ''.join(random.choice(string.ascii_letters) for i in range(10)) # Get france datacenter loc = list(filter(lambda x: 'france' in x.country.lower(), 
self.driver.list_locations()))[0] # Get a debian image images = self.driver.list_images(loc) images = [x for x in images if x.name.lower().startswith('debian')] img = list(filter(lambda x: '5' in x.name, images))[0] # Get a configuration size size = self.driver.list_sizes()[0] node = self.driver.create_node(name=self.node_name, login=login, password=passwd, image=img, location=loc, size=size) self.assertEqual(node.name, self.node_name) def test_create_volume(self): loc = list(filter(lambda x: 'france' in x.country.lower(), self.driver.list_locations()))[0] volume = self.driver.create_volume( size=1024, name='libcloud', location=loc) self.assertEqual(volume.name, 'libcloud') self.assertEqual(volume.size, 1024) def test_list_volumes(self): disks = self.driver.list_volumes() self.assertTrue(len(disks) > 0) def test_destroy_volume(self): volumes = self.driver.list_volumes() test_vol = list(filter(lambda x: x.name == 'test_disk', volumes))[0] self.assertTrue(self.driver.destroy_volume(test_vol)) def test_attach_volume(self): disks = self.driver.list_volumes() nodes = self.driver.list_nodes() res = self.driver.attach_volume(nodes[0], disks[0]) self.assertTrue(res) def test_detach_volume(self): disks = self.driver.list_volumes() nodes = self.driver.list_nodes() res = self.driver.detach_volume(nodes[0], disks[0]) self.assertTrue(res) def test_ex_list_interfaces(self): ifaces = self.driver.ex_list_interfaces() self.assertTrue(len(ifaces) > 0) def test_ex_attach_interface(self): ifaces = self.driver.ex_list_interfaces() nodes = self.driver.list_nodes() res = self.driver.ex_node_attach_interface(nodes[0], ifaces[0]) self.assertTrue(res) def test_ex_detach_interface(self): ifaces = self.driver.ex_list_interfaces() nodes = self.driver.list_nodes() res = self.driver.ex_node_detach_interface(nodes[0], ifaces[0]) self.assertTrue(res) def test_ex_snapshot_disk(self): disks = self.driver.list_volumes() self.assertTrue(self.driver.ex_snapshot_disk(disks[2])) 
self.assertRaises(GandiException, self.driver.ex_snapshot_disk, disks[0]) def test_ex_update_disk(self): disks = self.driver.list_volumes() self.assertTrue(self.driver.ex_update_disk(disks[0], new_size=4096)) def test_list_key_pairs(self): keys = self.driver.list_key_pairs() self.assertTrue(len(keys) > 0) def test_get_key_pair(self): key = self.driver.get_key_pair(10) self.assertEqual(key.name, 'testkey') def test_import_key_pair_from_string(self): key = self.driver.import_key_pair_from_string('testkey', '12345') self.assertEqual(key.name, 'testkey') self.assertEqual(key.extra['id'], 10) def test_delete_key_pair(self): response = self.driver.delete_key_pair(10) self.assertTrue(response) def test_ex_get_node(self): node = self.driver.ex_get_node(34951) self.assertEqual(node.name, "test2") def test_ex_get_volume(self): volume = self.driver.ex_get_volume(1263) self.assertEqual(volume.name, "libcloud") class GandiRatingTests(unittest.TestCase): """Tests where rating model is involved""" node_name = 'test2' def setUp(self): GandiNodeDriver.connectionCls.conn_class = GandiMockRatingHttp GandiMockRatingHttp.type = None self.driver = GandiNodeDriver(*GANDI_PARAMS) def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 4) def test_create_node(self): login = 'libcloud' passwd = ''.join(random.choice(string.ascii_letters) for i in range(10)) # Get france datacenter loc = list(filter(lambda x: 'france' in x.country.lower(), self.driver.list_locations()))[0] # Get a debian image images = self.driver.list_images(loc) images = [x for x in images if x.name.lower().startswith('debian')] img = list(filter(lambda x: '5' in x.name, images))[0] # Get a configuration size size = self.driver.list_sizes()[0] node = self.driver.create_node(name=self.node_name, login=login, password=passwd, image=img, location=loc, size=size) self.assertEqual(node.name, self.node_name) class GandiMockHttp(BaseGandiMockHttp): fixtures = ComputeFileFixtures('gandi') def 
_xmlrpc__hosting_datacenter_list(self, method, url, body, headers): body = self.fixtures.load('datacenter_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_image_list(self, method, url, body, headers): body = self.fixtures.load('image_list_dc0.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_list(self, method, url, body, headers): body = self.fixtures.load('vm_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_ip_list(self, method, url, body, headers): body = self.fixtures.load('ip_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_account_info(self, method, url, body, headers): body = self.fixtures.load('account_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_info(self, method, url, body, headers): body = self.fixtures.load('vm_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_delete(self, method, url, body, headers): body = self.fixtures.load('vm_delete.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__operation_info(self, method, url, body, headers): body = self.fixtures.load('operation_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_create_from(self, method, url, body, headers): body = self.fixtures.load('vm_create_from.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_reboot(self, method, url, body, headers): body = self.fixtures.load('vm_reboot.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_stop(self, method, url, body, headers): body = self.fixtures.load('vm_stop.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_iface_list(self, method, url, body, headers): body = self.fixtures.load('iface_list.xml') 
return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_disk_list(self, method, url, body, headers): body = self.fixtures.load('disk_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_iface_attach(self, method, url, body, headers): body = self.fixtures.load('iface_attach.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_iface_detach(self, method, url, body, headers): body = self.fixtures.load('iface_detach.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_disk_attach(self, method, url, body, headers): body = self.fixtures.load('disk_attach.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_disk_detach(self, method, url, body, headers): body = self.fixtures.load('disk_detach.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_disk_create(self, method, url, body, headers): body = self.fixtures.load('disk_create.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_disk_create_from(self, method, url, body, headers): body = self.fixtures.load('disk_create_from.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_disk_info(self, method, url, body, headers): body = self.fixtures.load('disk_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_disk_update(self, method, url, body, headers): body = self.fixtures.load('disk_update.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_disk_delete(self, method, url, body, headers): body = self.fixtures.load('disk_delete.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_ssh_info(self, method, url, body, headers): body = self.fixtures.load('ssh_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_xmlrpc__hosting_ssh_list(self, method, url, body, headers): body = self.fixtures.load('ssh_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_ssh_create(self, method, url, body, headers): body = self.fixtures.load('ssh_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_ssh_delete(self, method, url, body, headers): body = self.fixtures.load('ssh_delete.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class GandiMockRatingHttp(BaseGandiMockHttp): """Fixtures needed for tests related to rating model""" fixtures = ComputeFileFixtures('gandi') def _xmlrpc__hosting_datacenter_list(self, method, url, body, headers): body = self.fixtures.load('datacenter_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_image_list(self, method, url, body, headers): body = self.fixtures.load('image_list_dc0.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_create_from(self, method, url, body, headers): body = self.fixtures.load('vm_create_from.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__operation_info(self, method, url, body, headers): body = self.fixtures.load('operation_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc__hosting_vm_info(self, method, url, body, headers): body = self.fixtures.load('vm_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) # Specific to rating tests def _xmlrpc__hosting_account_info(self, method, url, body, headers): body = self.fixtures.load('account_info_rating.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_vpsnet.py0000664000175000017500000002071013153541406023600 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # 
contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import httplib from libcloud.compute.drivers.vpsnet import VPSNetNodeDriver from libcloud.compute.base import Node from libcloud.compute.types import NodeState from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.secrets import VPSNET_PARAMS from libcloud.test.file_fixtures import ComputeFileFixtures class VPSNetTests(unittest.TestCase, TestCaseMixin): def setUp(self): VPSNetNodeDriver.connectionCls.conn_class = VPSNetMockHttp self.driver = VPSNetNodeDriver(*VPSNET_PARAMS) def test_create_node(self): VPSNetMockHttp.type = 'create' image = self.driver.list_images()[0] size = self.driver.list_sizes()[0] node = self.driver.create_node('foo', image, size) self.assertEqual(node.name, 'foo') def test_list_nodes(self): VPSNetMockHttp.type = 'virtual_machines' node = self.driver.list_nodes()[0] self.assertEqual(node.id, '1384') self.assertEqual(node.state, NodeState.RUNNING) def test_reboot_node(self): VPSNetMockHttp.type = 'virtual_machines' node = self.driver.list_nodes()[0] VPSNetMockHttp.type = 'reboot' ret = self.driver.reboot_node(node) self.assertEqual(ret, True) def test_destroy_node(self): VPSNetMockHttp.type = 'delete' node = Node('2222', None, None, None, None, self.driver) 
ret = self.driver.destroy_node(node) self.assertTrue(ret) VPSNetMockHttp.type = 'delete_fail' node = Node('2223', None, None, None, None, self.driver) self.assertRaises(Exception, self.driver.destroy_node, node) def test_list_images(self): VPSNetMockHttp.type = 'templates' ret = self.driver.list_images() self.assertEqual(ret[0].id, '9') self.assertEqual(ret[-1].id, '160') def test_list_sizes(self): VPSNetMockHttp.type = 'sizes' ret = self.driver.list_sizes() self.assertEqual(len(ret), 1) self.assertEqual(ret[0].id, '1') self.assertEqual(ret[0].name, '1 Node') def test_destroy_node_response(self): # should return a node object node = Node('2222', None, None, None, None, self.driver) VPSNetMockHttp.type = 'delete' ret = self.driver.destroy_node(node) self.assertTrue(isinstance(ret, bool)) def test_reboot_node_response(self): # should return a node object VPSNetMockHttp.type = 'virtual_machines' node = self.driver.list_nodes()[0] VPSNetMockHttp.type = 'reboot' ret = self.driver.reboot_node(node) self.assertTrue(isinstance(ret, bool)) class VPSNetMockHttp(MockHttp): fixtures = ComputeFileFixtures('vpsnet') def _nodes_api10json_sizes(self, method, url, body, headers): body = """[{"slice":{"virtual_machine_id":8592,"id":12256,"consumer_id":0}}, {"slice":{"virtual_machine_id":null,"id":12258,"consumer_id":0}}, {"slice":{"virtual_machine_id":null,"id":12434,"consumer_id":0}}]""" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _nodes_api10json_create(self, method, url, body, headers): body = """[{"slice":{"virtual_machine_id":8592,"id":12256,"consumer_id":0}}, {"slice":{"virtual_machine_id":null,"id":12258,"consumer_id":0}}, {"slice":{"virtual_machine_id":null,"id":12434,"consumer_id":0}}]""" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _virtual_machines_2222_api10json_delete_fail(self, method, url, body, headers): return (httplib.FORBIDDEN, '', {}, httplib.responses[httplib.FORBIDDEN]) def _virtual_machines_2222_api10json_delete(self, 
method, url, body, headers): return (httplib.OK, '', {}, httplib.responses[httplib.OK]) def _virtual_machines_1384_reboot_api10json_reboot(self, method, url, body, headers): body = """{ "virtual_machine": { "running": true, "updated_at": "2009-05-15T06:55:02-04:00", "power_action_pending": false, "system_template_id": 41, "id": 1384, "cloud_id": 3, "domain_name": "demodomain.com", "hostname": "web01", "consumer_id": 0, "backups_enabled": false, "password": "a8hjsjnbs91", "label": "foo", "slices_count": null, "created_at": "2009-04-16T08:17:39-04:00" } }""" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _virtual_machines_api10json_create(self, method, url, body, headers): body = """{ "virtual_machine": { "running": true, "updated_at": "2009-05-15T06:55:02-04:00", "power_action_pending": false, "system_template_id": 41, "id": 1384, "cloud_id": 3, "domain_name": "demodomain.com", "hostname": "web01", "consumer_id": 0, "backups_enabled": false, "password": "a8hjsjnbs91", "label": "foo", "slices_count": null, "created_at": "2009-04-16T08:17:39-04:00" } }""" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _virtual_machines_api10json_virtual_machines(self, method, url, body, headers): body = """ [{ "virtual_machine": { "running": true, "updated_at": "2009-05-15T06:55:02-04:00", "power_action_pending": false, "system_template_id": 41, "id": 1384, "cloud_id": 3, "domain_name": "demodomain.com", "hostname": "web01", "consumer_id": 0, "backups_enabled": false, "password": "a8hjsjnbs91", "label": "Web Server 01", "slices_count": null, "created_at": "2009-04-16T08:17:39-04:00" } }, { "virtual_machine": { "running": true, "updated_at": "2009-05-15T06:55:02-04:00", "power_action_pending": false, "system_template_id": 41, "id": 1385, "cloud_id": 3, "domain_name": "demodomain.com", "hostname": "mysql01", "consumer_id": 0, "backups_enabled": false, "password": "dsi8h38hd2s", "label": "MySQL Server 01", "slices_count": null, "created_at": 
"2009-04-16T08:17:39-04:00" } }]""" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _available_clouds_api10json_templates(self, method, url, body, headers): body = self.fixtures.load('_available_clouds_api10json_templates.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _available_clouds_api10json_create(self, method, url, body, headers): body = """ [{"cloud":{"system_templates":[{"id":9,"label":"Ubuntu 8.04 x64"}],"id":2,"label":"USA VPS Cloud"}}] """ return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_opennebula.py0000664000175000017500000013136313153541406024420 0ustar kamikami00000000000000# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad # Complutense de Madrid (dsa-research.org) # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenNebula.org test suite. 
""" __docformat__ = 'epytext' import unittest import sys from libcloud.utils.py3 import httplib from libcloud.compute.base import Node, NodeImage, NodeSize, NodeState from libcloud.compute.drivers.opennebula import OpenNebulaNodeDriver from libcloud.compute.drivers.opennebula import OpenNebulaNetwork from libcloud.compute.drivers.opennebula import OpenNebulaNodeSize from libcloud.compute.drivers.opennebula import ACTION import libcloud.compute.drivers.opennebula from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test import MockHttp from libcloud.test.secrets import OPENNEBULA_PARAMS libcloud.compute.drivers.opennebula.API_HOST = 'dummy' class OpenNebula_1_4_Tests(unittest.TestCase): """ OpenNebula.org test suite for OpenNebula v1.4. """ def setUp(self): """ Setup test environment. """ OpenNebulaNodeDriver.connectionCls.conn_class = OpenNebula_1_4_MockHttp self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('1.4',), host='dummy') def test_create_node(self): """ Test create_node functionality. 
""" image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) size = NodeSize(id=1, name='small', ram=None, disk=None, bandwidth=None, price=None, driver=self.driver) networks = list() networks.append(OpenNebulaNetwork(id=5, name='Network 5', address='192.168.0.0', size=256, driver=self.driver)) networks.append(OpenNebulaNetwork(id=15, name='Network 15', address='192.168.1.0', size=256, driver=self.driver)) node = self.driver.create_node(name='Compute 5', image=image, size=size, networks=networks) self.assertEqual(node.id, '5') self.assertEqual(node.name, 'Compute 5') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].name, None) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].address, '192.168.0.1') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[1].name, None) self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].address, '192.168.1.1') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.private_ips, []) self.assertEqual(node.image.id, '5') self.assertEqual(node.image.extra['dev'], 'sda1') def test_destroy_node(self): """ Test destroy_node functionality. """ node = Node(5, None, None, None, None, self.driver) ret = self.driver.destroy_node(node) self.assertTrue(ret) def test_list_nodes(self): """ Test list_nodes functionality. 
""" nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 3) node = nodes[0] self.assertEqual(node.id, '5') self.assertEqual(node.name, 'Compute 5') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, None) self.assertEqual(node.public_ips[0].address, '192.168.0.1') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, None) self.assertEqual(node.public_ips[1].address, '192.168.1.1') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.private_ips, []) self.assertEqual(node.image.id, '5') self.assertEqual(node.image.extra['dev'], 'sda1') node = nodes[1] self.assertEqual(node.id, '15') self.assertEqual(node.name, 'Compute 15') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, None) self.assertEqual(node.public_ips[0].address, '192.168.0.2') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, None) self.assertEqual(node.public_ips[1].address, '192.168.1.2') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.private_ips, []) self.assertEqual(node.image.id, '15') self.assertEqual(node.image.extra['dev'], 'sda1') node = nodes[2] self.assertEqual(node.id, '25') self.assertEqual(node.name, 'Compute 25') self.assertEqual(node.state, NodeState.UNKNOWN) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, None) self.assertEqual(node.public_ips[0].address, '192.168.0.3') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, None) self.assertEqual(node.public_ips[1].address, '192.168.1.3') self.assertEqual(node.public_ips[1].size, 1) 
self.assertEqual(node.private_ips, []) self.assertEqual(node.image, None) def test_list_images(self): """ Test list_images functionality. """ images = self.driver.list_images() self.assertEqual(len(images), 2) image = images[0] self.assertEqual(image.id, '5') self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(image.extra['size'], '2048') self.assertEqual(image.extra['url'], 'file:///images/ubuntu/jaunty.img') image = images[1] self.assertEqual(image.id, '15') self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(image.extra['size'], '2048') self.assertEqual(image.extra['url'], 'file:///images/ubuntu/jaunty.img') def test_list_sizes(self): """ Test list_sizes functionality. """ sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 3) size = sizes[0] self.assertEqual(size.id, '1') self.assertEqual(size.name, 'small') self.assertEqual(size.ram, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[1] self.assertEqual(size.id, '2') self.assertEqual(size.name, 'medium') self.assertEqual(size.ram, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[2] self.assertEqual(size.id, '3') self.assertEqual(size.name, 'large') self.assertEqual(size.ram, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) def test_list_locations(self): """ Test list_locations functionality. """ locations = self.driver.list_locations() self.assertEqual(len(locations), 1) location = locations[0] self.assertEqual(location.id, '0') self.assertEqual(location.name, '') self.assertEqual(location.country, '') def test_ex_list_networks(self): """ Test ex_list_networks functionality. 
""" networks = self.driver.ex_list_networks() self.assertEqual(len(networks), 2) network = networks[0] self.assertEqual(network.id, '5') self.assertEqual(network.name, 'Network 5') self.assertEqual(network.address, '192.168.0.0') self.assertEqual(network.size, '256') network = networks[1] self.assertEqual(network.id, '15') self.assertEqual(network.name, 'Network 15') self.assertEqual(network.address, '192.168.1.0') self.assertEqual(network.size, '256') def test_ex_node_action(self): """ Test ex_node_action functionality. """ node = Node(5, None, None, None, None, self.driver) ret = self.driver.ex_node_action(node, ACTION.STOP) self.assertTrue(ret) class OpenNebula_2_0_Tests(unittest.TestCase): """ OpenNebula.org test suite for OpenNebula v2.0 through v2.2. """ def setUp(self): """ Setup test environment. """ OpenNebulaNodeDriver.connectionCls.conn_class = OpenNebula_2_0_MockHttp self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('2.0',), host='dummy') def test_create_node(self): """ Test create_node functionality. 
""" image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) size = OpenNebulaNodeSize(id=1, name='small', ram=1024, cpu=1, disk=None, bandwidth=None, price=None, driver=self.driver) networks = list() networks.append(OpenNebulaNetwork(id=5, name='Network 5', address='192.168.0.0', size=256, driver=self.driver)) networks.append(OpenNebulaNetwork(id=15, name='Network 15', address='192.168.1.0', size=256, driver=self.driver)) context = {'hostname': 'compute-5'} node = self.driver.create_node(name='Compute 5', image=image, size=size, networks=networks, context=context) self.assertEqual(node.id, '5') self.assertEqual(node.name, 'Compute 5') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, 'Network 5') self.assertEqual(node.public_ips[0].address, '192.168.0.1') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:01') self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, 'Network 15') self.assertEqual(node.public_ips[1].address, '192.168.1.1') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:01') self.assertEqual(node.private_ips, []) self.assertTrue(len([s for s in self.driver.list_sizes() if s.id == node.size.id]) == 1) self.assertEqual(node.image.id, '5') self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(node.image.extra['type'], 'DISK') self.assertEqual(node.image.extra['target'], 'hda') context = node.extra['context'] self.assertEqual(context['hostname'], 'compute-5') def test_destroy_node(self): """ Test destroy_node functionality. """ node = Node(5, None, None, None, None, self.driver) ret = self.driver.destroy_node(node) self.assertTrue(ret) def test_list_nodes(self): """ Test list_nodes functionality. 
""" nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 3) node = nodes[0] self.assertEqual(node.id, '5') self.assertEqual(node.name, 'Compute 5') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, 'Network 5') self.assertEqual(node.public_ips[0].address, '192.168.0.1') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:01') self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, 'Network 15') self.assertEqual(node.public_ips[1].address, '192.168.1.1') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:01') self.assertEqual(node.private_ips, []) self.assertTrue(len([size for size in self.driver.list_sizes() if size.id == node.size.id]) == 1) self.assertEqual(node.size.id, '1') self.assertEqual(node.size.name, 'small') self.assertEqual(node.size.ram, 1024) self.assertTrue(node.size.cpu is None or isinstance(node.size.cpu, int)) self.assertTrue(node.size.vcpu is None or isinstance(node.size.vcpu, int)) self.assertEqual(node.size.cpu, 1) self.assertEqual(node.size.vcpu, None) self.assertEqual(node.size.disk, None) self.assertEqual(node.size.bandwidth, None) self.assertEqual(node.size.price, None) self.assertTrue(len([image for image in self.driver.list_images() if image.id == node.image.id]) == 1) self.assertEqual(node.image.id, '5') self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(node.image.extra['type'], 'DISK') self.assertEqual(node.image.extra['target'], 'hda') context = node.extra['context'] self.assertEqual(context['hostname'], 'compute-5') node = nodes[1] self.assertEqual(node.id, '15') self.assertEqual(node.name, 'Compute 15') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].id, '5') 
self.assertEqual(node.public_ips[0].name, 'Network 5') self.assertEqual(node.public_ips[0].address, '192.168.0.2') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:02') self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, 'Network 15') self.assertEqual(node.public_ips[1].address, '192.168.1.2') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:02') self.assertEqual(node.private_ips, []) self.assertTrue(len([size for size in self.driver.list_sizes() if size.id == node.size.id]) == 1) self.assertEqual(node.size.id, '1') self.assertEqual(node.size.name, 'small') self.assertEqual(node.size.ram, 1024) self.assertTrue(node.size.cpu is None or isinstance(node.size.cpu, int)) self.assertTrue(node.size.vcpu is None or isinstance(node.size.vcpu, int)) self.assertEqual(node.size.cpu, 1) self.assertEqual(node.size.vcpu, None) self.assertEqual(node.size.disk, None) self.assertEqual(node.size.bandwidth, None) self.assertEqual(node.size.price, None) self.assertTrue(len([image for image in self.driver.list_images() if image.id == node.image.id]) == 1) self.assertEqual(node.image.id, '15') self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(node.image.extra['type'], 'DISK') self.assertEqual(node.image.extra['target'], 'hda') context = node.extra['context'] self.assertEqual(context['hostname'], 'compute-15') node = nodes[2] self.assertEqual(node.id, '25') self.assertEqual(node.name, 'Compute 25') self.assertEqual(node.state, NodeState.UNKNOWN) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, 'Network 5') self.assertEqual(node.public_ips[0].address, '192.168.0.3') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:03') self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, 'Network 
15') self.assertEqual(node.public_ips[1].address, '192.168.1.3') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:03') self.assertEqual(node.private_ips, []) self.assertEqual(node.size, None) self.assertEqual(node.image, None) context = node.extra['context'] self.assertEqual(context, {}) def test_list_images(self): """ Test list_images functionality. """ images = self.driver.list_images() self.assertEqual(len(images), 2) image = images[0] self.assertEqual(image.id, '5') self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(image.extra['description'], 'Ubuntu 9.04 LAMP Description') self.assertEqual(image.extra['type'], 'OS') self.assertEqual(image.extra['size'], '2048') image = images[1] self.assertEqual(image.id, '15') self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(image.extra['description'], 'Ubuntu 9.04 LAMP Description') self.assertEqual(image.extra['type'], 'OS') self.assertEqual(image.extra['size'], '2048') def test_list_sizes(self): """ Test list_sizes functionality. 
""" sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 4) size = sizes[0] self.assertEqual(size.id, '1') self.assertEqual(size.name, 'small') self.assertEqual(size.ram, 1024) self.assertTrue(size.cpu is None or isinstance(size.cpu, int)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 1) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[1] self.assertEqual(size.id, '2') self.assertEqual(size.name, 'medium') self.assertEqual(size.ram, 4096) self.assertTrue(size.cpu is None or isinstance(size.cpu, int)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 4) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[2] self.assertEqual(size.id, '3') self.assertEqual(size.name, 'large') self.assertEqual(size.ram, 8192) self.assertTrue(size.cpu is None or isinstance(size.cpu, int)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 8) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[3] self.assertEqual(size.id, '4') self.assertEqual(size.name, 'custom') self.assertEqual(size.ram, 0) self.assertEqual(size.cpu, 0) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) def test_list_locations(self): """ Test list_locations functionality. """ locations = self.driver.list_locations() self.assertEqual(len(locations), 1) location = locations[0] self.assertEqual(location.id, '0') self.assertEqual(location.name, '') self.assertEqual(location.country, '') def test_ex_list_networks(self): """ Test ex_list_networks functionality. 
""" networks = self.driver.ex_list_networks() self.assertEqual(len(networks), 2) network = networks[0] self.assertEqual(network.id, '5') self.assertEqual(network.name, 'Network 5') self.assertEqual(network.address, '192.168.0.0') self.assertEqual(network.size, '256') network = networks[1] self.assertEqual(network.id, '15') self.assertEqual(network.name, 'Network 15') self.assertEqual(network.address, '192.168.1.0') self.assertEqual(network.size, '256') class OpenNebula_3_0_Tests(unittest.TestCase): """ OpenNebula.org test suite for OpenNebula v3.0. """ def setUp(self): """ Setup test environment. """ OpenNebulaNodeDriver.connectionCls.conn_class = OpenNebula_3_0_MockHttp self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.0',), host='dummy') def test_ex_list_networks(self): """ Test ex_list_networks functionality. """ networks = self.driver.ex_list_networks() self.assertEqual(len(networks), 2) network = networks[0] self.assertEqual(network.id, '5') self.assertEqual(network.name, 'Network 5') self.assertEqual(network.address, '192.168.0.0') self.assertEqual(network.size, '256') self.assertEqual(network.extra['public'], 'YES') network = networks[1] self.assertEqual(network.id, '15') self.assertEqual(network.name, 'Network 15') self.assertEqual(network.address, '192.168.1.0') self.assertEqual(network.size, '256') self.assertEqual(network.extra['public'], 'NO') def test_ex_node_set_save_name(self): """ Test ex_node_action functionality. """ image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) node = Node(5, None, None, None, None, self.driver, image=image) ret = self.driver.ex_node_set_save_name(node, 'test') self.assertTrue(ret) class OpenNebula_3_2_Tests(unittest.TestCase): """ OpenNebula.org test suite for OpenNebula v3.2. """ def setUp(self): """ Setup test environment. 
""" OpenNebulaNodeDriver.connectionCls.conn_class = OpenNebula_3_2_MockHttp self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.2',), host='dummy') def test_reboot_node(self): """ Test reboot_node functionality. """ image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) node = Node(5, None, None, None, None, self.driver, image=image) ret = self.driver.reboot_node(node) self.assertTrue(ret) def test_list_sizes(self): """ Test ex_list_networks functionality. """ sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 3) size = sizes[0] self.assertEqual(size.id, '1') self.assertEqual(size.name, 'small') self.assertEqual(size.ram, 1024) self.assertTrue(size.cpu is None or isinstance(size.cpu, float)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 1) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[1] self.assertEqual(size.id, '2') self.assertEqual(size.name, 'medium') self.assertEqual(size.ram, 4096) self.assertTrue(size.cpu is None or isinstance(size.cpu, float)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 4) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[2] self.assertEqual(size.id, '3') self.assertEqual(size.name, 'large') self.assertEqual(size.ram, 8192) self.assertTrue(size.cpu is None or isinstance(size.cpu, float)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 8) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) class OpenNebula_3_6_Tests(unittest.TestCase): """ OpenNebula.org test suite for OpenNebula v3.6. """ def setUp(self): """ Setup test environment. 
""" OpenNebulaNodeDriver.connectionCls.conn_class = OpenNebula_3_6_MockHttp self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.6',), host='dummy') def test_create_volume(self): new_volume = self.driver.create_volume(1000, 'test-volume') self.assertEqual(new_volume.id, '5') self.assertEqual(new_volume.size, 1000) self.assertEqual(new_volume.name, 'test-volume') def test_destroy_volume(self): images = self.driver.list_images() self.assertEqual(len(images), 2) image = images[0] ret = self.driver.destroy_volume(image) self.assertTrue(ret) def test_attach_volume(self): nodes = self.driver.list_nodes() node = nodes[0] images = self.driver.list_images() image = images[0] ret = self.driver.attach_volume(node, image, 'sda') self.assertTrue(ret) def test_detach_volume(self): images = self.driver.list_images() image = images[1] ret = self.driver.detach_volume(image) self.assertTrue(ret) nodes = self.driver.list_nodes() # node with only a single associated image node = nodes[1] ret = self.driver.detach_volume(node.image) self.assertFalse(ret) def test_list_volumes(self): volumes = self.driver.list_volumes() self.assertEqual(len(volumes), 2) volume = volumes[0] self.assertEqual(volume.id, '5') self.assertEqual(volume.size, 2048) self.assertEqual(volume.name, 'Ubuntu 9.04 LAMP') volume = volumes[1] self.assertEqual(volume.id, '15') self.assertEqual(volume.size, 1024) self.assertEqual(volume.name, 'Debian Sid') class OpenNebula_3_8_Tests(unittest.TestCase): """ OpenNebula.org test suite for OpenNebula v3.8. """ def setUp(self): """ Setup test environment. """ OpenNebulaNodeDriver.connectionCls.conn_class = OpenNebula_3_8_MockHttp self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.8',), host='dummy') def test_list_sizes(self): """ Test ex_list_networks functionality. 
""" sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 3) size = sizes[0] self.assertEqual(size.id, '1') self.assertEqual(size.name, 'small') self.assertEqual(size.ram, 1024) self.assertEqual(size.cpu, 1) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[1] self.assertEqual(size.id, '2') self.assertEqual(size.name, 'medium') self.assertEqual(size.ram, 4096) self.assertEqual(size.cpu, 4) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[2] self.assertEqual(size.id, '3') self.assertEqual(size.name, 'large') self.assertEqual(size.ram, 8192) self.assertEqual(size.cpu, 8) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) class OpenNebula_1_4_MockHttp(MockHttp): """ Mock HTTP server for testing v1.4 of the OpenNebula.org compute driver. """ fixtures = ComputeFileFixtures('opennebula_1_4') def _compute(self, method, url, body, headers): """ Compute pool resources. """ if method == 'GET': body = self.fixtures.load('computes.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('compute_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _storage(self, method, url, body, headers): """ Storage pool resources. """ if method == 'GET': body = self.fixtures.load('storage.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('disk_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _network(self, method, url, body, headers): """ Network pool resources. 
""" if method == 'GET': body = self.fixtures.load('networks.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('network_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _compute_5(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _compute_15(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _compute_25(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_25.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _storage_5(self, method, url, body, headers): """ Storage entry resource. """ if method == 'GET': body = self.fixtures.load('disk_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _storage_15(self, method, url, body, headers): """ Storage entry resource. 
""" if method == 'GET': body = self.fixtures.load('disk_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _network_5(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures.load('network_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _network_15(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures.load('network_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class OpenNebula_2_0_MockHttp(MockHttp): """ Mock HTTP server for testing v2.0 through v3.2 of the OpenNebula.org compute driver. """ fixtures = ComputeFileFixtures('opennebula_2_0') def _compute(self, method, url, body, headers): """ Compute pool resources. """ if method == 'GET': body = self.fixtures.load('compute_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('compute_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _storage(self, method, url, body, headers): """ Storage pool resources. """ if method == 'GET': body = self.fixtures.load('storage_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('storage_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _network(self, method, url, body, headers): """ Network pool resources. 
""" if method == 'GET': body = self.fixtures.load('network_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('network_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _compute_5(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _compute_15(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _compute_25(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_25.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _storage_5(self, method, url, body, headers): """ Storage entry resource. """ if method == 'GET': body = self.fixtures.load('storage_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _storage_15(self, method, url, body, headers): """ Storage entry resource. 
""" if method == 'GET': body = self.fixtures.load('storage_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _network_5(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures.load('network_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _network_15(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures.load('network_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) class OpenNebula_3_0_MockHttp(OpenNebula_2_0_MockHttp): """ Mock HTTP server for testing v3.0 of the OpenNebula.org compute driver. """ fixtures_3_0 = ComputeFileFixtures('opennebula_3_0') def _network(self, method, url, body, headers): """ Network pool resources. """ if method == 'GET': body = self.fixtures_3_0.load('network_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('network_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _network_5(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures_3_0.load('network_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _network_15(self, method, url, body, headers): """ Network entry resource. 
""" if method == 'GET': body = self.fixtures_3_0.load('network_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) class OpenNebula_3_2_MockHttp(OpenNebula_3_0_MockHttp): """ Mock HTTP server for testing v3.2 of the OpenNebula.org compute driver. """ fixtures_3_2 = ComputeFileFixtures('opennebula_3_2') def _compute_5(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _instance_type(self, method, url, body, headers): """ Instance type pool. """ if method == 'GET': body = self.fixtures_3_2.load('instance_type_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class OpenNebula_3_6_MockHttp(OpenNebula_3_2_MockHttp): """ Mock HTTP server for testing v3.6 of the OpenNebula.org compute driver. 
""" fixtures_3_6 = ComputeFileFixtures('opennebula_3_6') def _storage(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('storage_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures_3_6.load('storage_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _compute_5(self, method, url, body, headers): if method == 'GET': body = self.fixtures_3_6.load('compute_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _compute_5_action(self, method, url, body, headers): body = self.fixtures_3_6.load('compute_5.xml') if method == 'POST': return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'GET': return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _compute_15(self, method, url, body, headers): if method == 'GET': body = self.fixtures_3_6.load('compute_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _storage_10(self, method, url, body, headers): """ Storage entry resource. """ if method == 'GET': body = self.fixtures_3_6.load('disk_10.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _storage_15(self, method, url, body, headers): """ Storage entry resource. """ if method == 'GET': body = self.fixtures_3_6.load('disk_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class OpenNebula_3_8_MockHttp(OpenNebula_3_2_MockHttp): """ Mock HTTP server for testing v3.8 of the OpenNebula.org compute driver. 
""" fixtures_3_8 = ComputeFileFixtures('opennebula_3_8') def _instance_type(self, method, url, body, headers): """ Instance type pool. """ if method == 'GET': body = self.fixtures_3_8.load('instance_type_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _instance_type_small(self, method, url, body, headers): """ Small instance type. """ if method == 'GET': body = self.fixtures_3_8.load('instance_type_small.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _instance_type_medium(self, method, url, body, headers): """ Medium instance type pool. """ if method == 'GET': body = self.fixtures_3_8.load('instance_type_medium.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _instance_type_large(self, method, url, body, headers): """ Large instance type pool. """ if method == 'GET': body = self.fixtures_3_8.load('instance_type_large.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_bluebox.py0000664000175000017500000001106613153541406023725 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys import unittest from libcloud.utils.py3 import httplib from libcloud.compute.drivers.bluebox import BlueboxNodeDriver as Bluebox from libcloud.compute.base import Node, NodeAuthPassword from libcloud.compute.types import NodeState from libcloud.test import MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import BLUEBOX_PARAMS class BlueboxTest(unittest.TestCase): def setUp(self): Bluebox.connectionCls.conn_class = BlueboxMockHttp self.driver = Bluebox(*BLUEBOX_PARAMS) def test_create_node(self): node = self.driver.create_node( name='foo', size=self.driver.list_sizes()[0], image=self.driver.list_images()[0], auth=NodeAuthPassword("test123") ) self.assertTrue(isinstance(node, Node)) self.assertEqual(node.state, NodeState.PENDING) self.assertEqual(node.name, 'foo.apitest.blueboxgrid.com') def test_list_nodes(self): node = self.driver.list_nodes()[0] self.assertEqual(node.name, 'foo.apitest.blueboxgrid.com') self.assertEqual(node.state, NodeState.RUNNING) def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 4) ids = [s.id for s in sizes] for size in sizes: self.assertTrue(size.price > 0) self.assertTrue('94fd37a7-2606-47f7-84d5-9000deda52ae' in ids) self.assertTrue('b412f354-5056-4bf0-a42f-6ddd998aa092' in ids) self.assertTrue('0cd183d3-0287-4b1a-8288-b3ea8302ed58' in ids) self.assertTrue('b9b87a5b-2885-4a2e-b434-44a163ca6251' in ids) def test_list_images(self): images = self.driver.list_images() image = images[0] self.assertEqual(len(images), 10) self.assertEqual(image.name, 'CentOS 5 (Latest Release)') self.assertEqual(image.id, 'c66b8145-f768-45ef-9878-395bf8b1b7ff') def test_reboot_node(self): node = self.driver.list_nodes()[0] ret = self.driver.reboot_node(node) self.assertTrue(ret) def test_destroy_node(self): node = self.driver.list_nodes()[0] ret = self.driver.destroy_node(node) self.assertTrue(ret) class BlueboxMockHttp(MockHttp): fixtures = 
ComputeFileFixtures('bluebox') def _api_blocks_json(self, method, url, body, headers): if method == "POST": body = self.fixtures.load('api_blocks_json_post.json') else: body = self.fixtures.load('api_blocks_json.json') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _api_block_products_json(self, method, url, body, headers): body = self.fixtures.load('api_block_products_json.json') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _api_block_templates_json(self, method, url, body, headers): body = self.fixtures.load('api_block_templates_json.json') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json(self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json') else: body = self.fixtures.load( 'api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json(self, method, url, body, headers): body = self.fixtures.load( 'api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_ec2.py0000664000175000017500000025220713153541406022742 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement import os import sys from datetime import datetime from libcloud.utils.iso8601 import UTC from libcloud.utils.py3 import httplib from libcloud.compute.drivers.ec2 import EC2NodeDriver from libcloud.compute.drivers.ec2 import EC2PlacementGroup from libcloud.compute.drivers.ec2 import NimbusNodeDriver, EucNodeDriver from libcloud.compute.drivers.ec2 import OutscaleSASNodeDriver from libcloud.compute.drivers.ec2 import IdempotentParamError from libcloud.compute.drivers.ec2 import REGION_DETAILS, VALID_EC2_REGIONS from libcloud.compute.drivers.ec2 import ExEC2AvailabilityZone from libcloud.compute.drivers.ec2 import EC2NetworkSubnet from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation from libcloud.compute.base import StorageVolume, VolumeSnapshot from libcloud.compute.types import KeyPairDoesNotExistError, StorageVolumeState, \ VolumeSnapshotState from libcloud.test import MockHttp, LibcloudTestCase from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test import unittest from libcloud.test.secrets import EC2_PARAMS null_fingerprint = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \ '00:00:00:00:00' class BaseEC2Tests(LibcloudTestCase): def test_instantiate_driver_valid_regions(self): regions = REGION_DETAILS.keys() regions = [d for d in regions if d != 'nimbus' and d != 'cn-north-1'] region_endpoints = [ EC2NodeDriver(*EC2_PARAMS, **{'region': region}).connection.host for region in regions ] # Verify that each driver doesn't get the same API 
host endpoint self.assertEqual(len(region_endpoints), len(set(region_endpoints)), "Multiple Region Drivers were given the same API endpoint") def test_instantiate_driver_invalid_regions(self): for region in ['invalid', 'nimbus']: try: EC2NodeDriver(*EC2_PARAMS, **{'region': region}) except ValueError: pass else: self.fail('Invalid region, but exception was not thrown') def test_list_sizes_valid_regions(self): unsupported_regions = list() for region in VALID_EC2_REGIONS: driver = EC2NodeDriver(*EC2_PARAMS, **{'region': region}) try: driver.list_sizes() except: unsupported_regions.append(region) if unsupported_regions: self.fail('Cannot list sizes from ec2 regions: %s' % unsupported_regions) class EC2Tests(LibcloudTestCase, TestCaseMixin): image_name = 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml' region = 'us-east-1' def setUp(self): EC2MockHttp.test = self EC2NodeDriver.connectionCls.conn_class = EC2MockHttp EC2MockHttp.use_param = 'Action' EC2MockHttp.type = None self.driver = EC2NodeDriver(*EC2_PARAMS, **{'region': self.region}) def test_instantiate_driver_with_token(self): token = 'temporary_credentials_token' driver = EC2NodeDriver(*EC2_PARAMS, **{'region': self.region, 'token': token}) self.assertTrue(hasattr(driver, 'token'), 'Driver has no attribute token') self.assertEquals(token, driver.token, "Driver token does not match with provided token") def test_driver_with_token_signature_version(self): token = 'temporary_credentials_token' driver = EC2NodeDriver(*EC2_PARAMS, **{'region': self.region, 'token': token}) kwargs = driver._ex_connection_class_kwargs() self.assertIn('signature_version', kwargs) self.assertEquals('4', kwargs['signature_version'], 'Signature version is not 4 with temporary credentials') def test_create_node(self): image = NodeImage(id='ami-be3adfd7', name=self.image_name, driver=self.driver) size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='foo', 
image=image, size=size) self.assertEqual(node.id, 'i-2ba64342') self.assertEqual(node.name, 'foo') self.assertEqual(node.extra['tags']['Name'], 'foo') self.assertEqual(len(node.extra['tags']), 1) def test_create_node_with_ex_mincount(self): image = NodeImage(id='ami-be3adfd7', name=self.image_name, driver=self.driver) size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='foo', image=image, size=size, ex_mincount=1, ex_maxcount=10) self.assertEqual(node.id, 'i-2ba64342') self.assertEqual(node.name, 'foo') self.assertEqual(node.extra['tags']['Name'], 'foo') self.assertEqual(len(node.extra['tags']), 1) def test_create_node_with_ex_assign_public_ip(self): # assertions are done in _create_ex_assign_public_ip_RunInstances EC2MockHttp.type = 'create_ex_assign_public_ip' image = NodeImage(id='ami-11111111', name=self.image_name, driver=self.driver) size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) subnet = EC2NetworkSubnet('subnet-11111111', "test_subnet", "pending") self.driver.create_node( name='foo', image=image, size=size, ex_subnet=subnet, ex_security_group_ids=[ 'sg-11111111' ], ex_assign_public_ip=True, ) def test_create_node_with_ex_terminate_on_shutdown(self): EC2MockHttp.type = 'create_ex_terminate_on_shutdown' image = NodeImage(id='ami-be3adfd7', name=self.image_name, driver=self.driver) size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) # The important part about the test is asserted inside # EC2MockHttp._create_ex_terminate_on_shutdown self.driver.create_node(name='foo', image=image, size=size, ex_terminate_on_shutdown=True) def test_create_node_with_metadata(self): image = NodeImage(id='ami-be3adfd7', name=self.image_name, driver=self.driver) size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='foo', image=image, size=size, 
ex_metadata={'Bar': 'baz', 'Num': '42'}) self.assertEqual(node.name, 'foo') self.assertEqual(node.extra['tags']['Name'], 'foo') self.assertEqual(node.extra['tags']['Bar'], 'baz') self.assertEqual(node.extra['tags']['Num'], '42') self.assertEqual(len(node.extra['tags']), 3) def test_create_node_idempotent(self): EC2MockHttp.type = 'idempotent' image = NodeImage(id='ami-be3adfd7', name=self.image_name, driver=self.driver) size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) token = 'testclienttoken' node = self.driver.create_node(name='foo', image=image, size=size, ex_clienttoken=token) self.assertEqual(node.id, 'i-2ba64342') self.assertEqual(node.extra['client_token'], token) # from: http://docs.amazonwebservices.com/AWSEC2/latest/DeveloperGuide/index.html?Run_Instance_Idempotency.html # If you repeat the request with the same client token, but change # another request parameter, Amazon EC2 returns an # IdempotentParameterMismatch error. # In our case, changing the parameter doesn't actually matter since we # are forcing the error response fixture. EC2MockHttp.type = 'idempotent_mismatch' idem_error = None # different count try: self.driver.create_node(name='foo', image=image, size=size, ex_mincount='2', ex_maxcount='2', ex_clienttoken=token) except IdempotentParamError: e = sys.exc_info()[1] idem_error = e self.assertTrue(idem_error is not None) def test_create_node_no_availability_zone(self): image = NodeImage(id='ami-be3adfd7', name=self.image_name, driver=self.driver) size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) node = self.driver.create_node(name='foo', image=image, size=size) location = NodeLocation(0, 'Amazon US N. 
Virginia', 'US', self.driver) self.assertEqual(node.id, 'i-2ba64342') node = self.driver.create_node(name='foo', image=image, size=size, location=location) self.assertEqual(node.id, 'i-2ba64342') self.assertEqual(node.name, 'foo') def test_list_nodes(self): node = self.driver.list_nodes()[0] public_ips = sorted(node.public_ips) self.assertEqual(node.id, 'i-4382922a') self.assertEqual(node.name, node.id) self.assertEqual(len(node.public_ips), 2) self.assertEqual(node.extra['launch_time'], '2013-12-02T11:58:11.000Z') self.assertEqual(node.created_at, datetime(2013, 12, 2, 11, 58, 11, tzinfo=UTC)) self.assertTrue('instance_type' in node.extra) self.assertEqual(node.extra['availability'], 'us-east-1d') self.assertEqual(node.extra['key_name'], 'fauxkey') self.assertEqual(node.extra['monitoring'], 'disabled') self.assertEqual(node.extra['image_id'], 'ami-3215fe5a') self.assertEqual(len(node.extra['groups']), 2) self.assertEqual(len(node.extra['block_device_mapping']), 1) self.assertEqual(node.extra['block_device_mapping'][0]['device_name'], '/dev/sda1') self.assertEqual(node.extra['block_device_mapping'][0]['ebs']['volume_id'], 'vol-5e312311') self.assertTrue(node.extra['block_device_mapping'][0]['ebs']['delete']) self.assertEqual(node.extra['block_device_mapping'][0]['ebs']['status'], 'attached') self.assertEqual(node.extra['block_device_mapping'][0]['ebs']['attach_time'], datetime(2013, 4, 9, 18, 1, 1, tzinfo=UTC)) self.assertEqual(public_ips[0], '1.2.3.4') nodes = self.driver.list_nodes(ex_node_ids=['i-4382922a', 'i-8474834a']) ret_node1 = nodes[0] ret_node2 = nodes[1] self.assertEqual(ret_node1.id, 'i-4382922a') self.assertEqual(ret_node2.id, 'i-8474834a') self.assertEqual(ret_node2.name, 'Test Server 2') self.assertEqual(ret_node2.extra['subnet_id'], 'subnet-5fd9d412') self.assertEqual(ret_node2.extra['vpc_id'], 'vpc-61dcd30e') self.assertEqual(ret_node2.extra['tags']['Group'], 'VPC Test') self.assertEqual(ret_node1.extra['launch_time'], '2013-12-02T11:58:11.000Z') 
self.assertEqual(ret_node1.created_at, datetime(2013, 12, 2, 11, 58, 11, tzinfo=UTC)) self.assertEqual(ret_node2.extra['launch_time'], '2013-12-02T15:58:29.000Z') self.assertEqual(ret_node2.created_at, datetime(2013, 12, 2, 15, 58, 29, tzinfo=UTC)) self.assertIn('instance_type', ret_node1.extra) self.assertIn('instance_type', ret_node2.extra) def test_ex_list_reserved_nodes(self): node = self.driver.ex_list_reserved_nodes()[0] self.assertEqual(node.id, '93bbbca2-c500-49d0-9ede-9d8737400498') self.assertEqual(node.state, 'active') self.assertEqual(node.extra['instance_type'], 't1.micro') self.assertEqual(node.extra['availability'], 'us-east-1b') self.assertEqual(node.extra['start'], '2013-06-18T12:07:53.161Z') self.assertEqual(node.extra['end'], '2014-06-18T12:07:53.161Z') self.assertEqual(node.extra['duration'], 31536000) self.assertEqual(node.extra['usage_price'], 0.012) self.assertEqual(node.extra['fixed_price'], 23.0) self.assertEqual(node.extra['instance_count'], 1) self.assertEqual(node.extra['description'], 'Linux/UNIX') self.assertEqual(node.extra['instance_tenancy'], 'default') self.assertEqual(node.extra['currency_code'], 'USD') self.assertEqual(node.extra['offering_type'], 'Light Utilization') def test_list_location(self): locations = self.driver.list_locations() self.assertTrue(len(locations) > 0) self.assertEqual(locations[0].name, 'eu-west-1a') self.assertTrue(locations[0].availability_zone is not None) self.assertTrue(isinstance(locations[0].availability_zone, ExEC2AvailabilityZone)) def test_list_security_groups(self): groups = self.driver.ex_list_security_groups() self.assertEqual(groups, ['WebServers', 'RangedPortsBySource']) def test_ex_delete_security_group_by_id(self): group_id = 'sg-443d0a12' retValue = self.driver.ex_delete_security_group_by_id(group_id) self.assertTrue(retValue) def test_delete_security_group_by_name(self): group_name = 'WebServers' retValue = self.driver.ex_delete_security_group_by_name(group_name) self.assertTrue(retValue) 
    def test_ex_delete_security_group(self):
        # Generic delete helper dispatches by name.
        name = 'WebServers'
        retValue = self.driver.ex_delete_security_group(name)
        self.assertTrue(retValue)

    def test_authorize_security_group(self):
        # Legacy authorize API; note ports are passed as strings here.
        resp = self.driver.ex_authorize_security_group('TestGroup', '22', '22',
                                                       '0.0.0.0/0')
        self.assertTrue(resp)

    def test_authorize_security_group_ingress(self):
        ranges = ['1.1.1.1/32', '2.2.2.2/32']
        resp = self.driver.ex_authorize_security_group_ingress(
            'sg-42916629', 22, 22, cidr_ips=ranges)
        self.assertTrue(resp)
        groups = [{'group_id': 'sg-949265ff'}]
        resp = self.driver.ex_authorize_security_group_ingress(
            'sg-42916629', 22, 23, group_pairs=groups)
        self.assertTrue(resp)

    def test_authorize_security_group_egress(self):
        # NOTE(review): despite the name, this calls
        # ex_authorize_security_group_ingress, not the _egress variant --
        # looks like copy-paste; confirm which driver method was intended.
        ranges = ['1.1.1.1/32', '2.2.2.2/32']
        resp = self.driver.ex_authorize_security_group_ingress(
            'sg-42916629', 22, 22, cidr_ips=ranges)
        self.assertTrue(resp)
        groups = [{'group_id': 'sg-949265ff'}]
        resp = self.driver.ex_authorize_security_group_ingress(
            'sg-42916629', 22, 22, group_pairs=groups)
        self.assertTrue(resp)

    def test_revoke_security_group_ingress(self):
        # NOTE(review): calls ex_authorize_security_group_ingress, not a
        # revoke method -- likely copy-paste; verify intended coverage.
        ranges = ['1.1.1.1/32', '2.2.2.2/32']
        resp = self.driver.ex_authorize_security_group_ingress(
            'sg-42916629', 22, 22, cidr_ips=ranges)
        self.assertTrue(resp)
        groups = [{'group_id': 'sg-949265ff'}]
        resp = self.driver.ex_authorize_security_group_ingress(
            'sg-42916629', 22, 22, group_pairs=groups)
        self.assertTrue(resp)

    def test_revoke_security_group_egress(self):
        # NOTE(review): same copy-paste pattern as above -- calls
        # ex_authorize_security_group_ingress instead of a revoke/egress
        # method; verify intended coverage.
        ranges = ['1.1.1.1/32', '2.2.2.2/32']
        resp = self.driver.ex_authorize_security_group_ingress(
            'sg-42916629', 22, 22, cidr_ips=ranges)
        self.assertTrue(resp)
        groups = [{'group_id': 'sg-949265ff'}]
        resp = self.driver.ex_authorize_security_group_ingress(
            'sg-42916629', 22, 22, group_pairs=groups)
        self.assertTrue(resp)

    def test_reboot_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ret = self.driver.reboot_node(node)
        self.assertTrue(ret)

    def test_ex_start_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ret = self.driver.ex_start_node(node)
        self.assertTrue(ret)

    def test_ex_stop_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ret = self.driver.ex_stop_node(node)
        self.assertTrue(ret)

    def test_ex_create_node_with_ex_blockdevicemappings(self):
        # EC2MockHttp.type selects the *_RunInstances fixture variant below.
        EC2MockHttp.type = 'create_ex_blockdevicemappings'

        image = NodeImage(id='ami-be3adfd7', name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        mappings = [
            {'DeviceName': '/dev/sda1', 'Ebs.VolumeSize': 10},
            {'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
            {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}
        ]
        node = self.driver.create_node(name='foo', image=image, size=size,
                                       ex_blockdevicemappings=mappings)
        self.assertEqual(node.id, 'i-2ba64342')

    def test_ex_create_node_with_ex_blockdevicemappings_attribute_error(self):
        # ex_blockdevicemappings must be a list of dicts; anything else raises.
        EC2MockHttp.type = 'create_ex_blockdevicemappings'

        image = NodeImage(id='ami-be3adfd7', name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)

        mappings = 'this should be a list'
        self.assertRaises(AttributeError, self.driver.create_node, name='foo',
                          image=image, size=size,
                          ex_blockdevicemappings=mappings)

        mappings = ['this should be a dict']
        self.assertRaises(AttributeError, self.driver.create_node, name='foo',
                          image=image, size=size,
                          ex_blockdevicemappings=mappings)

    def test_destroy_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ret = self.driver.destroy_node(node)
        self.assertTrue(ret)

    def test_list_sizes(self):
        # Exercises the static size tables across several API/region pairs;
        # restores the driver's region afterwards since the driver instance
        # is shared between assertions.
        region_old = self.driver.region_name

        names = [
            ('ec2_us_east', 'us-east-1'),
            ('ec2_us_west', 'us-west-1'),
            ('ec2_us_west', 'us-west-2'),
            ('ec2_eu_west', 'eu-west-1'),
            ('ec2_ap_southeast', 'ap-southeast-1'),
            ('ec2_ap_northeast', 'ap-northeast-1'),
            ('ec2_ap_southeast_2', 'ap-southeast-2'),
            ('ec2_ap_south_1', 'ap-south-1')
        ]
        for api_name, region_name in names:
            self.driver.api_name = api_name
            self.driver.region_name = region_name
            sizes = self.driver.list_sizes()

            ids = [s.id for s in sizes]
            # ap-south-1 launched without the legacy instance families.
            if region_name not in ['ap-south-1']:
                self.assertTrue('t1.micro' in ids)
                self.assertTrue('m1.small' in ids)
                self.assertTrue('m1.large' in ids)
                self.assertTrue('m1.xlarge' in ids)
                self.assertTrue('c1.medium' in ids)
                self.assertTrue('c1.xlarge' in ids)
                self.assertTrue('m2.xlarge' in ids)
                self.assertTrue('m2.2xlarge' in ids)
                self.assertTrue('m2.4xlarge' in ids)

            if region_name == 'us-east-1':
                self.assertEqual(len(sizes), 70)
                self.assertTrue('cg1.4xlarge' in ids)
                self.assertTrue('cc2.8xlarge' in ids)
                self.assertTrue('cr1.8xlarge' in ids)
                self.assertTrue('x1.32xlarge' in ids)
            elif region_name == 'us-west-1':
                self.assertEqual(len(sizes), 61)
            # NOTE(review): this is a bare `if`, breaking the elif chain --
            # harmless because the region names are mutually exclusive, but
            # presumably meant to be `elif`.
            if region_name == 'us-west-2':
                self.assertEqual(len(sizes), 71)
            elif region_name == 'ap-southeast-1':
                self.assertEqual(len(sizes), 57)
            elif region_name == 'ap-southeast-2':
                self.assertEqual(len(sizes), 61)
            elif region_name == 'eu-west-1':
                self.assertEqual(len(sizes), 68)
            elif region_name == 'ap-south-1':
                self.assertEqual(len(sizes), 41)

        self.driver.region_name = region_old

    def test_ex_create_node_with_ex_iam_profile(self):
        # ex_iam_profile accepts either a profile name or a full ARN; both
        # should resolve to the same profile id in node.extra.
        iamProfile = {
            'id': 'AIDGPMS9RO4H3FEXAMPLE',
            'name': 'Foo',
            'arn': 'arn:aws:iam:...'
        }

        image = NodeImage(id='ami-be3adfd7', name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)

        EC2MockHttp.type = None
        node1 = self.driver.create_node(name='foo', image=image, size=size)

        EC2MockHttp.type = 'ex_iam_profile'
        node2 = self.driver.create_node(name='bar', image=image, size=size,
                                        ex_iam_profile=iamProfile['name'])
        node3 = self.driver.create_node(name='bar', image=image, size=size,
                                        ex_iam_profile=iamProfile['arn'])

        self.assertFalse(node1.extra['iam_profile'])
        self.assertEqual(node2.extra['iam_profile'], iamProfile['id'])
        self.assertEqual(node3.extra['iam_profile'], iamProfile['id'])

    def test_list_images(self):
        images = self.driver.list_images()

        self.assertEqual(len(images), 2)
        location = '123456788908/Test Image'
        self.assertEqual(images[0].id, 'ami-57ba933a')
        self.assertEqual(images[0].name, 'Test Image')
        self.assertEqual(images[0].extra['image_location'], location)
        self.assertEqual(images[0].extra['architecture'], 'x86_64')
        self.assertEqual(len(images[0].extra['block_device_mapping']), 2)
        ephemeral = images[0].extra['block_device_mapping'][1]['virtual_name']
        self.assertEqual(ephemeral, 'ephemeral0')
        billing_product1 = images[0].extra['billing_products'][0]
        self.assertEqual(billing_product1, 'ab-5dh78019')

        location = '123456788908/Test Image 2'
        self.assertEqual(images[1].id, 'ami-85b2a8ae')
        self.assertEqual(images[1].name, 'Test Image 2')
        self.assertEqual(images[1].extra['image_location'], location)
        self.assertEqual(images[1].extra['architecture'], 'x86_64')
        size = images[1].extra['block_device_mapping'][0]['ebs']['volume_size']
        billing_product2 = images[1].extra['billing_products'][0]
        self.assertEqual(billing_product2, 'as-6dr90319')
        self.assertEqual(size, 20)

    def test_list_images_with_image_ids(self):
        EC2MockHttp.type = 'ex_imageids'
        images = self.driver.list_images(ex_image_ids=['ami-57ba933a'])

        self.assertEqual(len(images), 1)
        self.assertEqual(images[0].name, 'Test Image')

    def test_list_images_with_executable_by(self):
        images = self.driver.list_images(ex_executableby='self')

        self.assertEqual(len(images), 2)

    def test_get_image(self):
        image = self.driver.get_image('ami-57ba933a')
        self.assertEqual(image.id, 'ami-57ba933a')
        self.assertEqual(image.name, 'Test Image')
        self.assertEqual(image.extra['architecture'], 'x86_64')
        self.assertEqual(len(image.extra['block_device_mapping']), 2)
        self.assertEqual(image.extra['billing_products'][0], 'ab-5dh78019')

    def test_copy_image(self):
        image = self.driver.list_images()[0]
        resp = self.driver.copy_image(image, 'us-east-1',
                                      name='Faux Image',
                                      description='Test Image Copy')

        self.assertEqual(resp.id, 'ami-4db38224')

    def test_create_image(self):
        node = self.driver.list_nodes()[0]

        mapping = [{'VirtualName': None,
                    'Ebs': {'VolumeSize': 10,
                            'VolumeType': 'standard',
                            'DeleteOnTermination': 'true'},
                    'DeviceName': '/dev/sda1'}]

        resp = self.driver.create_image(node,
                                        'New Image',
                                        description='New EBS Image',
                                        block_device_mapping=mapping)

        self.assertEqual(resp.id, 'ami-e9b38280')

    def test_create_image_no_mapping(self):
        node = self.driver.list_nodes()[0]

        resp = self.driver.create_image(node,
                                        'New Image',
                                        description='New EBS Image')

        self.assertEqual(resp.id, 'ami-e9b38280')

    def delete_image(self):
        # NOTE(review): missing 'test_' prefix -- unittest discovery never
        # runs this method; rename to test_delete_image to activate it
        # (verify the mock has a DeregisterImage handler first).
        images = self.driver.list_images()
        image = images[0]

        resp = self.driver.delete_image(image)
        self.assertTrue(resp)

    def ex_register_image(self):
        # NOTE(review): missing 'test_' prefix -- never executed by the
        # test runner; rename to test_ex_register_image to activate.
        mapping = [{'DeviceName': '/dev/sda1',
                    'Ebs': {'SnapshotId': 'snap-5ade3e4e'}}]
        image = self.driver.ex_register_image(name='Test Image',
                                              root_device_name='/dev/sda1',
                                              description='My Image',
                                              architecture='x86_64',
                                              block_device_mapping=mapping,
                                              ena_support=True,
                                              billing_products=['ab-5dh78019'],
                                              sriov_net_support='simple')

        self.assertEqual(image.id, 'ami-57c2fb3e')

    def test_ex_import_snapshot(self):
        disk_container = [{'Description': 'Dummy import snapshot task',
                           'Format': 'raw',
                           'UserBucket': {'S3Bucket': 'dummy-bucket',
                                          'S3Key': 'dummy-key'}}]

        snap = self.driver.ex_import_snapshot(disk_container=disk_container)
        self.assertEqual(snap.id, 'snap-0ea83e8a87e138f39')

    def test_wait_for_import_snapshot_completion(self):
        snap = self.driver._wait_for_import_snapshot_completion(
            import_task_id='import-snap-fhdysyq6')
        self.assertEqual(snap.id, 'snap-0ea83e8a87e138f39')

    def test_timeout_wait_for_import_snapshot_completion(self):
        # The 'timeout' mock type keeps the task in the active state so the
        # poller must give up; timeout/interval are tiny to keep tests fast.
        import_task_id = 'import-snap-fhdysyq6'
        EC2MockHttp.type = 'timeout'
        with self.assertRaises(Exception) as context:
            self.driver._wait_for_import_snapshot_completion(
                import_task_id=import_task_id, timeout=0.01, interval=0.001)
        self.assertEqual('Timeout while waiting '
                         'for import task Id %s' % import_task_id,
                         str(context.exception))

    def test_ex_describe_import_snapshot_tasks(self):
        # NOTE(review): trailing '<' in the task id below looks like a typo;
        # the mock ignores the parameter so the test still passes -- confirm
        # against the fixture before cleaning it up.
        snap = self.driver.ex_describe_import_snapshot_tasks(
            import_task_id='import-snap-fh7y6i6w<')

        self.assertEqual(snap.snapshotId, 'snap-0ea83e8a87e138f39')
        self.assertEqual(snap.status, 'completed')

    def test_ex_list_availability_zones(self):
        availability_zones = self.driver.ex_list_availability_zones()
        availability_zone = availability_zones[0]
        self.assertTrue(len(availability_zones) > 0)
        self.assertEqual(availability_zone.name, 'eu-west-1a')
        self.assertEqual(availability_zone.zone_state, 'available')
        self.assertEqual(availability_zone.region_name, 'eu-west-1')

    def test_list_keypairs(self):
        keypairs = self.driver.list_key_pairs()

        self.assertEqual(len(keypairs), 1)
        self.assertEqual(keypairs[0].name, 'gsg-keypair')
        self.assertEqual(keypairs[0].fingerprint, null_fingerprint)

        # Test old deprecated method
        keypairs = self.driver.ex_list_keypairs()

        self.assertEqual(len(keypairs), 1)
        self.assertEqual(keypairs[0]['keyName'], 'gsg-keypair')
        self.assertEqual(keypairs[0]['keyFingerprint'], null_fingerprint)

    def test_get_key_pair(self):
        EC2MockHttp.type = 'get_one'

        key_pair = self.driver.get_key_pair(name='gsg-keypair')
        self.assertEqual(key_pair.name, 'gsg-keypair')

    def test_get_key_pair_does_not_exist(self):
        EC2MockHttp.type = 'doesnt_exist'

        self.assertRaises(KeyPairDoesNotExistError, self.driver.get_key_pair,
                          name='test-key-pair')

    def test_create_key_pair(self):
        key_pair = self.driver.create_key_pair(name='test-keypair')

        fingerprint = ('1f:51:ae:28:bf:89:e9:d8:1f:25:5d'
                       ':37:2d:7d:b8:ca:9f:f5:f1:6f')

        self.assertEqual(key_pair.name, 'my-key-pair')
        self.assertEqual(key_pair.fingerprint, fingerprint)
        self.assertTrue(key_pair.private_key is not None)

        # Test old and deprecated method
        key_pair = self.driver.ex_create_keypair(name='test-keypair')
        self.assertEqual(key_pair['keyFingerprint'], fingerprint)
        self.assertTrue(key_pair['keyMaterial'] is not None)

    def test_ex_describe_all_keypairs(self):
        keys = self.driver.ex_describe_all_keypairs()
        self.assertEqual(keys, ['gsg-keypair'])

    def test_list_key_pairs(self):
        keypair1 = self.driver.list_key_pairs()[0]

        self.assertEqual(keypair1.name, 'gsg-keypair')
        self.assertEqual(keypair1.fingerprint, null_fingerprint)

        # Test backward compatibility
        keypair2 = self.driver.ex_describe_keypairs('gsg-keypair')

        self.assertEqual(keypair2['keyName'], 'gsg-keypair')
        self.assertEqual(keypair2['keyFingerprint'], null_fingerprint)

    def test_delete_key_pair(self):
        keypair = self.driver.list_key_pairs()[0]
        success = self.driver.delete_key_pair(keypair)

        self.assertTrue(success)

        # Test old and deprecated method
        resp = self.driver.ex_delete_keypair('gsg-keypair')
        self.assertTrue(resp)

    def test_ex_describe_tags(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        tags = self.driver.ex_describe_tags(resource=node)

        self.assertEqual(len(tags), 3)
        self.assertTrue('tag' in tags)
        self.assertTrue('owner' in tags)
        self.assertTrue('stack' in tags)

    def test_import_key_pair_from_string(self):
        path = os.path.join(os.path.dirname(__file__), 'fixtures', 'misc',
                            'dummy_rsa.pub')

        with open(path, 'r') as fp:
            key_material = fp.read()

        key = self.driver.import_key_pair_from_string(name='keypair',
                                                      key_material=key_material)
        self.assertEqual(key.name, 'keypair')
        self.assertEqual(key.fingerprint, null_fingerprint)

        # Test old and deprecated method
        key = self.driver.ex_import_keypair_from_string('keypair',
                                                        key_material)
        self.assertEqual(key['keyName'], 'keypair')
        self.assertEqual(key['keyFingerprint'], null_fingerprint)

    def test_import_key_pair_from_file(self):
        path = os.path.join(os.path.dirname(__file__), 'fixtures', 'misc',
                            'dummy_rsa.pub')

        key = self.driver.import_key_pair_from_file('keypair', path)
        self.assertEqual(key.name, 'keypair')
        self.assertEqual(key.fingerprint, null_fingerprint)

        # Test old and deprecated method
        key = self.driver.ex_import_keypair('keypair', path)
        self.assertEqual(key['keyName'], 'keypair')
        self.assertEqual(key['keyFingerprint'], null_fingerprint)

    def test_ex_create_tags(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        self.driver.ex_create_tags(node, {'sample': 'tag'})

    def test_ex_delete_tags(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        self.driver.ex_delete_tags(node, {'sample': 'tag'})

    def test_ex_delete_tags2(self):
        # A None value deletes the tag regardless of its current value.
        node = Node('i-4382922a', None, None, None, None, self.driver)
        self.driver.ex_create_tags(node, {'sample': 'another tag'})
        self.driver.ex_delete_tags(node, {'sample': None})

    def test_ex_describe_addresses_for_node(self):
        node1 = Node('i-4382922a', None, None, None, None, self.driver)
        ip_addresses1 = self.driver.ex_describe_addresses_for_node(node1)
        node2 = Node('i-4382922b', None, None, None, None, self.driver)
        ip_addresses2 = sorted(
            self.driver.ex_describe_addresses_for_node(node2))
        node3 = Node('i-4382922g', None, None, None, None, self.driver)
        ip_addresses3 = sorted(
            self.driver.ex_describe_addresses_for_node(node3))

        self.assertEqual(len(ip_addresses1), 1)
        self.assertEqual(ip_addresses1[0], '1.2.3.4')

        self.assertEqual(len(ip_addresses2), 2)
        self.assertEqual(ip_addresses2[0], '1.2.3.5')
        self.assertEqual(ip_addresses2[1], '1.2.3.6')

        self.assertEqual(len(ip_addresses3), 0)

    def test_ex_describe_addresses(self):
        # Nodes with no elastic IPs still get an (empty) entry in the map.
        node1 = Node('i-4382922a', None, None, None, None, self.driver)
        node2 = Node('i-4382922g', None, None, None, None, self.driver)
        nodes_elastic_ips1 = self.driver.ex_describe_addresses([node1])
        nodes_elastic_ips2 = self.driver.ex_describe_addresses([node2])

        self.assertEqual(len(nodes_elastic_ips1), 1)
        self.assertTrue(node1.id in nodes_elastic_ips1)
        self.assertEqual(nodes_elastic_ips1[node1.id], ['1.2.3.4'])

        self.assertEqual(len(nodes_elastic_ips2), 1)
        self.assertTrue(node2.id in nodes_elastic_ips2)
        self.assertEqual(nodes_elastic_ips2[node2.id], [])

    def test_ex_describe_all_addresses(self):
        EC2MockHttp.type = 'all_addresses'
        elastic_ips1 = self.driver.ex_describe_all_addresses()
        elastic_ips2 = self.driver.ex_describe_all_addresses(
            only_associated=True)
        self.assertEqual('1.2.3.7', elastic_ips1[3].ip)
        self.assertEqual('vpc', elastic_ips1[3].domain)
        self.assertEqual('eipalloc-992a5cf8',
                         elastic_ips1[3].extra['allocation_id'])

        self.assertEqual(len(elastic_ips2), 2)
        self.assertEqual('1.2.3.5', elastic_ips2[1].ip)
        self.assertEqual('vpc', elastic_ips2[1].domain)

    def test_ex_allocate_address(self):
        # Classic vs VPC allocation; only VPC returns an allocation id.
        elastic_ip = self.driver.ex_allocate_address()
        self.assertEqual('192.0.2.1', elastic_ip.ip)
        self.assertEqual('standard', elastic_ip.domain)
        EC2MockHttp.type = 'vpc'
        elastic_ip = self.driver.ex_allocate_address(domain='vpc')
        self.assertEqual('192.0.2.2', elastic_ip.ip)
        self.assertEqual('vpc', elastic_ip.domain)
        self.assertEqual('eipalloc-666d7f04',
                         elastic_ip.extra['allocation_id'])

    def test_ex_release_address(self):
        EC2MockHttp.type = 'all_addresses'
        elastic_ips = self.driver.ex_describe_all_addresses()
        EC2MockHttp.type = ''
        ret = self.driver.ex_release_address(elastic_ips[2])
        self.assertTrue(ret)
        ret = self.driver.ex_release_address(elastic_ips[0], domain='vpc')
        self.assertTrue(ret)
        # Only 'vpc' is a valid explicit domain.
        self.assertRaises(AttributeError,
                          self.driver.ex_release_address,
                          elastic_ips[0],
                          domain='bogus')

    def test_ex_associate_address_with_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)

        EC2MockHttp.type = 'all_addresses'
        elastic_ips = self.driver.ex_describe_all_addresses()
        EC2MockHttp.type = ''
        # Classic association returns None ...
        ret1 = self.driver.ex_associate_address_with_node(
            node, elastic_ips[2])
        ret2 = self.driver.ex_associate_addresses(
            node, elastic_ips[2])
        self.assertEqual(None, ret1)
        self.assertEqual(None, ret2)
        # ... while VPC association returns the association id.
        EC2MockHttp.type = 'vpc'
        ret3 = self.driver.ex_associate_address_with_node(
            node, elastic_ips[3], domain='vpc')
        ret4 = self.driver.ex_associate_addresses(
            node, elastic_ips[3], domain='vpc')
        self.assertEqual('eipassoc-167a8073', ret3)
        self.assertEqual('eipassoc-167a8073', ret4)
        self.assertRaises(AttributeError,
                          self.driver.ex_associate_address_with_node,
                          node, elastic_ips[1], domain='bogus')

    def test_ex_disassociate_address(self):
        EC2MockHttp.type = 'all_addresses'
        elastic_ips = self.driver.ex_describe_all_addresses()
        EC2MockHttp.type = ''
        ret = self.driver.ex_disassociate_address(elastic_ips[2])
        self.assertTrue(ret)
        # Test a VPC disassociation
        ret = self.driver.ex_disassociate_address(elastic_ips[1],
                                                  domain='vpc')
        self.assertTrue(ret)
        self.assertRaises(AttributeError,
                          self.driver.ex_disassociate_address,
                          elastic_ips[1],
                          domain='bogus')

    def test_ex_change_node_size_same_size(self):
        # Changing to the node's current size must raise ValueError.
        size = NodeSize('m1.small', 'Small Instance',
                        None, None, None, None, driver=self.driver)
        node = Node('i-4382922a', None, None, None, None, self.driver,
                    extra={'instancetype': 'm1.small'})

        try:
            self.driver.ex_change_node_size(node=node, new_size=size)
        except ValueError:
            pass
        else:
            self.fail('Same size was passed, but an exception was not thrown')

    def test_ex_change_node_size(self):
        size = NodeSize('m1.large', 'Small Instance',
                        None, None, None, None, driver=self.driver)
        node = Node('i-4382922a', None, None, None, None, self.driver,
                    extra={'instancetype': 'm1.small'})

        result = self.driver.ex_change_node_size(node=node, new_size=size)
        self.assertTrue(result)

    def test_list_volumes(self):
        volumes = self.driver.list_volumes()

        self.assertEqual(len(volumes), 3)

        self.assertEqual('vol-10ae5e2b', volumes[0].id)
        self.assertEqual(1, volumes[0].size)
        self.assertEqual('available', volumes[0].extra['state'])
        self.assertEqual(StorageVolumeState.AVAILABLE, volumes[0].state)

        self.assertEqual('vol-v24bfh75', volumes[1].id)
        self.assertEqual(11, volumes[1].size)
        self.assertIsNone(volumes[1].extra['snapshot_id'])
        self.assertEqual('in-use', volumes[1].extra['state'])
        self.assertEqual(StorageVolumeState.INUSE, volumes[1].state)

        # Unrecognised provider states must map to UNKNOWN.
        self.assertEqual('vol-b6c851ec', volumes[2].id)
        self.assertEqual(8, volumes[2].size)
        self.assertEqual('some-unknown-status', volumes[2].extra['state'])
        self.assertEqual('i-d334b4b3', volumes[2].extra['instance_id'])
        self.assertEqual('/dev/sda1', volumes[2].extra['device'])
        self.assertEqual('snap-30d37269', volumes[2].extra['snapshot_id'])
        self.assertEqual(StorageVolumeState.UNKNOWN, volumes[2].state)

    def test_create_volume(self):
        location = self.driver.list_locations()[0]
        vol = self.driver.create_volume(10, 'vol', location)

        self.assertEqual(10, vol.size)
        self.assertEqual('vol', vol.name)
        self.assertEqual('creating', vol.extra['state'])
        self.assertTrue(isinstance(vol.extra['create_time'], datetime))

    def test_destroy_volume(self):
        vol = StorageVolume(id='vol-4282672b', name='test',
                            state=StorageVolumeState.AVAILABLE,
                            size=10, driver=self.driver)

        retValue = self.driver.destroy_volume(vol)
        self.assertTrue(retValue)

    def test_attach(self):
        vol = StorageVolume(id='vol-4282672b', name='test', size=10,
                            state=StorageVolumeState.AVAILABLE,
                            driver=self.driver)

        node = Node('i-4382922a', None, None, None, None, self.driver)
        retValue = self.driver.attach_volume(node, vol, '/dev/sdh')

        self.assertTrue(retValue)

    def test_detach(self):
        vol = StorageVolume(id='vol-4282672b', name='test',
                            state=StorageVolumeState.INUSE,
                            size=10, driver=self.driver)

        retValue = self.driver.detach_volume(vol)
        self.assertTrue(retValue)

    def test_create_volume_snapshot(self):
        vol = StorageVolume(id='vol-4282672b', name='test',
                            state=StorageVolumeState.AVAILABLE,
                            size=10, driver=self.driver)
        snap = self.driver.create_volume_snapshot(
            vol, 'Test snapshot')
        self.assertEqual('snap-a7cb2hd9', snap.id)
        self.assertEqual(vol.size, snap.size)
        self.assertEqual('Test snapshot', snap.extra['name'])
        self.assertEqual(vol.id, snap.extra['volume_id'])
        self.assertEqual('pending', snap.extra['state'])
        self.assertEqual(VolumeSnapshotState.CREATING, snap.state)
        # 2013-08-15T16:22:30.000Z
        self.assertEqual(datetime(2013, 8, 15, 16, 22, 30, tzinfo=UTC),
                         snap.created)

    def test_list_snapshots(self):
        snaps = self.driver.list_snapshots()

        self.assertEqual(len(snaps), 3)

        self.assertEqual('snap-428abd35', snaps[0].id)
        self.assertEqual(VolumeSnapshotState.CREATING, snaps[0].state)
        self.assertEqual('vol-e020df80', snaps[0].extra['volume_id'])
        self.assertEqual(30, snaps[0].size)
        self.assertEqual('Daily Backup', snaps[0].extra['description'])

        self.assertEqual('snap-18349159', snaps[1].id)
        self.assertEqual('DB Backup 1', snaps[1].name)
        self.assertEqual(VolumeSnapshotState.AVAILABLE, snaps[1].state)
        self.assertEqual('vol-b5a2c1v9', snaps[1].extra['volume_id'])
        self.assertEqual(15, snaps[1].size)
        self.assertEqual('Weekly backup', snaps[1].extra['description'])
        self.assertEqual('DB Backup 1', snaps[1].extra['name'])

    def test_list_volume_snapshots(self):
        volume = self.driver.list_volumes()[0]
        assert volume.id == 'vol-10ae5e2b'

        snapshots = self.driver.list_volume_snapshots(volume)
        self.assertEqual(len(snapshots), 1)
        self.assertEqual(snapshots[0].id, 'snap-18349160')

    def test_destroy_snapshot(self):
        snap = VolumeSnapshot(id='snap-428abd35', size=10,
                              driver=self.driver)
        resp = snap.destroy()
        self.assertTrue(resp)

    def test_ex_modify_image_attribute(self):
        images = self.driver.list_images()
        image = images[0]

        data = {'LaunchPermission.Add.1.Group': 'all'}
        resp = self.driver.ex_modify_image_attribute(image, data)
        self.assertTrue(resp)

    def test_ex_modify_snapshot_attribute(self):
        snap = VolumeSnapshot(id='snap-1234567890abcdef0',
                              size=10, driver=self.driver)

        data = {'CreateVolumePermission.Add.1.Group': 'all'}
        resp = self.driver.ex_modify_snapshot_attribute(snap, data)
        self.assertTrue(resp)

    def test_create_node_ex_security_groups(self):
        EC2MockHttp.type = 'ex_security_groups'

        image = NodeImage(id='ami-be3adfd7', name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)

        security_groups = ['group1', 'group2']

        # Old, deprecated argument name
        self.driver.create_node(name='foo', image=image, size=size,
                                ex_securitygroup=security_groups)

        # New argument name
        self.driver.create_node(name='foo', image=image, size=size,
                                ex_security_groups=security_groups)

        # Test old and new arguments are mutually exclusive
        self.assertRaises(ValueError, self.driver.create_node,
                          name='foo', image=image, size=size,
                          ex_securitygroup=security_groups,
                          ex_security_groups=security_groups)

    def test_create_node_ex_security_group_ids(self):
        # Security-group *ids* are only valid together with a subnet (VPC).
        EC2MockHttp.type = 'ex_security_group_ids'

        image = NodeImage(id='ami-be3adfd7', name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        subnet = EC2NetworkSubnet(12345, "test_subnet", "pending")
        security_groups = ['sg-1aa11a1a', 'sg-2bb22b2b']

        self.driver.create_node(name='foo', image=image, size=size,
                                ex_security_group_ids=security_groups,
                                ex_subnet=subnet)
        self.assertRaises(ValueError, self.driver.create_node,
                          name='foo', image=image, size=size,
                          ex_security_group_ids=security_groups)

    def test_ex_get_metadata_for_node(self):
        image = NodeImage(id='ami-be3adfd7', name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        node = self.driver.create_node(name='foo', image=image, size=size,
                                       ex_metadata={'Bar': 'baz', 'Num': '42'})

        metadata = self.driver.ex_get_metadata_for_node(node)
        # 'Name' is injected from the node name, plus the two custom keys.
        self.assertEqual(metadata['Name'], 'foo')
        self.assertEqual(metadata['Bar'], 'baz')
        self.assertEqual(metadata['Num'], '42')
        self.assertEqual(len(metadata), 3)

    def test_ex_get_limits(self):
        limits = self.driver.ex_get_limits()

        expected = {'max-instances': 20, 'vpc-max-elastic-ips': 5,
                    'max-elastic-ips': 5}
        self.assertEqual(limits['resource'], expected)

    def test_ex_create_security_group(self):
        group = self.driver.ex_create_security_group("WebServers",
                                                     "Rules to protect web nodes",
                                                     "vpc-143cab4")

        self.assertEqual(group["group_id"], "sg-52e2f530")

    def test_ex_create_placement_groups(self):
        resp = self.driver.ex_create_placement_group("NewPG")
        self.assertTrue(resp)

    def test_ex_delete_placement_groups(self):
        pgs = self.driver.ex_list_placement_groups()
        pg = pgs[0]

        resp = self.driver.ex_delete_placement_group(pg.name)
        self.assertTrue(resp)

    def test_ex_list_placement_groups(self):
        pgs = self.driver.ex_list_placement_groups()

        self.assertEqual(len(pgs), 2)
        self.assertIsInstance(pgs[0], EC2PlacementGroup)

    def test_ex_list_networks(self):
        vpcs = self.driver.ex_list_networks()

        self.assertEqual(len(vpcs), 2)

        # An unnamed VPC falls back to its id as the name.
        self.assertEqual('vpc-532335e1', vpcs[0].id)
        self.assertEqual('vpc-532335e1', vpcs[0].name)
        self.assertEqual('192.168.51.0/24', vpcs[0].cidr_block)
        self.assertEqual('available', vpcs[0].extra['state'])
        self.assertEqual('dopt-7eded312', vpcs[0].extra['dhcp_options_id'])

        self.assertEqual('vpc-62ded30e', vpcs[1].id)
        self.assertEqual('Test VPC', vpcs[1].name)
        self.assertEqual('192.168.52.0/24', vpcs[1].cidr_block)
        self.assertEqual('available', vpcs[1].extra['state'])
        self.assertEqual('dopt-7eded312', vpcs[1].extra['dhcp_options_id'])

    def test_ex_list_networks_network_ids(self):
        EC2MockHttp.type = 'network_ids'

        network_ids = ['vpc-532335e1']

        # We assert in the mock http method
        self.driver.ex_list_networks(network_ids=network_ids)

    def test_ex_list_networks_filters(self):
        EC2MockHttp.type = 'filters'

        filters = {'dhcp-options-id': 'dopt-7eded312',  # matches two networks
                   'cidr': '192.168.51.0/24'}  # matches one network

        # We assert in the mock http method
        self.driver.ex_list_networks(filters=filters)

    def test_ex_create_network(self):
        vpc = self.driver.ex_create_network('192.168.55.0/24',
                                            name='Test VPC',
                                            instance_tenancy='default')

        self.assertEqual('vpc-ad3527cf', vpc.id)
        self.assertEqual('192.168.55.0/24', vpc.cidr_block)
        self.assertEqual('pending', vpc.extra['state'])

    def test_ex_delete_network(self):
        vpcs = self.driver.ex_list_networks()
        vpc = vpcs[0]

        resp = self.driver.ex_delete_network(vpc)
        self.assertTrue(resp)

    def test_ex_list_subnets(self):
        subnets = self.driver.ex_list_subnets()

        self.assertEqual(len(subnets), 2)

        self.assertEqual('subnet-ce0e7ce5', subnets[0].id)
        self.assertEqual('available', subnets[0].state)
        self.assertEqual(123, subnets[0].extra['available_ips'])

        self.assertEqual('subnet-ce0e7ce6', subnets[1].id)
        self.assertEqual('available', subnets[1].state)
        self.assertEqual(59, subnets[1].extra['available_ips'])

    def test_ex_create_subnet(self):
        subnet = self.driver.ex_create_subnet('vpc-532135d1',
                                              '192.168.51.128/26',
                                              'us-east-1b',
                                              name='Test Subnet')

        self.assertEqual('subnet-ce0e7ce6', subnet.id)
        self.assertEqual('pending', subnet.state)
        self.assertEqual('vpc-532135d1', subnet.extra['vpc_id'])

    def test_ex_delete_subnet(self):
        subnet = self.driver.ex_list_subnets()[0]
        resp = self.driver.ex_delete_subnet(subnet=subnet)
        self.assertTrue(resp)

    def test_ex_get_console_output(self):
        node = self.driver.list_nodes()[0]
        resp = self.driver.ex_get_console_output(node)
        self.assertEqual('Test String', resp['output'])

    def test_ex_list_network_interfaces(self):
        interfaces = self.driver.ex_list_network_interfaces()

        self.assertEqual(len(interfaces), 2)

        self.assertEqual('eni-18e6c05e', interfaces[0].id)
        self.assertEqual('in-use', interfaces[0].state)
        self.assertEqual('0e:6e:df:72:78:af',
                         interfaces[0].extra['mac_address'])

        self.assertEqual('eni-83e3c5c5', interfaces[1].id)
        self.assertEqual('in-use', interfaces[1].state)
        self.assertEqual('0e:93:0b:e9:e9:c4',
                         interfaces[1].extra['mac_address'])

    def test_ex_create_network_interface(self):
        subnet = self.driver.ex_list_subnets()[0]
        interface = self.driver.ex_create_network_interface(
            subnet,
            name='Test Interface',
            description='My Test')

        self.assertEqual('eni-2b36086d', interface.id)
        self.assertEqual('pending', interface.state)
        self.assertEqual('0e:bd:49:3e:11:74', interface.extra['mac_address'])

    def test_ex_delete_network_interface(self):
        interface = self.driver.ex_list_network_interfaces()[0]
        resp = self.driver.ex_delete_network_interface(interface)
        self.assertTrue(resp)

    def test_ex_attach_network_interface_to_node(self):
        node = self.driver.list_nodes()[0]
        interface = self.driver.ex_list_network_interfaces()[0]
        resp = self.driver.ex_attach_network_interface_to_node(interface,
                                                               node, 1)
        self.assertTrue(resp)

    def test_ex_detach_network_interface(self):
        resp = self.driver.ex_detach_network_interface('eni-attach-2b588b47')
        self.assertTrue(resp)

    def test_ex_list_internet_gateways(self):
        gateways = self.driver.ex_list_internet_gateways()

        self.assertEqual(len(gateways), 2)

        self.assertEqual('igw-84dd3ae1', gateways[0].id)
        self.assertEqual('igw-7fdae215', gateways[1].id)
        self.assertEqual('available', gateways[1].state)
        self.assertEqual('vpc-62cad41e', gateways[1].vpc_id)

    def test_ex_create_internet_gateway(self):
        gateway = self.driver.ex_create_internet_gateway()

        self.assertEqual('igw-13ac2b36', gateway.id)

    def test_ex_delete_internet_gateway(self):
        gateway = self.driver.ex_list_internet_gateways()[0]
        resp = self.driver.ex_delete_internet_gateway(gateway)
        self.assertTrue(resp)

    def test_ex_attach_internet_gateway(self):
        gateway = self.driver.ex_list_internet_gateways()[0]
        network = self.driver.ex_list_networks()[0]
        resp = self.driver.ex_attach_internet_gateway(gateway, network)
        self.assertTrue(resp)

    def test_ex_detach_internet_gateway(self):
        gateway = self.driver.ex_list_internet_gateways()[0]
        network = self.driver.ex_list_networks()[0]
        resp = self.driver.ex_detach_internet_gateway(gateway, network)
        self.assertTrue(resp)

    def test_ex_modify_volume(self):
        # ModifyVolume reports both the original and target attributes.
        volume = self.driver.list_volumes()[0]
        assert volume.id == 'vol-10ae5e2b'

        params = {'VolumeType': 'io1',
                  'Size': 2,
                  'Iops': 1000}
        volume_modification = self.driver.ex_modify_volume(volume, params)

        self.assertIsNone(volume_modification.end_time)
        self.assertEqual('modifying', volume_modification.modification_state)
        self.assertEqual(300, volume_modification.original_iops)
        self.assertEqual(1, volume_modification.original_size)
        self.assertEqual('gp2', volume_modification.original_volume_type)
        self.assertEqual(0, volume_modification.progress)
        self.assertIsNone(volume_modification.status_message)
        self.assertEqual(1000, volume_modification.target_iops)
        self.assertEqual(2, volume_modification.target_size)
        self.assertEqual('io1', volume_modification.target_volume_type)
        self.assertEqual('vol-10ae5e2b', volume_modification.volume_id)

    def test_ex_describe_volumes_modifications(self):
        modifications = self.driver.ex_describe_volumes_modifications()

        self.assertEqual(len(modifications), 2)

        self.assertIsNone(modifications[0].end_time)
        self.assertEqual('optimizing', modifications[0].modification_state)
        self.assertEqual(100, modifications[0].original_iops)
        self.assertEqual(10, modifications[0].original_size)
        self.assertEqual('gp2', modifications[0].original_volume_type)
        self.assertEqual(3, modifications[0].progress)
        self.assertIsNone(modifications[0].status_message)
        self.assertEqual(10000, modifications[0].target_iops)
        self.assertEqual(2000, modifications[0].target_size)
        self.assertEqual('io1', modifications[0].target_volume_type)
        self.assertEqual('vol-06397e7a0eEXAMPLE', modifications[0].volume_id)

        self.assertEqual('completed', modifications[1].modification_state)
        self.assertEqual(100, modifications[1].original_iops)
        self.assertEqual(8, modifications[1].original_size)
        self.assertEqual('gp2', modifications[1].original_volume_type)
        self.assertEqual(100, modifications[1].progress)
        self.assertIsNone(modifications[1].status_message)
        self.assertEqual(10000, modifications[1].target_iops)
        self.assertEqual(200, modifications[1].target_size)
        self.assertEqual('io1', modifications[1].target_volume_type)
        self.assertEqual('vol-bEXAMPLE', modifications[1].volume_id)


# Region-specific variants: re-run the whole EC2Tests suite against each
# region's endpoint by overriding the class-level ``region`` attribute.
class EC2USWest1Tests(EC2Tests):
    region = 'us-west-1'


class EC2USWest2Tests(EC2Tests):
    region = 'us-west-2'


class EC2EUWestTests(EC2Tests):
    region = 'eu-west-1'


class EC2APSE1Tests(EC2Tests):
    region = 'ap-southeast-1'


class EC2APNETests(EC2Tests):
    region = 'ap-northeast-1'


class EC2APSE2Tests(EC2Tests):
    region = 'ap-southeast-2'


class EC2SAEastTests(EC2Tests):
    region = 'sa-east-1'


class EC2MockHttp(MockHttp):
    # One handler per EC2 Query API action; the method name is the Action
    # parameter, optionally prefixed with the current EC2MockHttp.type
    # (e.g. _timeout_DescribeImportSnapshotTasks). Responses come from
    # canned XML fixtures.
    fixtures = ComputeFileFixtures('ec2')

    def _DescribeInstances(self, method, url, body, headers):
        body = self.fixtures.load('describe_instances.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeReservedInstances(self, method, url, body, headers):
        body = self.fixtures.load('describe_reserved_instances.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeAvailabilityZones(self, method, url, body, headers):
        body = self.fixtures.load('describe_availability_zones.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _RebootInstances(self, method, url, body, headers):
        body = self.fixtures.load('reboot_instances.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _StartInstances(self, method, url, body, headers):
        body = self.fixtures.load('start_instances.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _StopInstances(self, method, url, body, headers):
        body = self.fixtures.load('stop_instances.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeSecurityGroups(self, method, url, body, headers):
        body = self.fixtures.load('describe_security_groups.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DeleteSecurityGroup(self, method, url, body, headers):
        body = self.fixtures.load('delete_security_group.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _AuthorizeSecurityGroupIngress(self, method, url, body, headers):
        body = self.fixtures.load('authorize_security_group_ingress.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeImages(self, method, url, body, headers):
        body = self.fixtures.load('describe_images.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _RegisterImages(self, method, url, body, headers):
        body = self.fixtures.load('register_image.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _ImportSnapshot(self, method, url, body, headers):
        body = self.fixtures.load('import_snapshot.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeImportSnapshotTasks(self, method, url, body, headers):
        body = self.fixtures.load('describe_import_snapshot_tasks.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _timeout_DescribeImportSnapshotTasks(self, method, url, body, headers):
        # 'active' fixture keeps the task pending so the poller times out.
        body = self.fixtures.load('describe_import_snapshot_tasks_active.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _ex_imageids_DescribeImages(self, method, url, body, headers):
        body = self.fixtures.load('describe_images_ex_imageids.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _RunInstances(self, method, url, body, headers):
        body = self.fixtures.load('run_instances.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _create_ex_assign_public_ip_RunInstances(self, method, url, body,
                                                 headers):
        # Verifies the public-IP network-interface parameters were built
        # into the request query string.
        self.assertUrlContainsQueryParams(url, {
            'NetworkInterface.1.AssociatePublicIpAddress': "true",
            'NetworkInterface.1.DeleteOnTermination': "true",
            'NetworkInterface.1.DeviceIndex': "0",
            'NetworkInterface.1.SubnetId': "subnet-11111111",
            'NetworkInterface.1.SecurityGroupId.1': "sg-11111111",
        })
        body = self.fixtures.load(
            'run_instances_with_subnet_and_security_group.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def
_create_ex_terminate_on_shutdown_RunInstances(self, method, url, body, headers): self.assertUrlContainsQueryParams(url, { 'InstanceInitiatedShutdownBehavior': 'terminate' }) body = self.fixtures.load('run_instances.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ex_security_groups_RunInstances(self, method, url, body, headers): self.assertUrlContainsQueryParams(url, {'SecurityGroup.1': 'group1'}) self.assertUrlContainsQueryParams(url, {'SecurityGroup.2': 'group2'}) body = self.fixtures.load('run_instances.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ex_security_group_ids_RunInstances(self, method, url, body, headers): self.assertUrlContainsQueryParams(url, {'SecurityGroupId.1': 'sg-1aa11a1a'}) self.assertUrlContainsQueryParams(url, {'SecurityGroupId.2': 'sg-2bb22b2b'}) body = self.fixtures.load('run_instances.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _create_ex_blockdevicemappings_RunInstances(self, method, url, body, headers): expected_params = { 'BlockDeviceMapping.1.DeviceName': '/dev/sda1', 'BlockDeviceMapping.1.Ebs.VolumeSize': '10', 'BlockDeviceMapping.2.DeviceName': '/dev/sdb', 'BlockDeviceMapping.2.VirtualName': 'ephemeral0', 'BlockDeviceMapping.3.DeviceName': '/dev/sdc', 'BlockDeviceMapping.3.VirtualName': 'ephemeral1' } self.assertUrlContainsQueryParams(url, expected_params) body = self.fixtures.load('run_instances.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _idempotent_RunInstances(self, method, url, body, headers): body = self.fixtures.load('run_instances_idem.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _idempotent_mismatch_RunInstances(self, method, url, body, headers): body = self.fixtures.load('run_instances_idem_mismatch.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST]) def _ex_iam_profile_RunInstances(self, method, url, body, headers): body = 
self.fixtures.load('run_instances_iam_profile.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _TerminateInstances(self, method, url, body, headers): body = self.fixtures.load('terminate_instances.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeKeyPairs(self, method, url, body, headers): body = self.fixtures.load('describe_key_pairs.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _get_one_DescribeKeyPairs(self, method, url, body, headers): self.assertUrlContainsQueryParams(url, {'KeyName': 'gsg-keypair'}) body = self.fixtures.load('describe_key_pairs.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _doesnt_exist_DescribeKeyPairs(self, method, url, body, headers): body = self.fixtures.load('describe_key_pairs_doesnt_exist.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST]) def _CreateKeyPair(self, method, url, body, headers): body = self.fixtures.load('create_key_pair.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ImportKeyPair(self, method, url, body, headers): body = self.fixtures.load('import_key_pair.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeTags(self, method, url, body, headers): body = self.fixtures.load('describe_tags.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateTags(self, method, url, body, headers): body = self.fixtures.load('create_tags.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DeleteTags(self, method, url, body, headers): body = self.fixtures.load('delete_tags.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeAddresses(self, method, url, body, headers): body = self.fixtures.load('describe_addresses_multi.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _AllocateAddress(self, method, url, body, headers): body = 
self.fixtures.load('allocate_address.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _vpc_AllocateAddress(self, method, url, body, headers): body = self.fixtures.load('allocate_vpc_address.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _AssociateAddress(self, method, url, body, headers): body = self.fixtures.load('associate_address.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _vpc_AssociateAddress(self, method, url, body, headers): body = self.fixtures.load('associate_vpc_address.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DisassociateAddress(self, method, url, body, headers): body = self.fixtures.load('disassociate_address.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ReleaseAddress(self, method, url, body, headers): body = self.fixtures.load('release_address.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _all_addresses_DescribeAddresses(self, method, url, body, headers): body = self.fixtures.load('describe_addresses_all.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _WITH_TAGS_DescribeAddresses(self, method, url, body, headers): body = self.fixtures.load('describe_addresses_multi.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ModifyInstanceAttribute(self, method, url, body, headers): body = self.fixtures.load('modify_instance_attribute.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ModifySnapshotAttribute(self, method, url, body, headers): body = self.fixtures.load('modify_snapshot_attribute.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateVolume(self, method, url, body, headers): body = self.fixtures.load('create_volume.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DeleteVolume(self, method, url, body, headers): body = self.fixtures.load('delete_volume.xml') return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) def _AttachVolume(self, method, url, body, headers): body = self.fixtures.load('attach_volume.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DetachVolume(self, method, url, body, headers): body = self.fixtures.load('detach_volume.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeVolumes(self, method, url, body, headers): body = self.fixtures.load('describe_volumes.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateSnapshot(self, method, url, body, headers): body = self.fixtures.load('create_snapshot.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeSnapshots(self, method, url, body, headers): body = self.fixtures.load('describe_snapshots.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DeleteSnapshot(self, method, url, body, headers): body = self.fixtures.load('delete_snapshot.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CopyImage(self, method, url, body, headers): body = self.fixtures.load('copy_image.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateImage(self, method, url, body, headers): body = self.fixtures.load('create_image.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DeregisterImage(self, method, url, body, headers): body = self.fixtures.load('deregister_image.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DeleteKeyPair(self, method, url, body, headers): self.assertUrlContainsQueryParams(url, {'KeyName': 'gsg-keypair'}) body = self.fixtures.load('delete_key_pair.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ModifyImageAttribute(self, method, url, body, headers): body = self.fixtures.load('modify_image_attribute.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeAccountAttributes(self, method, url, body, headers): body = 
self.fixtures.load('describe_account_attributes.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateSecurityGroup(self, method, url, body, headers): body = self.fixtures.load('create_security_group.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeVpcs(self, method, url, body, headers): body = self.fixtures.load('describe_vpcs.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _network_ids_DescribeVpcs(self, method, url, body, headers): expected_params = { 'VpcId.1': 'vpc-532335e1' } self.assertUrlContainsQueryParams(url, expected_params) body = self.fixtures.load('describe_vpcs.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _filters_DescribeVpcs(self, method, url, body, headers): expected_params_1 = { 'Filter.1.Name': 'dhcp-options-id', 'Filter.1.Value.1': 'dopt-7eded312', 'Filter.2.Name': 'cidr', 'Filter.2.Value.1': '192.168.51.0/24' } expected_params_2 = { 'Filter.1.Name': 'cidr', 'Filter.1.Value.1': '192.168.51.0/24', 'Filter.2.Name': 'dhcp-options-id', 'Filter.2.Value.1': 'dopt-7eded312' } try: self.assertUrlContainsQueryParams(url, expected_params_1) except AssertionError: # dict ordering is not guaranteed self.assertUrlContainsQueryParams(url, expected_params_2) body = self.fixtures.load('describe_vpcs.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateVpc(self, method, url, body, headers): body = self.fixtures.load('create_vpc.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DeleteVpc(self, method, url, body, headers): body = self.fixtures.load('delete_vpc.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeSubnets(self, method, url, body, headers): body = self.fixtures.load('describe_subnets.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateSubnet(self, method, url, body, headers): body = self.fixtures.load('create_subnet.xml') return (httplib.OK, body, 
{}, httplib.responses[httplib.OK]) def _DeleteSubnet(self, method, url, body, headers): body = self.fixtures.load('delete_subnet.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _GetConsoleOutput(self, method, url, body, headers): body = self.fixtures.load('get_console_output.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeNetworkInterfaces(self, method, url, body, headers): body = self.fixtures.load('describe_network_interfaces.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateNetworkInterface(self, method, url, body, headers): body = self.fixtures.load('create_network_interface.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DeleteNetworkInterface(self, method, url, body, headers): body = self.fixtures.load('delete_network_interface.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _AttachNetworkInterface(self, method, url, body, headers): body = self.fixtures.load('attach_network_interface.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DetachNetworkInterface(self, method, url, body, headers): body = self.fixtures.load('detach_network_interface.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeInternetGateways(self, method, url, body, headers): body = self.fixtures.load('describe_internet_gateways.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateInternetGateway(self, method, url, body, headers): body = self.fixtures.load('create_internet_gateway.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DeleteInternetGateway(self, method, url, body, headers): body = self.fixtures.load('delete_internet_gateway.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _AttachInternetGateway(self, method, url, body, headers): body = self.fixtures.load('attach_internet_gateway.xml') return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) def _DetachInternetGateway(self, method, url, body, headers): body = self.fixtures.load('detach_internet_gateway.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreatePlacementGroup(self, method, url, body, headers): body = self.fixtures.load('create_placement_groups.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DeletePlacementGroup(self, method, url, body, headers): body = self.fixtures.load('delete_placement_groups.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribePlacementGroups(self, method, url, body, headers): body = self.fixtures.load('describe_placement_groups.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ModifyVolume(self, method, url, body, headers): body = self.fixtures.load('modify_volume.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeVolumesModifications(self, method, url, body, headers): body = self.fixtures.load('describe_volumes_modifications.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class EucMockHttp(EC2MockHttp): fixtures = ComputeFileFixtures('ec2') def _services_Eucalyptus_DescribeInstances(self, method, url, body, headers): return self._DescribeInstances(method, url, body, headers) def _services_Eucalyptus_DescribeImages(self, method, url, body, headers): return self._DescribeImages(method, url, body, headers) def _services_Eucalyptus_DescribeAddresses(self, method, url, body, headers): return self._DescribeAddresses(method, url, body, headers) def _services_Eucalyptus_RebootInstances(self, method, url, body, headers): return self._RebootInstances(method, url, body, headers) def _services_Eucalyptus_TerminateInstances(self, method, url, body, headers): return self._TerminateInstances(method, url, body, headers) def _services_Eucalyptus_RunInstances(self, method, url, body, headers): return self._RunInstances(method, url, body, headers) def 
_services_Eucalyptus_DescribeInstanceTypes(self, method, url, body, headers): body = self.fixtures.load('describe_instance_types.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class NimbusTests(EC2Tests): def setUp(self): NimbusNodeDriver.connectionCls.conn_class = EC2MockHttp EC2MockHttp.use_param = 'Action' EC2MockHttp.type = None self.driver = NimbusNodeDriver(key=EC2_PARAMS[0], secret=EC2_PARAMS[1], host='some.nimbuscloud.com') def test_ex_describe_addresses_for_node(self): # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. node = Node('i-4382922a', None, None, None, None, self.driver) ip_addresses = self.driver.ex_describe_addresses_for_node(node) self.assertEqual(len(ip_addresses), 0) def test_ex_describe_addresses(self): # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. node = Node('i-4382922a', None, None, None, None, self.driver) nodes_elastic_ips = self.driver.ex_describe_addresses([node]) self.assertEqual(len(nodes_elastic_ips), 1) self.assertEqual(len(nodes_elastic_ips[node.id]), 0) def test_list_sizes(self): sizes = self.driver.list_sizes() ids = [s.id for s in sizes] self.assertTrue('m1.small' in ids) self.assertTrue('m1.large' in ids) self.assertTrue('m1.xlarge' in ids) def test_list_nodes(self): # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. 
node = self.driver.list_nodes()[0] self.assertExecutedMethodCount(0) public_ips = node.public_ips self.assertEqual(node.id, 'i-4382922a') self.assertEqual(len(node.public_ips), 1) self.assertEqual(public_ips[0], '1.2.3.4') self.assertEqual(node.extra['tags'], {}) node = self.driver.list_nodes()[1] self.assertExecutedMethodCount(0) public_ips = node.public_ips self.assertEqual(node.id, 'i-8474834a') self.assertEqual(len(node.public_ips), 1) self.assertEqual(public_ips[0], '1.2.3.5') self.assertEqual(node.extra['tags'], {'Name': 'Test Server 2', 'Group': 'VPC Test'}) def test_ex_create_tags(self): # Nimbus doesn't support creating tags so this one should be a # passthrough node = self.driver.list_nodes()[0] self.driver.ex_create_tags(resource=node, tags={'foo': 'bar'}) self.assertExecutedMethodCount(0) class EucTests(LibcloudTestCase, TestCaseMixin): def setUp(self): EucNodeDriver.connectionCls.conn_class = EucMockHttp EC2MockHttp.use_param = 'Action' EC2MockHttp.type = None self.driver = EucNodeDriver(key=EC2_PARAMS[0], secret=EC2_PARAMS[1], host='some.eucalyptus.com', api_version='3.4.1') def test_list_locations_response(self): try: self.driver.list_locations() except Exception: pass else: self.fail('Exception was not thrown') def test_list_location(self): pass def test_list_sizes(self): sizes = self.driver.list_sizes() ids = [s.id for s in sizes] self.assertEqual(len(ids), 18) self.assertTrue('t1.micro' in ids) self.assertTrue('m1.medium' in ids) self.assertTrue('m3.xlarge' in ids) class OutscaleTests(EC2Tests): def setUp(self): OutscaleSASNodeDriver.connectionCls.conn_class = EC2MockHttp EC2MockHttp.use_param = 'Action' EC2MockHttp.type = None self.driver = OutscaleSASNodeDriver(key=EC2_PARAMS[0], secret=EC2_PARAMS[1], host='some.outscalecloud.com') def test_ex_create_network(self): # overridden from EC2Tests -- Outscale don't support instance_tenancy vpc = self.driver.ex_create_network('192.168.55.0/24', name='Test VPC') self.assertEqual('vpc-ad3527cf', vpc.id) 
self.assertEqual('192.168.55.0/24', vpc.cidr_block) self.assertEqual('pending', vpc.extra['state']) def test_ex_copy_image(self): # overridden from EC2Tests -- Outscale does not support copying images image = self.driver.list_images()[0] try: self.driver.ex_copy_image('us-east-1', image, name='Faux Image', description='Test Image Copy') except NotImplementedError: pass else: self.fail('Exception was not thrown') def test_ex_get_limits(self): # overridden from EC2Tests -- Outscale does not support getting limits try: self.driver.ex_get_limits() except NotImplementedError: pass else: self.fail('Exception was not thrown') def test_ex_create_network_interface(self): # overridden from EC2Tests -- Outscale don't allow creating interfaces subnet = self.driver.ex_list_subnets()[0] try: self.driver.ex_create_network_interface( subnet, name='Test Interface', description='My Test') except NotImplementedError: pass else: self.fail('Exception was not thrown') def test_ex_delete_network_interface(self): # overridden from EC2Tests -- Outscale don't allow deleting interfaces interface = self.driver.ex_list_network_interfaces()[0] try: self.driver.ex_delete_network_interface(interface) except NotImplementedError: pass else: self.fail('Exception was not thrown') def test_ex_attach_network_interface_to_node(self): # overridden from EC2Tests -- Outscale don't allow attaching interfaces node = self.driver.list_nodes()[0] interface = self.driver.ex_list_network_interfaces()[0] try: self.driver.ex_attach_network_interface_to_node(interface, node, 1) except NotImplementedError: pass else: self.fail('Exception was not thrown') def test_ex_detach_network_interface(self): # overridden from EC2Tests -- Outscale don't allow detaching interfaces try: self.driver.ex_detach_network_interface('eni-attach-2b588b47') except NotImplementedError: pass else: self.fail('Exception was not thrown') def test_list_sizes(self): sizes = self.driver.list_sizes() ids = [s.id for s in sizes] 
self.assertTrue('m1.small' in ids) self.assertTrue('m1.large' in ids) self.assertTrue('m1.xlarge' in ids) class FCUMockHttp(EC2MockHttp): fixtures = ComputeFileFixtures('fcu') def _DescribeQuotas(self, method, url, body, headers): body = self.fixtures.load('ex_describe_quotas.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeProductTypes(self, method, url, body, headers): body = self.fixtures.load('ex_describe_product_types.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeInstanceTypes(self, method, url, body, headers): body = self.fixtures.load('ex_describe_instance_types.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _GetProductType(self, method, url, body, headers): body = self.fixtures.load('ex_get_product_type.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _ModifyInstanceKeypair(self, method, url, body, headers): body = self.fixtures.load('ex_modify_instance_keypair.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class OutscaleFCUTests(LibcloudTestCase): def setUp(self): OutscaleSASNodeDriver.connectionCls.conn_class = FCUMockHttp EC2MockHttp.use_param = 'Action' EC2MockHttp.type = None self.driver = OutscaleSASNodeDriver(key=EC2_PARAMS[0], secret=EC2_PARAMS[1], host='some.fcucloud.com') def test_ex_describe_quotas(self): is_truncated, quota = self.driver.ex_describe_quotas() self.assertTrue(is_truncated == 'true') self.assertTrue('global' in quota.keys()) self.assertTrue('vpc-00000000' in quota.keys()) def test_ex_describe_product_types(self): product_types = self.driver.ex_describe_product_types() pt = {} for e in product_types: pt[e['productTypeId']] = e['description'] self.assertTrue('0001' in pt.keys()) self.assertTrue('MapR' in pt.values()) self.assertTrue(pt['0002'] == 'Windows') def test_ex_describe_instance_instance_types(self): instance_types = self.driver.ex_describe_instance_types() it = {} for e in instance_types: 
it[e['name']] = e['memory'] self.assertTrue('og4.4xlarge' in it.keys()) self.assertTrue('oc2.8xlarge' in it.keys()) self.assertTrue('68718428160' in it.values()) self.assertTrue(it['m3.large'] == '8050966528') def test_ex_get_product_type(self): product_type = self.driver.ex_get_product_type('ami-29ab9e54') self.assertTrue(product_type['productTypeId'] == '0002') self.assertTrue(product_type['description'] == 'Windows') def test_ex_modify_instance_keypair(self): r = self.driver.ex_modify_instance_keypair('i-57292bc5', 'key_name') self.assertTrue(r) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_gridspot.py0000664000175000017500000001700213153541406024114 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys import unittest from libcloud.utils.py3 import httplib try: import simplejson as json except ImportError: import json from libcloud.common.types import InvalidCredsError from libcloud.compute.drivers.gridspot import GridspotNodeDriver from libcloud.compute.types import NodeState from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.secrets import GRIDSPOT_PARAMS class GridspotTest(unittest.TestCase, TestCaseMixin): def setUp(self): GridspotNodeDriver.connectionCls.conn_class = GridspotMockHttp GridspotMockHttp.type = None self.driver = GridspotNodeDriver(*GRIDSPOT_PARAMS) def test_invalid_creds(self): """ Tests the error-handling for passing a bad API Key to the Gridspot API """ GridspotMockHttp.type = 'BAD_AUTH' self.assertRaises(InvalidCredsError, self.driver.list_nodes) def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 2) running_node = nodes[0] starting_node = nodes[1] self.assertEqual(running_node.id, 'inst_CP2WrQi2WIS4iheyAVkQYw') self.assertEqual(running_node.state, NodeState.RUNNING) self.assertTrue('69.4.239.74' in running_node.public_ips) self.assertEqual(running_node.extra['port'], 62394) self.assertEqual(running_node.extra['vm_ram'], 1429436743) self.assertEqual(running_node.extra['start_state_time'], 1342108905) self.assertEqual(running_node.extra['vm_num_logical_cores'], 8) self.assertEqual(running_node.extra['vm_num_physical_cores'], 4) self.assertEqual(running_node.extra['winning_bid_id'], 'bid_X5xhotGYiGUk7_RmIqVafA') self.assertFalse('ended_state_time' in running_node.extra) self.assertEqual(running_node.extra['running_state_time'], 1342108989) self.assertEqual(starting_node.id, 'inst_CP2WrQi2WIS4iheyAVkQYw2') self.assertEqual(starting_node.state, NodeState.PENDING) self.assertTrue('69.4.239.74' in starting_node.public_ips) self.assertEqual(starting_node.extra['port'], 62395) self.assertEqual(starting_node.extra['vm_ram'], 1429436744) 
self.assertEqual(starting_node.extra['start_state_time'], 1342108906) self.assertEqual(starting_node.extra['vm_num_logical_cores'], 7) self.assertEqual(starting_node.extra['vm_num_physical_cores'], 5) self.assertEqual(starting_node.extra['winning_bid_id'], 'bid_X5xhotGYiGUk7_RmIqVafA1') self.assertFalse('ended_state_time' in starting_node.extra) self.assertEqual(starting_node.extra['running_state_time'], 1342108990) def test_create_node(self): """ Gridspot does not implement this functionality """ pass def test_destroy_node(self): """ Test destroy_node for Gridspot driver """ node = self.driver.list_nodes()[0] self.assertTrue(self.driver.destroy_node(node)) def test_destroy_node_failure(self): """ Gridspot does not fail a destroy node unless the parameters are bad, in which case it 404s """ pass def test_reboot_node(self): """ Gridspot does not implement this functionality """ pass def test_reboot_node_failure(self): """ Gridspot does not implement this functionality """ pass def test_resize_node(self): """ Gridspot does not implement this functionality """ pass def test_reboot_node_response(self): """ Gridspot does not implement this functionality """ pass def test_list_images_response(self): """ Gridspot does not implement this functionality """ pass def test_create_node_response(self): """ Gridspot does not implement this functionality """ pass def test_destroy_node_response(self): """ Gridspot does not implement this functionality """ pass def test_list_sizes_response(self): """ Gridspot does not implement this functionality """ pass def test_resize_node_failure(self): """ Gridspot does not implement this functionality """ pass def test_list_images(self): """ Gridspot does not implement this functionality """ pass def test_list_sizes(self): """ Gridspot does not implement this functionality """ pass def test_list_locations(self): """ Gridspot does not implement this functionality """ pass def test_list_locations_response(self): """ Gridspot does not implement 
this functionality """ pass class GridspotMockHttp(MockHttp): def _compute_api_v1_list_instances_BAD_AUTH(self, method, url, body, headers): return (httplib.NOT_FOUND, "", {}, httplib.responses[httplib.NOT_FOUND]) def _compute_api_v1_list_instances(self, method, url, body, headers): body = json.dumps({ "instances": [ { "instance_id": "inst_CP2WrQi2WIS4iheyAVkQYw", "vm_num_logical_cores": 8, "vm_num_physical_cores": 4, "winning_bid_id": "bid_X5xhotGYiGUk7_RmIqVafA", "vm_ram": 1429436743, "start_state_time": 1342108905, "vm_ssh_wan_ip_endpoint": "69.4.239.74:62394", "current_state": "Running", "ended_state_time": "null", "running_state_time": 1342108989 }, { "instance_id": "inst_CP2WrQi2WIS4iheyAVkQYw2", "vm_num_logical_cores": 7, "vm_num_physical_cores": 5, "winning_bid_id": "bid_X5xhotGYiGUk7_RmIqVafA1", "vm_ram": 1429436744, "start_state_time": 1342108906, "vm_ssh_wan_ip_endpoint": "69.4.239.74:62395", "current_state": "Starting", "ended_state_time": "null", "running_state_time": 1342108990 } ], "exception_name": "" }) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _compute_api_v1_stop_instance(self, method, url, body, headers): body = json.dumps({"exception_name": ""}) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_ssh_client.py0000664000175000017500000003563112701023453024417 0ustar kamikami00000000000000# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one or more§ # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import with_statement import os import sys import tempfile from libcloud import _init_once from libcloud.test import LibcloudTestCase from libcloud.test import unittest from libcloud.compute.ssh import ParamikoSSHClient from libcloud.compute.ssh import ShellOutSSHClient from libcloud.compute.ssh import have_paramiko from libcloud.utils.py3 import StringIO from libcloud.utils.py3 import u from mock import patch, Mock, MagicMock if not have_paramiko: ParamikoSSHClient = None # NOQA else: import paramiko @unittest.skipIf(not have_paramiko, 'Skipping because paramiko is not available') class ParamikoSSHClientTests(LibcloudTestCase): @patch('paramiko.SSHClient', Mock) def setUp(self): """ Creates the object patching the actual connection. 
""" conn_params = {'hostname': 'dummy.host.org', 'port': 8822, 'username': 'ubuntu', 'key': '~/.ssh/ubuntu_ssh', 'timeout': '600'} _, self.tmp_file = tempfile.mkstemp() os.environ['LIBCLOUD_DEBUG'] = self.tmp_file _init_once() self.ssh_cli = ParamikoSSHClient(**conn_params) @patch('paramiko.SSHClient', Mock) def test_create_with_password(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu', 'password': 'ubuntu'} mock = ParamikoSSHClient(**conn_params) mock.connect() expected_conn = {'username': 'ubuntu', 'password': 'ubuntu', 'allow_agent': False, 'hostname': 'dummy.host.org', 'look_for_keys': False, 'port': 22} mock.client.connect.assert_called_once_with(**expected_conn) self.assertLogMsg('Connecting to server') @patch('paramiko.SSHClient', Mock) def test_deprecated_key_argument(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu', 'key': 'id_rsa'} mock = ParamikoSSHClient(**conn_params) mock.connect() expected_conn = {'username': 'ubuntu', 'allow_agent': False, 'hostname': 'dummy.host.org', 'look_for_keys': False, 'key_filename': 'id_rsa', 'port': 22} mock.client.connect.assert_called_once_with(**expected_conn) self.assertLogMsg('Connecting to server') def test_key_files_and_key_material_arguments_are_mutual_exclusive(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu', 'key_files': 'id_rsa', 'key_material': 'key'} expected_msg = ('key_files and key_material arguments are mutually ' 'exclusive') self.assertRaisesRegexp(ValueError, expected_msg, ParamikoSSHClient, **conn_params) @patch('paramiko.SSHClient', Mock) def test_key_material_argument(self): path = os.path.join(os.path.dirname(__file__), 'fixtures', 'misc', 'dummy_rsa') with open(path, 'r') as fp: private_key = fp.read() conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu', 'key_material': private_key} mock = ParamikoSSHClient(**conn_params) mock.connect() pkey = paramiko.RSAKey.from_private_key(StringIO(private_key)) expected_conn 
= {'username': 'ubuntu', 'allow_agent': False, 'hostname': 'dummy.host.org', 'look_for_keys': False, 'pkey': pkey, 'port': 22} mock.client.connect.assert_called_once_with(**expected_conn) self.assertLogMsg('Connecting to server') @patch('paramiko.SSHClient', Mock) def test_key_material_argument_invalid_key(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu', 'key_material': 'id_rsa'} mock = ParamikoSSHClient(**conn_params) expected_msg = 'Invalid or unsupported key type' self.assertRaisesRegexp(paramiko.ssh_exception.SSHException, expected_msg, mock.connect) @patch('paramiko.SSHClient', Mock) def test_create_with_key(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu', 'key_files': 'id_rsa'} mock = ParamikoSSHClient(**conn_params) mock.connect() expected_conn = {'username': 'ubuntu', 'allow_agent': False, 'hostname': 'dummy.host.org', 'look_for_keys': False, 'key_filename': 'id_rsa', 'port': 22} mock.client.connect.assert_called_once_with(**expected_conn) self.assertLogMsg('Connecting to server') @patch('paramiko.SSHClient', Mock) def test_create_with_password_and_key(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu', 'password': 'ubuntu', 'key': 'id_rsa'} mock = ParamikoSSHClient(**conn_params) mock.connect() expected_conn = {'username': 'ubuntu', 'password': 'ubuntu', 'allow_agent': False, 'hostname': 'dummy.host.org', 'look_for_keys': False, 'key_filename': 'id_rsa', 'port': 22} mock.client.connect.assert_called_once_with(**expected_conn) self.assertLogMsg('Connecting to server') @patch('paramiko.SSHClient', Mock) def test_create_without_credentials(self): """ Initialize object with no credentials. Just to have better coverage, initialize the object without 'password' neither 'key'. 
""" conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu'} mock = ParamikoSSHClient(**conn_params) mock.connect() expected_conn = {'username': 'ubuntu', 'hostname': 'dummy.host.org', 'allow_agent': True, 'look_for_keys': True, 'port': 22} mock.client.connect.assert_called_once_with(**expected_conn) @patch.object(ParamikoSSHClient, '_consume_stdout', MagicMock(return_value=StringIO(''))) @patch.object(ParamikoSSHClient, '_consume_stderr', MagicMock(return_value=StringIO(''))) def test_basic_usage_absolute_path(self): """ Basic execution. """ mock = self.ssh_cli # script to execute sd = "/root/random_script.sh" # Connect behavior mock.connect() mock_cli = mock.client # The actual mocked object: SSHClient expected_conn = {'username': 'ubuntu', 'key_filename': '~/.ssh/ubuntu_ssh', 'allow_agent': False, 'hostname': 'dummy.host.org', 'look_for_keys': False, 'timeout': '600', 'port': 8822} mock_cli.connect.assert_called_once_with(**expected_conn) mock.put(sd) # Make assertions over 'put' method mock_cli.open_sftp().chdir.assert_called_with('root') mock_cli.open_sftp().file.assert_called_once_with('random_script.sh', mode='w') mock.run(sd) # Make assertions over 'run' method mock_cli.get_transport().open_session().exec_command \ .assert_called_once_with(sd) self.assertLogMsg('Executing command (cmd=/root/random_script.sh)') self.assertLogMsg('Command finished') mock.close() def test_delete_script(self): """ Provide a basic test with 'delete' action. 
""" mock = self.ssh_cli # script to execute sd = '/root/random_script.sh' mock.connect() mock.delete(sd) # Make assertions over the 'delete' method mock.client.open_sftp().unlink.assert_called_with(sd) self.assertLogMsg('Deleting file') mock.close() self.assertLogMsg('Closing server connection') def assertLogMsg(self, expected_msg): with open(self.tmp_file, 'r') as fp: content = fp.read() self.assertTrue(content.find(expected_msg) != -1) def test_consume_stdout(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu'} client = ParamikoSSHClient(**conn_params) client.CHUNK_SIZE = 1024 chan = Mock() chan.recv_ready.side_effect = [True, True, False] chan.recv.side_effect = ['123', '456'] stdout = client._consume_stdout(chan).getvalue() self.assertEqual(u('123456'), stdout) self.assertEqual(len(stdout), 6) conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu'} client = ParamikoSSHClient(**conn_params) client.CHUNK_SIZE = 1024 chan = Mock() chan.recv_ready.side_effect = [True, True, False] chan.recv.side_effect = ['987', '6543210'] stdout = client._consume_stdout(chan).getvalue() self.assertEqual(u('9876543210'), stdout) self.assertEqual(len(stdout), 10) def test_consume_stderr(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu'} client = ParamikoSSHClient(**conn_params) client.CHUNK_SIZE = 1024 chan = Mock() chan.recv_stderr_ready.side_effect = [True, True, False] chan.recv_stderr.side_effect = ['123', '456'] stderr = client._consume_stderr(chan).getvalue() self.assertEqual(u('123456'), stderr) self.assertEqual(len(stderr), 6) conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu'} client = ParamikoSSHClient(**conn_params) client.CHUNK_SIZE = 1024 chan = Mock() chan.recv_stderr_ready.side_effect = [True, True, False] chan.recv_stderr.side_effect = ['987', '6543210'] stderr = client._consume_stderr(chan).getvalue() self.assertEqual(u('9876543210'), stderr) self.assertEqual(len(stderr), 10) def 
test_consume_stdout_chunk_contains_part_of_multi_byte_utf8_character(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu'} client = ParamikoSSHClient(**conn_params) client.CHUNK_SIZE = 1 chan = Mock() chan.recv_ready.side_effect = [True, True, True, True, False] chan.recv.side_effect = ['\xF0', '\x90', '\x8D', '\x88'] stdout = client._consume_stdout(chan).getvalue() self.assertEqual('\xf0\x90\x8d\x88', stdout.encode('utf-8')) self.assertTrue(len(stdout) in [1, 2]) def test_consume_stderr_chunk_contains_part_of_multi_byte_utf8_character(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu'} client = ParamikoSSHClient(**conn_params) client.CHUNK_SIZE = 1 chan = Mock() chan.recv_stderr_ready.side_effect = [True, True, True, True, False] chan.recv_stderr.side_effect = ['\xF0', '\x90', '\x8D', '\x88'] stderr = client._consume_stderr(chan).getvalue() self.assertEqual('\xf0\x90\x8d\x88', stderr.encode('utf-8')) self.assertTrue(len(stderr) in [1, 2]) class ShellOutSSHClientTests(LibcloudTestCase): def test_password_auth_not_supported(self): try: ShellOutSSHClient(hostname='localhost', username='foo', password='bar') except ValueError: e = sys.exc_info()[1] msg = str(e) self.assertTrue('ShellOutSSHClient only supports key auth' in msg) else: self.fail('Exception was not thrown') def test_ssh_executable_not_available(self): class MockChild(object): returncode = 127 def communicate(*args, **kwargs): pass def mock_popen(*args, **kwargs): return MockChild() with patch('subprocess.Popen', mock_popen): try: ShellOutSSHClient(hostname='localhost', username='foo') except ValueError: e = sys.exc_info()[1] msg = str(e) self.assertTrue('ssh client is not available' in msg) else: self.fail('Exception was not thrown') def test_connect_success(self): client = ShellOutSSHClient(hostname='localhost', username='root') self.assertTrue(client.connect()) def test_close_success(self): client = ShellOutSSHClient(hostname='localhost', username='root') 
self.assertTrue(client.close()) def test_get_base_ssh_command(self): client1 = ShellOutSSHClient(hostname='localhost', username='root') client2 = ShellOutSSHClient(hostname='localhost', username='root', key='/home/my.key') client3 = ShellOutSSHClient(hostname='localhost', username='root', key='/home/my.key', timeout=5) cmd1 = client1._get_base_ssh_command() cmd2 = client2._get_base_ssh_command() cmd3 = client3._get_base_ssh_command() self.assertEqual(cmd1, ['ssh', 'root@localhost']) self.assertEqual(cmd2, ['ssh', '-i', '/home/my.key', 'root@localhost']) self.assertEqual(cmd3, ['ssh', '-i', '/home/my.key', '-oConnectTimeout=5', 'root@localhost']) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/compute/test_deployment.py0000664000175000017500000004445113153541406024451 0ustar kamikami00000000000000# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one or more§ # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import with_statement import os import sys import time import unittest from libcloud.utils.py3 import httplib from libcloud.utils.py3 import u from libcloud.utils.py3 import PY3 from libcloud.compute.deployment import MultiStepDeployment, Deployment from libcloud.compute.deployment import SSHKeyDeployment, ScriptDeployment from libcloud.compute.deployment import ScriptFileDeployment, FileDeployment from libcloud.compute.base import Node from libcloud.compute.types import NodeState, DeploymentError, LibcloudError from libcloud.compute.ssh import BaseSSHClient from libcloud.compute.drivers.rackspace import RackspaceFirstGenNodeDriver as Rackspace from libcloud.test import MockHttp, XML_HEADERS from libcloud.test.file_fixtures import ComputeFileFixtures from mock import Mock, patch from libcloud.test.secrets import RACKSPACE_PARAMS class MockDeployment(Deployment): def run(self, node, client): return node class MockClient(BaseSSHClient): def __init__(self, *args, **kwargs): self.stdout = '' self.stderr = '' self.exit_status = 0 def put(self, path, contents, chmod=755, mode='w'): return contents def run(self, name): return self.stdout, self.stderr, self.exit_status def delete(self, name): return True class DeploymentTests(unittest.TestCase): def setUp(self): Rackspace.connectionCls.conn_class = RackspaceMockHttp RackspaceMockHttp.type = None self.driver = Rackspace(*RACKSPACE_PARAMS) # normally authentication happens lazily, but we force it here self.driver.connection._populate_hosts_and_request_paths() self.driver.features = {'create_node': ['generates_password']} self.node = Node(id=12345, name='test', state=NodeState.RUNNING, public_ips=['1.2.3.4'], private_ips=['1.2.3.5'], driver=Rackspace) self.node2 = Node(id=123456, name='test', state=NodeState.RUNNING, public_ips=['1.2.3.4'], private_ips=['1.2.3.5'], driver=Rackspace) def test_multi_step_deployment(self): msd = MultiStepDeployment() self.assertEqual(len(msd.steps), 0) msd.add(MockDeployment()) 
self.assertEqual(len(msd.steps), 1) self.assertEqual(self.node, msd.run(node=self.node, client=None)) def test_ssh_key_deployment(self): sshd = SSHKeyDeployment(key='1234') self.assertEqual(self.node, sshd.run(node=self.node, client=MockClient(hostname='localhost'))) def test_file_deployment(self): # use this file (__file__) for obtaining permissions target = os.path.join('/tmp', os.path.basename(__file__)) fd = FileDeployment(__file__, target) self.assertEqual(target, fd.target) self.assertEqual(__file__, fd.source) self.assertEqual(self.node, fd.run( node=self.node, client=MockClient(hostname='localhost'))) def test_script_deployment(self): sd1 = ScriptDeployment(script='foobar', delete=True) sd2 = ScriptDeployment(script='foobar', delete=False) sd3 = ScriptDeployment( script='foobar', delete=False, name='foobarname') self.assertTrue(sd1.name.find('deployment') != '1') self.assertEqual(sd3.name, 'foobarname') self.assertEqual(self.node, sd1.run(node=self.node, client=MockClient(hostname='localhost'))) self.assertEqual(self.node, sd2.run(node=self.node, client=MockClient(hostname='localhost'))) def test_script_file_deployment(self): file_path = os.path.abspath(__file__) with open(file_path, 'rb') as fp: content = fp.read() if PY3: content = content.decode('utf-8') sfd1 = ScriptFileDeployment(script_file=file_path) self.assertEqual(sfd1.script, content) def test_script_deployment_relative_path(self): client = Mock() client.put.return_value = '/home/ubuntu/relative.sh' client.run.return_value = ('', '', 0) sd = ScriptDeployment(script='echo "foo"', name='relative.sh') sd.run(self.node, client) client.run.assert_called_once_with('/home/ubuntu/relative.sh') def test_script_deployment_absolute_path(self): client = Mock() client.put.return_value = '/home/ubuntu/relative.sh' client.run.return_value = ('', '', 0) sd = ScriptDeployment(script='echo "foo"', name='/root/relative.sh') sd.run(self.node, client) client.run.assert_called_once_with('/root/relative.sh') def 
test_script_deployment_with_arguments(self): client = Mock() client.put.return_value = '/home/ubuntu/relative.sh' client.run.return_value = ('', '', 0) args = ['arg1', 'arg2', '--option1=test'] sd = ScriptDeployment(script='echo "foo"', args=args, name='/root/relative.sh') sd.run(self.node, client) expected = '/root/relative.sh arg1 arg2 --option1=test' client.run.assert_called_once_with(expected) client.reset_mock() args = [] sd = ScriptDeployment(script='echo "foo"', args=args, name='/root/relative.sh') sd.run(self.node, client) expected = '/root/relative.sh' client.run.assert_called_once_with(expected) def test_script_file_deployment_with_arguments(self): file_path = os.path.abspath(__file__) client = Mock() client.put.return_value = '/home/ubuntu/relative.sh' client.run.return_value = ('', '', 0) args = ['arg1', 'arg2', '--option1=test', 'option2'] sfd = ScriptFileDeployment(script_file=file_path, args=args, name='/root/relative.sh') sfd.run(self.node, client) expected = '/root/relative.sh arg1 arg2 --option1=test option2' client.run.assert_called_once_with(expected) def test_script_deployment_and_sshkey_deployment_argument_types(self): class FileObject(object): def __init__(self, name): self.name = name def read(self): return 'bar' ScriptDeployment(script='foobar') ScriptDeployment(script=u('foobar')) ScriptDeployment(script=FileObject('test')) SSHKeyDeployment(key='foobar') SSHKeyDeployment(key=u('foobar')) SSHKeyDeployment(key=FileObject('test')) try: ScriptDeployment(script=[]) except TypeError: pass else: self.fail('TypeError was not thrown') try: SSHKeyDeployment(key={}) except TypeError: pass else: self.fail('TypeError was not thrown') def test_wait_until_running_running_instantly(self): node2, ips = self.driver.wait_until_running( nodes=[self.node], wait_period=1, timeout=0.5)[0] self.assertEqual(self.node.uuid, node2.uuid) self.assertEqual(['67.23.21.33'], ips) def test_wait_until_running_running_after_1_second(self): RackspaceMockHttp.type = 
'05_SECOND_DELAY' node2, ips = self.driver.wait_until_running( nodes=[self.node], wait_period=1, timeout=0.5)[0] self.assertEqual(self.node.uuid, node2.uuid) self.assertEqual(['67.23.21.33'], ips) def test_wait_until_running_running_after_1_second_private_ips(self): RackspaceMockHttp.type = '05_SECOND_DELAY' node2, ips = self.driver.wait_until_running( nodes=[self.node], wait_period=1, timeout=0.5, ssh_interface='private_ips')[0] self.assertEqual(self.node.uuid, node2.uuid) self.assertEqual(['10.176.168.218'], ips) def test_wait_until_running_invalid_ssh_interface_argument(self): try: self.driver.wait_until_running(nodes=[self.node], wait_period=1, ssh_interface='invalid') except ValueError: pass else: self.fail('Exception was not thrown') def test_wait_until_running_timeout(self): RackspaceMockHttp.type = 'TIMEOUT' try: self.driver.wait_until_running(nodes=[self.node], wait_period=0.1, timeout=0.5) except LibcloudError: e = sys.exc_info()[1] self.assertTrue(e.value.find('Timed out') != -1) else: self.fail('Exception was not thrown') def test_wait_until_running_running_node_missing_from_list_nodes(self): RackspaceMockHttp.type = 'MISSING' try: self.driver.wait_until_running(nodes=[self.node], wait_period=0.1, timeout=0.5) except LibcloudError: e = sys.exc_info()[1] self.assertTrue(e.value.find('Timed out after 0.5 second') != -1) else: self.fail('Exception was not thrown') def test_wait_until_running_running_multiple_nodes_have_same_uuid(self): RackspaceMockHttp.type = 'SAME_UUID' try: self.driver.wait_until_running(nodes=[self.node], wait_period=0.1, timeout=0.5) except LibcloudError: e = sys.exc_info()[1] self.assertTrue( e.value.find('Unable to match specified uuids') != -1) else: self.fail('Exception was not thrown') def test_wait_until_running_running_wait_for_multiple_nodes(self): RackspaceMockHttp.type = 'MULTIPLE_NODES' nodes = self.driver.wait_until_running( nodes=[self.node, self.node2], wait_period=0.1, timeout=0.5) self.assertEqual(self.node.uuid, 
nodes[0][0].uuid) self.assertEqual(self.node2.uuid, nodes[1][0].uuid) self.assertEqual(['67.23.21.33'], nodes[0][1]) self.assertEqual(['67.23.21.34'], nodes[1][1]) def test_ssh_client_connect_success(self): mock_ssh_client = Mock() mock_ssh_client.return_value = None ssh_client = self.driver._ssh_client_connect( ssh_client=mock_ssh_client, timeout=0.5) self.assertEqual(mock_ssh_client, ssh_client) def test_ssh_client_connect_timeout(self): mock_ssh_client = Mock() mock_ssh_client.connect = Mock() mock_ssh_client.connect.side_effect = IOError('bam') try: self.driver._ssh_client_connect(ssh_client=mock_ssh_client, timeout=0.5) except LibcloudError: e = sys.exc_info()[1] self.assertTrue(e.value.find('Giving up') != -1) else: self.fail('Exception was not thrown') def test_run_deployment_script_success(self): task = Mock() ssh_client = Mock() ssh_client2 = self.driver._run_deployment_script(task=task, node=self.node, ssh_client=ssh_client, max_tries=2) self.assertTrue(isinstance(ssh_client2, Mock)) def test_run_deployment_script_exception(self): task = Mock() task.run = Mock() task.run.side_effect = Exception('bar') ssh_client = Mock() try: self.driver._run_deployment_script(task=task, node=self.node, ssh_client=ssh_client, max_tries=2) except LibcloudError: e = sys.exc_info()[1] self.assertTrue(e.value.find('Failed after 2 tries') != -1) else: self.fail('Exception was not thrown') @patch('libcloud.compute.base.SSHClient') @patch('libcloud.compute.ssh') def test_deploy_node_success(self, mock_ssh_module, _): self.driver.create_node = Mock() self.driver.create_node.return_value = self.node mock_ssh_module.have_paramiko = True deploy = Mock() node = self.driver.deploy_node(deploy=deploy) self.assertEqual(self.node.id, node.id) @patch('libcloud.compute.base.SSHClient') @patch('libcloud.compute.ssh') def test_deploy_node_exception_run_deployment_script(self, mock_ssh_module, _): self.driver.create_node = Mock() self.driver.create_node.return_value = self.node 
mock_ssh_module.have_paramiko = True deploy = Mock() deploy.run = Mock() deploy.run.side_effect = Exception('foo') try: self.driver.deploy_node(deploy=deploy) except DeploymentError: e = sys.exc_info()[1] self.assertTrue(e.node.id, self.node.id) else: self.fail('Exception was not thrown') @patch('libcloud.compute.base.SSHClient') @patch('libcloud.compute.ssh') def test_deploy_node_exception_ssh_client_connect(self, mock_ssh_module, ssh_client): self.driver.create_node = Mock() self.driver.create_node.return_value = self.node mock_ssh_module.have_paramiko = True deploy = Mock() ssh_client.side_effect = IOError('bar') try: self.driver.deploy_node(deploy=deploy) except DeploymentError: e = sys.exc_info()[1] self.assertTrue(e.node.id, self.node.id) else: self.fail('Exception was not thrown') @patch('libcloud.compute.ssh') def test_deploy_node_depoy_node_not_implemented(self, mock_ssh_module): self.driver.features = {'create_node': []} mock_ssh_module.have_paramiko = True try: self.driver.deploy_node(deploy=Mock()) except NotImplementedError: pass else: self.fail('Exception was not thrown') self.driver.features = {} try: self.driver.deploy_node(deploy=Mock()) except NotImplementedError: pass else: self.fail('Exception was not thrown') @patch('libcloud.compute.base.SSHClient') @patch('libcloud.compute.ssh') def test_deploy_node_password_auth(self, mock_ssh_module, _): self.driver.features = {'create_node': ['password']} mock_ssh_module.have_paramiko = True self.driver.create_node = Mock() self.driver.create_node.return_value = self.node node = self.driver.deploy_node(deploy=Mock()) self.assertEqual(self.node.id, node.id) @patch('libcloud.compute.base.SSHClient') @patch('libcloud.compute.ssh') def test_exception_is_thrown_is_paramiko_is_not_available(self, mock_ssh_module, _): self.driver.features = {'create_node': ['password']} self.driver.create_node = Mock() self.driver.create_node.return_value = self.node mock_ssh_module.have_paramiko = False try: 
self.driver.deploy_node(deploy=Mock()) except RuntimeError: e = sys.exc_info()[1] self.assertTrue(str(e).find('paramiko is not installed') != -1) else: self.fail('Exception was not thrown') mock_ssh_module.have_paramiko = True node = self.driver.deploy_node(deploy=Mock()) self.assertEqual(self.node.id, node.id) class RackspaceMockHttp(MockHttp): fixtures = ComputeFileFixtures('openstack') def _v2_0_tokens(self, method, url, body, headers): body = self.fixtures.load('_v2_0__auth_deployment.json') headers = { 'content-type': 'application/json' } return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _v1_0_slug_servers_detail(self, method, url, body, headers): body = self.fixtures.load( 'v1_slug_servers_detail_deployment_success.xml') return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) def _v1_0_slug_servers_detail_05_SECOND_DELAY(self, method, url, body, headers): time.sleep(0.5) body = self.fixtures.load( 'v1_slug_servers_detail_deployment_success.xml') return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) def _v1_0_slug_servers_detail_TIMEOUT(self, method, url, body, headers): body = self.fixtures.load( 'v1_slug_servers_detail_deployment_pending.xml') return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) def _v1_0_slug_servers_detail_MISSING(self, method, url, body, headers): body = self.fixtures.load( 'v1_slug_servers_detail_deployment_missing.xml') return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) def _v1_0_slug_servers_detail_SAME_UUID(self, method, url, body, headers): body = self.fixtures.load( 'v1_slug_servers_detail_deployment_same_uuid.xml') return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) def _v1_0_slug_servers_detail_MULTIPLE_NODES(self, method, url, body, headers): body = self.fixtures.load( 'v1_slug_servers_detail_deployment_multiple_nodes.xml') return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) if __name__ == '__main__': 
sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/container/0000775000175000017500000000000013160535110021147 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/container/test_joyent.py0000664000175000017500000000313013153541406024074 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from libcloud.test import unittest from libcloud.container.drivers.joyent import JoyentContainerDriver from libcloud.test.secrets import CONTAINER_PARAMS_DOCKER from libcloud.test.container.test_docker import DockerContainerDriverTestCase, DockerMockHttp class JoyentContainerDriverTestCase(DockerContainerDriverTestCase, unittest.TestCase): def setUp(self): # Create a test driver for each version versions = ('linux_124', 'mac_124') self.drivers = [] for version in versions: JoyentContainerDriver.connectionCls.conn_class = \ DockerMockHttp DockerMockHttp.type = None DockerMockHttp.use_param = 'a' driver = JoyentContainerDriver(*CONTAINER_PARAMS_DOCKER) driver.version = version self.drivers.append(driver) apache-libcloud-2.2.1/libcloud/test/container/__init__.py0000664000175000017500000000221112701023453023256 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from libcloud.container.base import ContainerImage class TestCaseMixin(object): def test_list_images_response(self): images = self.driver.list_images() self.assertTrue(isinstance(images, list)) for image in images: self.assertTrue(isinstance(image, ContainerImage)) if __name__ == "__main__": import doctest doctest.testmod() apache-libcloud-2.2.1/libcloud/test/container/fixtures/0000775000175000017500000000000013160535107023026 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/gke/0000775000175000017500000000000013160535110023566 5ustar kamikami00000000000000././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/gke/zones_us-central1-a_instance_serverconfig.jsonapache-libcloud-2.2.1/libcloud/test/container/fixtures/gke/zones_us-central1-a_instance_serverconfig0000664000175000017500000000033113153541406033747 0ustar kamikami00000000000000{"validImageTypes": ["CONTAINER_VM", "COS"], "validNodeVersions": ["1.6.4", "1.6.2", "1.5.7", "1.5.6", "1.4.9"], "defaultClusterVersion": "1.6.4", "validMasterVersions": ["1.6.4", "1.5.7"], "defaultImageType": "COS"} apache-libcloud-2.2.1/libcloud/test/container/fixtures/gke/zones_us-central1-a_list.json0000664000175000017500000000177513153541406031327 0ustar kamikami00000000000000{"clusters": [{ "currentMasterVersion": "1.6.4", "currentNodeCount": 3, "currentNodeVersion": "1.6.4", "initialClusterVersion": "1.6.4", "locations": ["us-central1-a"], "loggingService": "logging.googleapis.com", "name": "cluster-1", "network": "default", "nodeConfig": {"diskSizeGb": 100, "imageType": "COS", "machineType": "n1-standard-1", "oauthScopes": ["https://www.googleapis.com/auth/compute"], "serviceAccount": "default"}, "nodeIpv4CidrSize": 24, "nodePools": [{"autoscaling": {}, "config": {"diskSizeGb": 100, "serviceAccount": "default"}, "initialNodeCount": 3, "instanceGroupUrls": 
["https://www.googleapis.com/compute/v1/projects/project_name"], "management": {}, "name": "default-pool", "status": "RUNNING", "version": "1.6.4"}], "selfLink": "https://container.googleapis.com/v1/projects/", "servicesIpv4Cidr": "XX.XX.XXX.X/20", "status": "RUNNING", "subnetwork": "default", "zone": "us-central1-a"}]} apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker_utils/0000775000175000017500000000000013160535110025507 5ustar kamikami00000000000000././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker_utils/v2_repositories_library_ubuntu_tags_latest.jsonapache-libcloud-2.2.1/libcloud/test/container/fixtures/docker_utils/v2_repositories_library_ubuntu_t0000664000175000017500000000031512701023453034242 0ustar kamikami00000000000000{ "name": "latest", "full_size": 65747044, "id": 2343, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T18:59:54.779484Z", "image_id": null, "v2": true }././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker_utils/v2_repositories_library_ubuntu_tags.jsonapache-libcloud-2.2.1/libcloud/test/container/fixtures/docker_utils/v2_repositories_library_ubuntu_t0000664000175000017500000005253712701023453034257 0ustar kamikami00000000000000{ "count": 88, "next": null, "previous": null, "results": [ { "name": "xenial", "full_size": 47439662, "id": 1589976, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2016-01-04T19:00:51.344198Z", "image_id": null, "v2": true }, { "name": "xenial-20151218.1", "full_size": 47439662, "id": 1589974, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2016-01-04T19:00:45.326998Z", "image_id": null, "v2": true }, { "name": "16.04", "full_size": 47439662, "id": 1589970, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": 
"2016-01-04T19:00:39.215205Z", "image_id": null, "v2": true }, { "name": "wily", "full_size": 50294202, "id": 2332, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T19:00:30.141301Z", "image_id": null, "v2": true }, { "name": "wily-20151208", "full_size": 50294202, "id": 1509180, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2016-01-04T19:00:24.324323Z", "image_id": null, "v2": true }, { "name": "15.10", "full_size": 50294202, "id": 2327, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T19:00:18.625895Z", "image_id": null, "v2": true }, { "name": "vivid", "full_size": 49334628, "id": 2329, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T19:00:12.764756Z", "image_id": null, "v2": true }, { "name": "vivid-20151208", "full_size": 49334628, "id": 1509158, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2016-01-04T19:00:07.282476Z", "image_id": null, "v2": true }, { "name": "15.04", "full_size": 49334628, "id": 2298, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T19:00:01.659187Z", "image_id": null, "v2": true }, { "name": "latest", "full_size": 65747044, "id": 2343, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T18:59:54.779484Z", "image_id": null, "v2": true }, { "name": "trusty", "full_size": 65747044, "id": 2305, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T18:59:49.102906Z", "image_id": null, "v2": true }, { "name": "trusty-20151218", "full_size": 65747044, "id": 1657173, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2016-01-04T18:59:43.103725Z", "image_id": null, "v2": true }, { "name": "14.04", "full_size": 65747044, "id": 2324, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T18:59:37.496154Z", "image_id": null, "v2": true }, { "name": 
"14.04.3", "full_size": 65747044, "id": 693829, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2016-01-04T18:58:46.261151Z", "image_id": null, "v2": true }, { "name": "precise", "full_size": 44194573, "id": 2292, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T18:57:33.494399Z", "image_id": null, "v2": true }, { "name": "precise-20151208", "full_size": 44194573, "id": 1509115, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2016-01-04T18:57:22.912930Z", "image_id": null, "v2": true }, { "name": "12.04", "full_size": 44194573, "id": 2310, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T18:57:14.495660Z", "image_id": null, "v2": true }, { "name": "12.04.5", "full_size": 44194573, "id": 2295, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": "2016-01-04T18:57:08.237857Z", "image_id": null, "v2": true }, { "name": "trusty-20151208", "full_size": 65742980, "id": 1509143, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2015-12-18T18:26:56.770757Z", "image_id": null, "v2": true }, { "name": "wily-20151019", "full_size": 49817335, "id": 1168566, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2015-12-08T09:04:03.336941Z", "image_id": null, "v2": true }, { "name": "vivid-20151111", "full_size": 49333876, "id": 1389462, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2015-12-08T09:02:56.243906Z", "image_id": null, "v2": true }, { "name": "trusty-20151028", "full_size": 65742789, "id": 1313714, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2015-12-08T09:02:32.340602Z", "image_id": null, "v2": true }, { "name": "precise-20151028", "full_size": 44096878, "id": 1313721, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2015-12-08T09:02:07.834932Z", "image_id": null, "v2": true }, { "name": 
"vivid-20151106", "full_size": 49329280, "id": 1313735, "repository": 130, "creator": 2215, "last_updater": 213249, "last_updated": "2015-11-21T01:14:52.126272Z", "image_id": null, "v2": true }, { "name": "12.10", "full_size": 58078433, "id": 2339, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:42:40.878495Z", "image_id": null, "v2": true }, { "name": "trusty-20150218.1", "full_size": 65832655, "id": 2325, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:42:29.777917Z", "image_id": null, "v2": true }, { "name": "utopic-20150211", "full_size": 68374766, "id": 2307, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:41:34.805100Z", "image_id": null, "v2": true }, { "name": "quantal", "full_size": 58078433, "id": 2341, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:40:29.962553Z", "image_id": null, "v2": true }, { "name": "14.04.1", "full_size": 65827193, "id": 2322, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:39:42.479726Z", "image_id": null, "v2": true }, { "name": "raring", "full_size": 57667348, "id": 2316, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:38:47.745980Z", "image_id": null, "v2": true }, { "name": "vivid-20150218", "full_size": 44122689, "id": 2320, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:38:37.964774Z", "image_id": null, "v2": true }, { "name": "vivid-20150309", "full_size": 49448856, "id": 2317, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:37:59.209284Z", "image_id": null, "v2": true }, { "name": "10.04", "full_size": 63533781, "id": 2299, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:37:16.010344Z", "image_id": null, "v2": true }, { "name": "saucy", "full_size": 60522580, "id": 
2301, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:37:04.113916Z", "image_id": null, "v2": true }, { "name": "precise-20150228.11", "full_size": 43657047, "id": 2331, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:36:51.552688Z", "image_id": null, "v2": true }, { "name": "utopic-20150228.11", "full_size": 68379536, "id": 2318, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:36:02.986916Z", "image_id": null, "v2": true }, { "name": "13.10", "full_size": 60522580, "id": 2304, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:34:52.333445Z", "image_id": null, "v2": true }, { "name": "13.04", "full_size": 57667348, "id": 2294, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:33:49.391093Z", "image_id": null, "v2": true }, { "name": "precise-20150212", "full_size": 43616335, "id": 2340, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:32:59.135708Z", "image_id": null, "v2": true }, { "name": "trusty-20150228.11", "full_size": 65828716, "id": 2323, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:32:14.798440Z", "image_id": null, "v2": true }, { "name": "lucid", "full_size": 63533781, "id": 2321, "repository": 130, "creator": 7, "last_updater": 134455, "last_updated": "2015-11-14T14:31:11.758283Z", "image_id": null, "v2": true }, { "name": "vivid-20151021", "full_size": 49328003, "id": 1168551, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2015-10-28T12:21:31.210637Z", "image_id": null, "v2": true }, { "name": "trusty-20151021", "full_size": 65741561, "id": 1168539, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2015-10-28T12:21:07.780326Z", "image_id": null, "v2": true }, { "name": "precise-20151020", "full_size": 44096883, "id": 1168524, 
"repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": "2015-10-28T12:20:39.816211Z", "image_id": null, "v2": true }, { "name": "wily-20151009", "full_size": 49844614, "id": 1096696, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "trusty-20151009", "full_size": 65861875, "id": 1096682, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "wily-20151006", "full_size": 49861095, "id": 1081815, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "vivid-20150930", "full_size": 49345386, "id": 1081804, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "trusty-20151001", "full_size": 65757468, "id": 1081789, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "precise-20150924", "full_size": 44037965, "id": 1081772, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "wily-20150829", "full_size": 49614664, "id": 828778, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "wily-20150818", "full_size": 50298307, "id": 778618, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "precise-20150813", "full_size": 43977816, "id": 776393, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "vivid-20150813", "full_size": 49343696, "id": 776144, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "trusty-20150814", "full_size": 65859249, "id": 775535, "repository": 130, 
"creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "wily-20150807", "full_size": 50528668, "id": 693839, "repository": 130, "creator": 2215, "last_updater": 213249, "last_updated": null, "image_id": null, "v2": true }, { "name": "trusty-20150806", "full_size": 65857914, "id": 693834, "repository": 130, "creator": 2215, "last_updater": 213249, "last_updated": null, "image_id": null, "v2": true }, { "name": "wily-20150731", "full_size": 50488452, "id": 674054, "repository": 130, "creator": 2215, "last_updater": 213249, "last_updated": null, "image_id": null, "v2": true }, { "name": "vivid-20150802", "full_size": 49340063, "id": 674043, "repository": 130, "creator": 2215, "last_updater": 213249, "last_updated": null, "image_id": null, "v2": true }, { "name": "trusty-20150730", "full_size": 65860360, "id": 674034, "repository": 130, "creator": 2215, "last_updater": 213249, "last_updated": null, "image_id": null, "v2": true }, { "name": "precise-20150729", "full_size": 43967445, "id": 674016, "repository": 130, "creator": 2215, "last_updater": 213249, "last_updated": null, "image_id": null, "v2": true }, { "name": "wily-20150708", "full_size": 50494409, "id": 541269, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "utopic-20150625", "full_size": 68399747, "id": 541258, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "trusty-20150630", "full_size": 65858138, "id": 541253, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "precise-20150626", "full_size": 43878461, "id": 541246, "repository": 130, "creator": 2215, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "vivid-20150528", "full_size": 131333439, "id": 2338, "repository": 130, "creator": 7, "last_updater": 7, 
"last_updated": null, "image_id": null, "v2": false }, { "name": "utopic-20150528", "full_size": 194454267, "id": 2337, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "utopic", "full_size": 68399747, "id": 2336, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "precise-20150528", "full_size": 133416464, "id": 2335, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "utopic-20150319", "full_size": 194424279, "id": 2334, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "utopic-20150418", "full_size": 194463410, "id": 2333, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "precise-20150427", "full_size": 132465012, "id": 2330, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "trusty-20150612", "full_size": 188284994, "id": 2328, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "wily-20150528.1", "full_size": 132392276, "id": 2326, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "vivid-20150319.1", "full_size": 131685773, "id": 2315, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "14.04.2", "full_size": 65860360, "id": 2314, "repository": 130, "creator": 7, "last_updater": 213249, "last_updated": null, "image_id": null, "v2": true }, { "name": "trusty-20150528", "full_size": 188281989, "id": 2313, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "precise-20150612", "full_size": 133706040, "id": 
2312, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "trusty-20150320", "full_size": 188300556, "id": 2311, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "vivid-20150611", "full_size": 49338475, "id": 2309, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "trusty-20150427", "full_size": 188278440, "id": 2308, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "wily-20150611", "full_size": 133648792, "id": 2306, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "utopic-20150612", "full_size": 194462706, "id": 2303, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "utopic-20150427", "full_size": 194461653, "id": 2302, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "precise-20150320", "full_size": 131886863, "id": 2300, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "14.10", "full_size": 68399747, "id": 2297, "repository": 130, "creator": 7, "last_updater": 2215, "last_updated": null, "image_id": null, "v2": true }, { "name": "vivid-20150427", "full_size": 131302888, "id": 2296, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false }, { "name": "vivid-20150421", "full_size": 131279915, "id": 2293, "repository": 130, "creator": 7, "last_updater": 7, "last_updated": null, "image_id": null, "v2": false } ] }././@LongLink0000000000000000000000000000015000000000000011211 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker_utils/v2_repositories_library_ubuntu.jsonapache-libcloud-2.2.1/libcloud/test/container/fixtures/docker_utils/v2_repositories_library_ubuntu.j0000664000175000017500000001464712701023453034164 0ustar kamikami00000000000000{ "user": "library", "name": "ubuntu", "namespace": "library", "status": 1, "description": "Ubuntu is a Debian-based Linux operating system based on free software.", "is_private": false, "is_automated": false, "can_edit": false, "star_count": 2954, "pull_count": 37627788, "last_updated": "2016-01-04T19:00:53.547174Z", "has_starred": false, "full_description": "# Supported tags and respective `Dockerfile` links\n\n-\t[`12.04.5`, `12.04`, `precise-20151208`, `precise` (*precise/Dockerfile*)](https://github.com/tianon/docker-brew-ubuntu-core/blob/d7f2045ad9b08962d9728f6d9910fa252282b85f/precise/Dockerfile)\n-\t[`14.04.3`, `14.04`, `trusty-20151218`, `trusty`, `latest` (*trusty/Dockerfile*)](https://github.com/tianon/docker-brew-ubuntu-core/blob/d7f2045ad9b08962d9728f6d9910fa252282b85f/trusty/Dockerfile)\n-\t[`15.04`, `vivid-20151208`, `vivid` (*vivid/Dockerfile*)](https://github.com/tianon/docker-brew-ubuntu-core/blob/d7f2045ad9b08962d9728f6d9910fa252282b85f/vivid/Dockerfile)\n-\t[`15.10`, `wily-20151208`, `wily` (*wily/Dockerfile*)](https://github.com/tianon/docker-brew-ubuntu-core/blob/d7f2045ad9b08962d9728f6d9910fa252282b85f/wily/Dockerfile)\n-\t[`16.04`, `xenial-20151218.1`, `xenial` (*xenial/Dockerfile*)](https://github.com/tianon/docker-brew-ubuntu-core/blob/d7f2045ad9b08962d9728f6d9910fa252282b85f/xenial/Dockerfile)\n\nFor more information about this image and its history, please see [the relevant manifest file (`library/ubuntu`)](https://github.com/docker-library/official-images/blob/master/library/ubuntu). 
This image is updated via pull requests to [the `docker-library/official-images` GitHub repo](https://github.com/docker-library/official-images).\n\nFor detailed information about the virtual/transfer sizes and individual layers of each of the above supported tags, please see [the `ubuntu/tag-details.md` file](https://github.com/docker-library/docs/blob/master/ubuntu/tag-details.md) in [the `docker-library/docs` GitHub repo](https://github.com/docker-library/docs).\n\n# What is Ubuntu?\n\nUbuntu is a Debian-based Linux operating system, with Unity as its default desktop environment. It is based on free software and named after the Southern African philosophy of ubuntu (literally, \"human-ness\"), which often is translated as \"humanity towards others\" or \"the belief in a universal bond of sharing that connects all humanity\".\n\nDevelopment of Ubuntu is led by UK-based Canonical Ltd., a company owned by South African entrepreneur Mark Shuttleworth. Canonical generates revenue through the sale of technical support and other services related to Ubuntu. 
The Ubuntu project is publicly committed to the principles of open-source software development; people are encouraged to use free software, study how it works, improve upon it, and distribute it.\n\n> [wikipedia.org/wiki/Ubuntu_(operating_system)](https://en.wikipedia.org/wiki/Ubuntu_%28operating_system%29)\n\n![logo](https://raw.githubusercontent.com/docker-library/docs/01c12653951b2fe592c1f93a13b4e289ada0e3a1/ubuntu/logo.png)\n\n# What's in this image?\n\n## `/etc/apt/sources.list`\n\n### `ubuntu:14.04`\n\n```console\n$ docker run ubuntu:14.04 grep -v '^#' /etc/apt/sources.list\n\ndeb http://archive.ubuntu.com/ubuntu/ trusty main restricted\ndeb-src http://archive.ubuntu.com/ubuntu/ trusty main restricted\n\ndeb http://archive.ubuntu.com/ubuntu/ trusty-updates main restricted\ndeb-src http://archive.ubuntu.com/ubuntu/ trusty-updates main restricted\n\ndeb http://archive.ubuntu.com/ubuntu/ trusty universe\ndeb-src http://archive.ubuntu.com/ubuntu/ trusty universe\ndeb http://archive.ubuntu.com/ubuntu/ trusty-updates universe\ndeb-src http://archive.ubuntu.com/ubuntu/ trusty-updates universe\n\n\ndeb http://archive.ubuntu.com/ubuntu/ trusty-security main restricted\ndeb-src http://archive.ubuntu.com/ubuntu/ trusty-security main restricted\ndeb http://archive.ubuntu.com/ubuntu/ trusty-security universe\ndeb-src http://archive.ubuntu.com/ubuntu/ trusty-security universe\n```\n\n### `ubuntu:12.04`\n\n```console\n$ docker run ubuntu:12.04 cat /etc/apt/sources.list\n\ndeb http://archive.ubuntu.com/ubuntu/ precise main restricted\ndeb-src http://archive.ubuntu.com/ubuntu/ precise main restricted\n\ndeb http://archive.ubuntu.com/ubuntu/ precise-updates main restricted\ndeb-src http://archive.ubuntu.com/ubuntu/ precise-updates main restricted\n\ndeb http://archive.ubuntu.com/ubuntu/ precise universe\ndeb-src http://archive.ubuntu.com/ubuntu/ precise universe\ndeb http://archive.ubuntu.com/ubuntu/ precise-updates universe\ndeb-src http://archive.ubuntu.com/ubuntu/ 
precise-updates universe\n\n\ndeb http://archive.ubuntu.com/ubuntu/ precise-security main restricted\ndeb-src http://archive.ubuntu.com/ubuntu/ precise-security main restricted\ndeb http://archive.ubuntu.com/ubuntu/ precise-security universe\ndeb-src http://archive.ubuntu.com/ubuntu/ precise-security universe\n```\n\n# Supported Docker versions\n\nThis image is officially supported on Docker version 1.9.1.\n\nSupport for older versions (down to 1.6) is provided on a best-effort basis.\n\nPlease see [the Docker installation documentation](https://docs.docker.com/installation/) for details on how to upgrade your Docker daemon.\n\n# User Feedback\n\n## Documentation\n\nDocumentation for this image is stored in the [`ubuntu/` directory](https://github.com/docker-library/docs/tree/master/ubuntu) of the [`docker-library/docs` GitHub repo](https://github.com/docker-library/docs). Be sure to familiarize yourself with the [repository's `README.md` file](https://github.com/docker-library/docs/blob/master/README.md) before attempting a pull request.\n\n## Issues\n\nIf you have any problems with or questions about this image, please contact us through a [GitHub issue](https://github.com/tianon/docker-brew-ubuntu-core/issues).\n\nYou can also reach many of the official image maintainers via the `#docker-library` IRC channel on [Freenode](https://freenode.net).\n\n## Contributing\n\nYou are invited to contribute new features, fixes, or updates, large or small; we are always thrilled to receive pull requests, and do our best to process them as fast as we can.\n\nBefore you start to code, we recommend discussing your plans through a [GitHub issue](https://github.com/tianon/docker-brew-ubuntu-core/issues), especially for more ambitious contributions. This gives other contributors a chance to point you in the right direction, give you feedback on your design, and help you find out if someone else is working on the same thing." 
}apache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/0000775000175000017500000000000013160535110025167 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/_api_v1_pods.json0000664000175000017500000000561612701023453030437 0ustar kamikami00000000000000{ "kind": "PodList", "apiVersion": "v1", "metadata": { "selfLink": "/api/v1/pods", "resourceVersion": "63" }, "items": [ { "metadata": { "name": "hello-world", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/hello-world", "uid": "1fad5411-b9af-11e5-8701-0050568157ec", "resourceVersion": "62", "creationTimestamp": "2016-01-13T04:35:50Z" }, "spec": { "volumes": [ { "name": "default-token-dpyh0", "secret": { "secretName": "default-token-dpyh0" } } ], "containers": [ { "name": "hello-world", "image": "ubuntu:14.04", "resources": {}, "volumeMounts": [ { "name": "default-token-dpyh0", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "imagePullPolicy": "IfNotPresent" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "127.0.0.1", "securityContext": {} }, "status": { "phase": "Running", "conditions": [ { "type": "Ready", "status": "False", "lastProbeTime": null, "lastTransitionTime": "2016-01-13T04:37:09Z", "reason": "ContainersNotReady", "message": "containers with unready status: [hello-world]" } ], "hostIP": "127.0.0.1", "podIP": "172.17.0.2", "startTime": "2016-01-13T04:35:50Z", "containerStatuses": [ { "name": "hello-world", "state": { "waiting": { "reason": "CrashLoopBackOff", "message": "Back-off 20s restarting failed container=hello-world pod=hello-world_default(1fad5411-b9af-11e5-8701-0050568157ec)" } }, "lastState": { "terminated": { "exitCode": 0, "reason": "Completed", "startedAt": "2016-01-13T04:37:07Z", "finishedAt": 
"2016-01-13T04:37:07Z", "containerID": "docker://3c48b5cda79bce4c8866f02a3b96a024edb8f660d10e7d1755e9ced49ef47b36" } }, "ready": false, "restartCount": 2, "image": "ubuntu:14.04", "imageID": "docker://c4bea91afef3764163fd506f5c1090be1d34a9b63ece81867cb863455937048e", "containerID": "docker://3c48b5cda79bce4c8866f02a3b96a024edb8f660d10e7d1755e9ced49ef47b36" } ] } } ] }././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces_default_pods_POST.jsonapache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces_default_pods_PO0000664000175000017500000000221212701023453033755 0ustar kamikami00000000000000{ "kind": "Pod", "apiVersion": "v1", "metadata": { "name": "hello-world", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/hello-world", "uid": "1fad5411-b9af-11e5-8701-0050568157ec", "resourceVersion": "32", "creationTimestamp": "2016-01-13T04:35:50Z" }, "spec": { "volumes": [ { "name": "default-token-dpyh0", "secret": { "secretName": "default-token-dpyh0" } } ], "containers": [ { "name": "hello-world", "image": "ubuntu:14.04", "resources": {}, "volumeMounts": [ { "name": "default-token-dpyh0", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "imagePullPolicy": "IfNotPresent" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "serviceAccountName": "default", "serviceAccount": "default", "securityContext": {} }, "status": { "phase": "Pending" } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces_default.json0000664000175000017500000000055712701023453033314 0ustar kamikami00000000000000{ "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": "default", "selfLink": "/api/v1/namespaces/default", "uid": "43e99cf9-b99d-11e5-8d53-0050568157ec", 
"resourceVersion": "6", "creationTimestamp": "2016-01-13T02:28:00Z" }, "spec": { "finalizers": [ "kubernetes" ] }, "status": { "phase": "Active" } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces_test.json0000664000175000017500000000055312701023453032643 0ustar kamikami00000000000000{ "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": "test", "selfLink": "/api/v1/namespaces/test", "uid": "7cb89199-b9a6-11e5-8d53-0050568157ec", "resourceVersion": "419", "creationTimestamp": "2016-01-13T03:34:01Z" }, "spec": { "finalizers": [ "kubernetes" ] }, "status": { "phase": "Active" } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/_api_v1_nodes_127_0_0_1.json0000664000175000017500000000355512701023453032051 0ustar kamikami00000000000000{ "kind": "Node", "apiVersion": "v1", "metadata": { "name": "127.0.0.1", "selfLink": "/api/v1/nodes/127.0.0.1", "uid": "45949cbb-b99d-11e5-8d53-0050568157ec", "resourceVersion": "184", "creationTimestamp": "2016-01-13T02:28:03Z", "labels": { "kubernetes.io/hostname": "127.0.0.1" } }, "spec": { "externalID": "127.0.0.1" }, "status": { "capacity": { "cpu": "2", "memory": "4048236Ki", "pods": "40" }, "allocatable": { "cpu": "2", "memory": "4048236Ki", "pods": "40" }, "conditions": [ { "type": "OutOfDisk", "status": "False", "lastHeartbeatTime": "2016-01-13T02:55:34Z", "lastTransitionTime": "2016-01-13T02:28:03Z", "reason": "KubeletHasSufficientDisk", "message": "kubelet has sufficient disk space available" }, { "type": "Ready", "status": "True", "lastHeartbeatTime": "2016-01-13T02:55:34Z", "lastTransitionTime": "2016-01-13T02:28:03Z", "reason": "KubeletReady", "message": "kubelet is posting ready status" } ], "addresses": [ { "type": "LegacyHostIP", "address": "127.0.0.1" }, { "type": "InternalIP", "address": "127.0.0.1" } ], "daemonEndpoints": { "kubeletEndpoint": { "Port": 10250 } }, "nodeInfo": { "machineID": "1d9faaba9168d4b4a3416e99000002a2", "systemUUID": 
"42015AA3-9AF2-D089-9105-63CEF89EFE35", "bootID": "c31fdd67-f995-4be5-942d-10df26b40501", "kernelVersion": "3.13.0-46-generic", "osImage": "Ubuntu 14.04.2 LTS", "containerRuntimeVersion": "docker://1.9.1", "kubeletVersion": "v1.2.0-alpha.5.848+3f2e99b7e7d6d8", "kubeProxyVersion": "v1.2.0-alpha.5.848+3f2e99b7e7d6d8" }, "images": null } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/_api_v1_nodes.json0000664000175000017500000000436312701023453030600 0ustar kamikami00000000000000{ "kind": "NodeList", "apiVersion": "v1", "metadata": { "selfLink": "/api/v1/nodes", "resourceVersion": "24" }, "items": [ { "metadata": { "name": "127.0.0.1", "selfLink": "/api/v1/nodes/127.0.0.1", "uid": "45949cbb-b99d-11e5-8d53-0050568157ec", "resourceVersion": "24", "creationTimestamp": "2016-01-13T02:28:03Z", "labels": { "kubernetes.io/hostname": "127.0.0.1" } }, "spec": { "externalID": "127.0.0.1" }, "status": { "capacity": { "cpu": "2", "memory": "4048236Ki", "pods": "40" }, "allocatable": { "cpu": "2", "memory": "4048236Ki", "pods": "40" }, "conditions": [ { "type": "OutOfDisk", "status": "False", "lastHeartbeatTime": "2016-01-13T02:28:53Z", "lastTransitionTime": "2016-01-13T02:28:03Z", "reason": "KubeletHasSufficientDisk", "message": "kubelet has sufficient disk space available" }, { "type": "Ready", "status": "True", "lastHeartbeatTime": "2016-01-13T02:28:53Z", "lastTransitionTime": "2016-01-13T02:28:03Z", "reason": "KubeletReady", "message": "kubelet is posting ready status" } ], "addresses": [ { "type": "LegacyHostIP", "address": "127.0.0.1" }, { "type": "InternalIP", "address": "127.0.0.1" } ], "daemonEndpoints": { "kubeletEndpoint": { "Port": 10250 } }, "nodeInfo": { "machineID": "1d9faaba9168d4b4a3416e99000002a2", "systemUUID": "42015AA3-9AF2-D089-9105-63CEF89EFE35", "bootID": "c31fdd67-f995-4be5-942d-10df26b40501", "kernelVersion": "3.13.0-46-generic", "osImage": "Ubuntu 14.04.2 LTS", "containerRuntimeVersion": "docker://1.9.1", "kubeletVersion": 
"v1.2.0-alpha.5.848+3f2e99b7e7d6d8", "kubeProxyVersion": "v1.2.0-alpha.5.848+3f2e99b7e7d6d8" }, "images": null } } ] }apache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces.json0000664000175000017500000000165012701023453031603 0ustar kamikami00000000000000{ "kind": "NamespaceList", "apiVersion": "v1", "metadata": { "selfLink": "/api/v1/namespaces", "resourceVersion": "443" }, "items": [ { "metadata": { "name": "default", "selfLink": "/api/v1/namespaces/default", "uid": "43e99cf9-b99d-11e5-8d53-0050568157ec", "resourceVersion": "6", "creationTimestamp": "2016-01-13T02:28:00Z" }, "spec": { "finalizers": [ "kubernetes" ] }, "status": { "phase": "Active" } }, { "metadata": { "name": "test", "selfLink": "/api/v1/namespaces/test", "uid": "7cb89199-b9a6-11e5-8d53-0050568157ec", "resourceVersion": "419", "creationTimestamp": "2016-01-13T03:34:01Z" }, "spec": { "finalizers": [ "kubernetes" ] }, "status": { "phase": "Active" } } ] }././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces_default_DELETE.jsonapache-libcloud-2.2.1/libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces_default_DELETE.0000664000175000017500000000064112701023453033436 0ustar kamikami00000000000000{ "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": "test", "selfLink": "/api/v1/namespaces/test", "uid": "7cb89199-b9a6-11e5-8d53-0050568157ec", "resourceVersion": "447", "creationTimestamp": "2016-01-13T03:34:01Z", "deletionTimestamp": "2016-01-13T03:38:05Z" }, "spec": { "finalizers": [ "kubernetes" ] }, "status": { "phase": "Terminating" } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/0000775000175000017500000000000013160535107024275 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/mac_124/0000775000175000017500000000000013160535110025415 5ustar 
kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/mac_124/search.json0000664000175000017500000001150313153541406027564 0ustar kamikami00000000000000[{ "star_count": 2558, "is_official": true, "name": "mysql", "is_automated": false, "description": "MySQL is a widely used, open-source relational database management system (RDBMS)." }, { "star_count": 164, "is_official": false, "name": "mysql/mysql-server", "is_automated": true, "description": "Optimized MySQL Server Docker images. Created, maintained and supported by the MySQL team at Oracle" }, { "star_count": 0, "is_official": false, "name": "tozd/mysql", "is_automated": true, "description": "MySQL (MariaDB fork) Docker image." }, { "star_count": 45, "is_official": false, "name": "centurylink/mysql", "is_automated": true, "description": "Image containing mysql. Optimized to be linked to another image/container." }, { "star_count": 8, "is_official": false, "name": "appcontainers/mysql", "is_automated": true, "description": "Centos/Debian Based Customizable MySQL Container - Updated 06/16/2016" }, { "star_count": 2, "is_official": false, "name": "alterway/mysql", "is_automated": true, "description": "Docker Mysql" }, { "star_count": 2, "is_official": false, "name": "drupaldocker/mysql", "is_automated": true, "description": "MySQL for Drupal" }, { "star_count": 2, "is_official": false, "name": "yfix/mysql", "is_automated": true, "description": "Yfix docker built mysql" }, { "star_count": 1, "is_official": false, "name": "phpmentors/mysql", "is_automated": true, "description": "MySQL server image" }, { "star_count": 0, "is_official": false, "name": "cloudposse/mysql", "is_automated": true, "description": "Improved `mysql` service with support for `mysqld_safe` and `fixtures` loaded from `mysqldump.sql`" }, { "star_count": 0, "is_official": false, "name": "nanobox/mysql", "is_automated": true, "description": "MySQL service for nanobox.io" }, { "star_count": 36, "is_official": false, 
"name": "sameersbn/mysql", "is_automated": true, "description": "" }, { "star_count": 0, "is_official": false, "name": "vukor/mysql", "is_automated": true, "description": "Build for MySQL. Project available on https://github.com/vukor/docker-web-stack" }, { "star_count": 6, "is_official": false, "name": "marvambass/mysql", "is_automated": true, "description": "MySQL Server based on Ubuntu 14.04" }, { "star_count": 0, "is_official": false, "name": "lancehudson/docker-mysql", "is_automated": true, "description": "MySQL is a widely used, open-source relational database management system (RDBMS)." }, { "star_count": 1, "is_official": false, "name": "sin30/mysql", "is_automated": true, "description": "MySQL images with my own config files." }, { "star_count": 2, "is_official": false, "name": "azukiapp/mysql", "is_automated": true, "description": "Docker image to run MySQL by Azuki - http://azk.io" }, { "star_count": 1, "is_official": false, "name": "kaluzki/mysql", "is_automated": true, "description": "mysql" }, { "star_count": 0, "is_official": false, "name": "livingobjects/mysql", "is_automated": true, "description": "MySQL" }, { "star_count": 0, "is_official": false, "name": "akilli/mysql", "is_automated": true, "description": "akilli/base based MySQL image" }, { "star_count": 0, "is_official": false, "name": "projectomakase/mysql", "is_automated": true, "description": "Docker image for MySQL" }, { "star_count": 1, "is_official": false, "name": "tetraweb/mysql", "is_automated": true, "description": "" }, { "star_count": 0, "is_official": false, "name": "dockerizedrupal/mysql", "is_automated": true, "description": "docker-mysql" }, { "star_count": 1, "is_official": false, "name": "debezium/example-mysql", "is_automated": true, "description": "Example MySQL database server with a simple Inventory database, useful for demos and tutorials." 
}, { "star_count": 16, "is_official": false, "name": "google/mysql", "is_automated": true, "description": "MySQL server for Google Compute Engine" } ]apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/mac_124/create_container.json0000664000175000017500000000014213153541406031621 0ustar kamikami00000000000000{ "Id": "a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303", "Warnings": null }apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/mac_124/create_image.txt0000664000175000017500000012202113153541406030570 0ustar kamikami00000000000000{"status":"Pulling from library/ubuntu","id":"12.04"} {"status":"Pulling fs layer","progressDetail":{},"id":"36cef014d5d4"} {"status":"Pulling fs layer","progressDetail":{},"id":"0d99ad4de1d2"} {"status":"Pulling fs layer","progressDetail":{},"id":"3e32dbf1ab94"} {"status":"Pulling fs layer","progressDetail":{},"id":"44710c456ffc"} {"status":"Pulling fs layer","progressDetail":{},"id":"56e70ac3b314"} {"status":"Waiting","progressDetail":{},"id":"44710c456ffc"} {"status":"Waiting","progressDetail":{},"id":"56e70ac3b314"} {"status":"Downloading","progressDetail":{"current":419,"total":419},"progress":"[==================================================\u003e] 419 B/419 B","id":"3e32dbf1ab94"} {"status":"Verifying Checksum","progressDetail":{},"id":"3e32dbf1ab94"} {"status":"Download complete","progressDetail":{},"id":"3e32dbf1ab94"} {"status":"Downloading","progressDetail":{"current":16384,"total":57935},"progress":"[==============\u003e ] 16.38 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Downloading","progressDetail":{"current":32768,"total":57935},"progress":"[============================\u003e ] 32.77 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Downloading","progressDetail":{"current":57935,"total":57935},"progress":"[==================================================\u003e] 57.94 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Verifying Checksum","progressDetail":{},"id":"0d99ad4de1d2"} 
{"status":"Download complete","progressDetail":{},"id":"0d99ad4de1d2"} {"status":"Downloading","progressDetail":{"current":162,"total":162},"progress":"[==================================================\u003e] 162 B/162 B","id":"56e70ac3b314"} {"status":"Verifying Checksum","progressDetail":{},"id":"56e70ac3b314"} {"status":"Download complete","progressDetail":{},"id":"56e70ac3b314"} {"status":"Downloading","progressDetail":{"current":682,"total":682},"progress":"[==================================================\u003e] 682 B/682 B","id":"44710c456ffc"} {"status":"Verifying Checksum","progressDetail":{},"id":"44710c456ffc"} {"status":"Download complete","progressDetail":{},"id":"44710c456ffc"} {"status":"Downloading","progressDetail":{"current":392563,"total":39081844},"progress":"[\u003e ] 392.6 kB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":783955,"total":39081844},"progress":"[=\u003e ] 784 kB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":1178995,"total":39081844},"progress":"[=\u003e ] 1.179 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":1572211,"total":39081844},"progress":"[==\u003e ] 1.572 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":1965427,"total":39081844},"progress":"[==\u003e ] 1.965 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":2358643,"total":39081844},"progress":"[===\u003e ] 2.359 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":2751859,"total":39081844},"progress":"[===\u003e ] 2.752 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":3145075,"total":39081844},"progress":"[====\u003e ] 3.145 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":3538291,"total":39081844},"progress":"[====\u003e ] 3.538 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":3931507,"total":39081844},"progress":"[=====\u003e ] 3.932 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":4324723,"total":39081844},"progress":"[=====\u003e ] 4.325 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":4717939,"total":39081844},"progress":"[======\u003e ] 4.718 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":5111155,"total":39081844},"progress":"[======\u003e ] 5.111 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":5504371,"total":39081844},"progress":"[=======\u003e ] 5.504 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":5897587,"total":39081844},"progress":"[=======\u003e ] 5.898 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":6290803,"total":39081844},"progress":"[========\u003e ] 6.291 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":6684019,"total":39081844},"progress":"[========\u003e ] 6.684 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":7077235,"total":39081844},"progress":"[=========\u003e ] 7.077 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":7470451,"total":39081844},"progress":"[=========\u003e ] 7.47 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":7863667,"total":39081844},"progress":"[==========\u003e ] 7.864 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":8256883,"total":39081844},"progress":"[==========\u003e ] 8.257 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":8650099,"total":39081844},"progress":"[===========\u003e ] 8.65 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":9043315,"total":39081844},"progress":"[===========\u003e ] 9.043 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":9436531,"total":39081844},"progress":"[============\u003e ] 9.437 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":9829747,"total":39081844},"progress":"[============\u003e ] 9.83 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":10222963,"total":39081844},"progress":"[=============\u003e ] 10.22 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":10616179,"total":39081844},"progress":"[=============\u003e ] 10.62 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":11009395,"total":39081844},"progress":"[==============\u003e ] 11.01 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":11402611,"total":39081844},"progress":"[==============\u003e ] 11.4 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":11795827,"total":39081844},"progress":"[===============\u003e ] 11.8 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":12189043,"total":39081844},"progress":"[===============\u003e ] 12.19 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":12582259,"total":39081844},"progress":"[================\u003e ] 12.58 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":12975475,"total":39081844},"progress":"[================\u003e ] 12.98 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":13368691,"total":39081844},"progress":"[=================\u003e ] 13.37 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":13761907,"total":39081844},"progress":"[=================\u003e ] 13.76 MB/39.08 
MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":14155123,"total":39081844},"progress":"[==================\u003e ] 14.16 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":14548339,"total":39081844},"progress":"[==================\u003e ] 14.55 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":14941555,"total":39081844},"progress":"[===================\u003e ] 14.94 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":15334771,"total":39081844},"progress":"[===================\u003e ] 15.33 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":15727987,"total":39081844},"progress":"[====================\u003e ] 15.73 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":16121203,"total":39081844},"progress":"[====================\u003e ] 16.12 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":16514419,"total":39081844},"progress":"[=====================\u003e ] 16.51 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":16907635,"total":39081844},"progress":"[=====================\u003e ] 16.91 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":17300851,"total":39081844},"progress":"[======================\u003e ] 17.3 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":17694067,"total":39081844},"progress":"[======================\u003e ] 17.69 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":18087283,"total":39081844},"progress":"[=======================\u003e ] 18.09 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":18480499,"total":39081844},"progress":"[=======================\u003e ] 18.48 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":18873715,"total":39081844},"progress":"[========================\u003e ] 18.87 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":19266931,"total":39081844},"progress":"[========================\u003e ] 19.27 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":19660147,"total":39081844},"progress":"[=========================\u003e ] 19.66 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":20053363,"total":39081844},"progress":"[=========================\u003e ] 20.05 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":20446579,"total":39081844},"progress":"[==========================\u003e ] 20.45 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":20839795,"total":39081844},"progress":"[==========================\u003e ] 20.84 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":21233011,"total":39081844},"progress":"[===========================\u003e ] 21.23 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":21626227,"total":39081844},"progress":"[===========================\u003e ] 21.63 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":22019443,"total":39081844},"progress":"[============================\u003e ] 22.02 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":22412659,"total":39081844},"progress":"[============================\u003e ] 22.41 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":22805875,"total":39081844},"progress":"[=============================\u003e ] 22.81 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":23199091,"total":39081844},"progress":"[=============================\u003e ] 23.2 MB/39.08 
MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":23592307,"total":39081844},"progress":"[==============================\u003e ] 23.59 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":23985523,"total":39081844},"progress":"[==============================\u003e ] 23.99 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":24378739,"total":39081844},"progress":"[===============================\u003e ] 24.38 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":24771955,"total":39081844},"progress":"[===============================\u003e ] 24.77 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":25165171,"total":39081844},"progress":"[================================\u003e ] 25.17 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":25558387,"total":39081844},"progress":"[================================\u003e ] 25.56 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":25951603,"total":39081844},"progress":"[=================================\u003e ] 25.95 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":26344819,"total":39081844},"progress":"[=================================\u003e ] 26.34 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":26738035,"total":39081844},"progress":"[==================================\u003e ] 26.74 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":27131251,"total":39081844},"progress":"[==================================\u003e ] 27.13 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":27524467,"total":39081844},"progress":"[===================================\u003e ] 27.52 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":27917683,"total":39081844},"progress":"[===================================\u003e ] 27.92 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":28310899,"total":39081844},"progress":"[====================================\u003e ] 28.31 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":28704115,"total":39081844},"progress":"[====================================\u003e ] 28.7 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":29097331,"total":39081844},"progress":"[=====================================\u003e ] 29.1 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":29490547,"total":39081844},"progress":"[=====================================\u003e ] 29.49 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":29883763,"total":39081844},"progress":"[======================================\u003e ] 29.88 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":30276979,"total":39081844},"progress":"[======================================\u003e ] 30.28 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":30670195,"total":39081844},"progress":"[=======================================\u003e ] 30.67 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":31063411,"total":39081844},"progress":"[=======================================\u003e ] 31.06 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":31456627,"total":39081844},"progress":"[========================================\u003e ] 31.46 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":31849843,"total":39081844},"progress":"[========================================\u003e ] 31.85 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":32243059,"total":39081844},"progress":"[=========================================\u003e ] 32.24 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":32636275,"total":39081844},"progress":"[=========================================\u003e ] 32.64 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":33029491,"total":39081844},"progress":"[==========================================\u003e ] 33.03 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":33422707,"total":39081844},"progress":"[==========================================\u003e ] 33.42 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":33815923,"total":39081844},"progress":"[===========================================\u003e ] 33.82 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":34209139,"total":39081844},"progress":"[===========================================\u003e ] 34.21 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":34602355,"total":39081844},"progress":"[============================================\u003e ] 34.6 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":34995571,"total":39081844},"progress":"[============================================\u003e ] 35 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":35388787,"total":39081844},"progress":"[=============================================\u003e ] 35.39 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":35782003,"total":39081844},"progress":"[=============================================\u003e ] 35.78 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":36175219,"total":39081844},"progress":"[==============================================\u003e ] 36.18 MB/39.08 
MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":36568435,"total":39081844},"progress":"[==============================================\u003e ] 36.57 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":36961651,"total":39081844},"progress":"[===============================================\u003e ] 36.96 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":37354867,"total":39081844},"progress":"[===============================================\u003e ] 37.35 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":37748083,"total":39081844},"progress":"[================================================\u003e ] 37.75 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":38141299,"total":39081844},"progress":"[================================================\u003e ] 38.14 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":38534515,"total":39081844},"progress":"[=================================================\u003e ] 38.53 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":38927731,"total":39081844},"progress":"[=================================================\u003e ] 38.93 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Download complete","progressDetail":{},"id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":393216,"total":39081844},"progress":"[\u003e ] 393.2 kB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":786432,"total":39081844},"progress":"[=\u003e ] 786.4 kB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":1179648,"total":39081844},"progress":"[=\u003e ] 1.18 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":1572864,"total":39081844},"progress":"[==\u003e ] 1.573 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":1966080,"total":39081844},"progress":"[==\u003e ] 1.966 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":2359296,"total":39081844},"progress":"[===\u003e ] 2.359 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":2752512,"total":39081844},"progress":"[===\u003e ] 2.753 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":3145728,"total":39081844},"progress":"[====\u003e ] 3.146 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":3538944,"total":39081844},"progress":"[====\u003e ] 3.539 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":3932160,"total":39081844},"progress":"[=====\u003e ] 3.932 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":4325376,"total":39081844},"progress":"[=====\u003e ] 4.325 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":4718592,"total":39081844},"progress":"[======\u003e ] 4.719 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":5111808,"total":39081844},"progress":"[======\u003e ] 5.112 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":5505024,"total":39081844},"progress":"[=======\u003e ] 5.505 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":5898240,"total":39081844},"progress":"[=======\u003e ] 5.898 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":6291456,"total":39081844},"progress":"[========\u003e ] 6.291 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":6684672,"total":39081844},"progress":"[========\u003e ] 6.685 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":7077888,"total":39081844},"progress":"[=========\u003e ] 7.078 MB/39.08 
MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":7471104,"total":39081844},"progress":"[=========\u003e ] 7.471 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":7864320,"total":39081844},"progress":"[==========\u003e ] 7.864 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":8257536,"total":39081844},"progress":"[==========\u003e ] 8.258 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":8650752,"total":39081844},"progress":"[===========\u003e ] 8.651 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":9043968,"total":39081844},"progress":"[===========\u003e ] 9.044 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":9437184,"total":39081844},"progress":"[============\u003e ] 9.437 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":9830400,"total":39081844},"progress":"[============\u003e ] 9.83 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":10223616,"total":39081844},"progress":"[=============\u003e ] 10.22 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":10616832,"total":39081844},"progress":"[=============\u003e ] 10.62 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":11010048,"total":39081844},"progress":"[==============\u003e ] 11.01 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":11403264,"total":39081844},"progress":"[==============\u003e ] 11.4 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":11796480,"total":39081844},"progress":"[===============\u003e ] 11.8 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":12189696,"total":39081844},"progress":"[===============\u003e ] 12.19 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":12582912,"total":39081844},"progress":"[================\u003e ] 12.58 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":12976128,"total":39081844},"progress":"[================\u003e ] 12.98 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":13369344,"total":39081844},"progress":"[=================\u003e ] 13.37 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":13762560,"total":39081844},"progress":"[=================\u003e ] 13.76 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":14155776,"total":39081844},"progress":"[==================\u003e ] 14.16 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":14548992,"total":39081844},"progress":"[==================\u003e ] 14.55 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":14942208,"total":39081844},"progress":"[===================\u003e ] 14.94 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":15335424,"total":39081844},"progress":"[===================\u003e ] 15.34 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":15728640,"total":39081844},"progress":"[====================\u003e ] 15.73 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":16121856,"total":39081844},"progress":"[====================\u003e ] 16.12 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":16515072,"total":39081844},"progress":"[=====================\u003e ] 16.52 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":16908288,"total":39081844},"progress":"[=====================\u003e ] 16.91 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":17301504,"total":39081844},"progress":"[======================\u003e ] 17.3 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":17694720,"total":39081844},"progress":"[======================\u003e ] 17.69 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":18087936,"total":39081844},"progress":"[=======================\u003e ] 18.09 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":18481152,"total":39081844},"progress":"[=======================\u003e ] 18.48 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":18874368,"total":39081844},"progress":"[========================\u003e ] 18.87 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":19267584,"total":39081844},"progress":"[========================\u003e ] 19.27 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":19660800,"total":39081844},"progress":"[=========================\u003e ] 19.66 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":20054016,"total":39081844},"progress":"[=========================\u003e ] 20.05 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":20447232,"total":39081844},"progress":"[==========================\u003e ] 20.45 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":20840448,"total":39081844},"progress":"[==========================\u003e ] 20.84 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":21233664,"total":39081844},"progress":"[===========================\u003e ] 21.23 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":21626880,"total":39081844},"progress":"[===========================\u003e ] 21.63 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":22020096,"total":39081844},"progress":"[============================\u003e ] 22.02 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":22413312,"total":39081844},"progress":"[============================\u003e ] 22.41 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":22806528,"total":39081844},"progress":"[=============================\u003e ] 22.81 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":23199744,"total":39081844},"progress":"[=============================\u003e ] 23.2 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":23592960,"total":39081844},"progress":"[==============================\u003e ] 23.59 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":23986176,"total":39081844},"progress":"[==============================\u003e ] 23.99 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":24379392,"total":39081844},"progress":"[===============================\u003e ] 24.38 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":24772608,"total":39081844},"progress":"[===============================\u003e ] 24.77 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":25165824,"total":39081844},"progress":"[================================\u003e ] 25.17 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":25559040,"total":39081844},"progress":"[================================\u003e ] 25.56 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":25952256,"total":39081844},"progress":"[=================================\u003e ] 25.95 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":26345472,"total":39081844},"progress":"[=================================\u003e ] 26.35 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":26738688,"total":39081844},"progress":"[==================================\u003e ] 26.74 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":27131904,"total":39081844},"progress":"[==================================\u003e ] 27.13 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":27525120,"total":39081844},"progress":"[===================================\u003e ] 27.53 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":27918336,"total":39081844},"progress":"[===================================\u003e ] 27.92 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":28311552,"total":39081844},"progress":"[====================================\u003e ] 28.31 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":28704768,"total":39081844},"progress":"[====================================\u003e ] 28.7 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":29097984,"total":39081844},"progress":"[=====================================\u003e ] 29.1 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":29491200,"total":39081844},"progress":"[=====================================\u003e ] 29.49 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":29884416,"total":39081844},"progress":"[======================================\u003e ] 29.88 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":30277632,"total":39081844},"progress":"[======================================\u003e ] 30.28 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":30670848,"total":39081844},"progress":"[=======================================\u003e ] 30.67 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":31064064,"total":39081844},"progress":"[=======================================\u003e ] 31.06 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":31457280,"total":39081844},"progress":"[========================================\u003e ] 31.46 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":31850496,"total":39081844},"progress":"[========================================\u003e ] 31.85 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":32243712,"total":39081844},"progress":"[=========================================\u003e ] 32.24 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":32636928,"total":39081844},"progress":"[=========================================\u003e ] 32.64 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":33030144,"total":39081844},"progress":"[==========================================\u003e ] 33.03 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":33423360,"total":39081844},"progress":"[==========================================\u003e ] 33.42 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":33816576,"total":39081844},"progress":"[===========================================\u003e ] 33.82 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":34209792,"total":39081844},"progress":"[===========================================\u003e ] 34.21 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":34603008,"total":39081844},"progress":"[============================================\u003e ] 34.6 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":34996224,"total":39081844},"progress":"[============================================\u003e ] 35 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":35389440,"total":39081844},"progress":"[=============================================\u003e ] 35.39 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":35782656,"total":39081844},"progress":"[=============================================\u003e ] 35.78 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":36175872,"total":39081844},"progress":"[==============================================\u003e ] 36.18 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":36569088,"total":39081844},"progress":"[==============================================\u003e ] 36.57 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":36962304,"total":39081844},"progress":"[===============================================\u003e ] 36.96 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":37355520,"total":39081844},"progress":"[===============================================\u003e ] 37.36 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":37748736,"total":39081844},"progress":"[================================================\u003e ] 37.75 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":38141952,"total":39081844},"progress":"[================================================\u003e ] 38.14 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":38535168,"total":39081844},"progress":"[=================================================\u003e ] 38.54 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":38928384,"total":39081844},"progress":"[=================================================\u003e ] 38.93 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":39081844,"total":39081844},"progress":"[==================================================\u003e] 39.08 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Pull complete","progressDetail":{},"id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":32768,"total":57935},"progress":"[============================\u003e ] 32.77 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Extracting","progressDetail":{"current":57935,"total":57935},"progress":"[==================================================\u003e] 57.94 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Extracting","progressDetail":{"current":57935,"total":57935},"progress":"[==================================================\u003e] 57.94 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Pull complete","progressDetail":{},"id":"0d99ad4de1d2"} {"status":"Extracting","progressDetail":{"current":419,"total":419},"progress":"[==================================================\u003e] 419 B/419 B","id":"3e32dbf1ab94"} {"status":"Extracting","progressDetail":{"current":419,"total":419},"progress":"[==================================================\u003e] 419 B/419 B","id":"3e32dbf1ab94"} {"status":"Pull complete","progressDetail":{},"id":"3e32dbf1ab94"} {"status":"Extracting","progressDetail":{"current":682,"total":682},"progress":"[==================================================\u003e] 682 B/682 B","id":"44710c456ffc"} {"status":"Extracting","progressDetail":{"current":682,"total":682},"progress":"[==================================================\u003e] 682 B/682 B","id":"44710c456ffc"} {"status":"Pull complete","progressDetail":{},"id":"44710c456ffc"} 
{"status":"Extracting","progressDetail":{"current":162,"total":162},"progress":"[==================================================\u003e] 162 B/162 B","id":"56e70ac3b314"} {"status":"Extracting","progressDetail":{"current":162,"total":162},"progress":"[==================================================\u003e] 162 B/162 B","id":"56e70ac3b314"} {"status":"Pull complete","progressDetail":{},"id":"56e70ac3b314"} {"status":"Digest: sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"} {"status":"Status: Downloaded newer image for ubuntu:12.04"}apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/mac_124/containers.json0000664000175000017500000002100213153541406030457 0ustar kamikami00000000000000[{ "Id": "160936dc54fe8c332095676d9379003534b8cddd7565fa63018996e06dae1b6b", "Names": ["/hubot"], "Image": "stackstorm/hubot", "ImageID": "sha256:0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "Command": "nginx -g 'daemon off;'", "Created": 1466734872, "Ports": [{ "PrivatePort": 443, "Type": "tcp" }, { "PrivatePort": 80, "Type": "tcp" } ], "Labels": {}, "State": "running", "Status": "Up 4 seconds", "HostConfig": { "NetworkMode": "default" }, "NetworkSettings": { "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "beceac1c78e75bd5f8a13a60e3bf95b76aa45cac2d31978ae77934da17c16cc8", "EndpointID": "d0c614dcdd1c6a89765c989640239a27ab3a99a5ca3dbd1de7494b939f39652c", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.7", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:07" } } }, "Mounts": [] }, { "Id": "6089a78e3e938a6a693601ad8a3f416d39273f8d2b4d3d3971b82eaa05281d08", "Names": ["/fervent_noether"], "Image": "nginx", "ImageID": "sha256:0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "Command": "nginx -g 'daemon off;'", "Created": 1466734871, "Ports": [{ "PrivatePort": 443, "Type": "tcp" }, { "PrivatePort": 80, 
"Type": "tcp" } ], "Labels": {}, "State": "running", "Status": "Up 5 seconds", "HostConfig": { "NetworkMode": "default" }, "NetworkSettings": { "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "beceac1c78e75bd5f8a13a60e3bf95b76aa45cac2d31978ae77934da17c16cc8", "EndpointID": "1298dfe058795ed277e539e80e00cb88bbbcbb08051b1f43f979f878e959e9c4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.6", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:06" } } }, "Mounts": [] }, { "Id": "1fa078bc0c56b94a81e62586aebab06042abc524df45fc3197eaff31dd61705f", "Names": ["/cranky_nobel"], "Image": "nginx", "ImageID": "sha256:0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "Command": "nginx -g 'daemon off;'", "Created": 1466734869, "Ports": [{ "PrivatePort": 443, "Type": "tcp" }, { "PrivatePort": 80, "Type": "tcp" } ], "Labels": {}, "State": "running", "Status": "Up 6 seconds", "HostConfig": { "NetworkMode": "default" }, "NetworkSettings": { "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "beceac1c78e75bd5f8a13a60e3bf95b76aa45cac2d31978ae77934da17c16cc8", "EndpointID": "4985fa8a74f2997ee0794c1866e6492dbc9124705d41596fa27bb4c2505739fb", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.5", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:05" } } }, "Mounts": [] }, { "Id": "15faa50fa2a41f417cee3f2013fc150d0f90d9ed3102d8533e65099d50476e4d", "Names": ["/sharp_kilby"], "Image": "nginx", "ImageID": "sha256:0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "Command": "nginx -g 'daemon off;'", "Created": 1466734868, "Ports": [{ "PrivatePort": 443, "Type": "tcp" }, { "PrivatePort": 80, "Type": "tcp" } ], "Labels": {}, "State": "running", "Status": "Up 7 seconds", "HostConfig": { "NetworkMode": "default" }, "NetworkSettings": { "Networks": { 
"bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "beceac1c78e75bd5f8a13a60e3bf95b76aa45cac2d31978ae77934da17c16cc8", "EndpointID": "912a856ada2ab2e84a95ff2ca5236b9e84976e3daf4844623ba80228bb244660", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.4", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:04" } } }, "Mounts": [] }, { "Id": "c3bd5b03b4d5a636344b75271781c3402341db6be1e76168cb602c5d79439519", "Names": ["/agitated_lalande"], "Image": "nginx", "ImageID": "sha256:0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "Command": "nginx -g 'daemon off;'", "Created": 1466734867, "Ports": [{ "PrivatePort": 443, "Type": "tcp" }, { "PrivatePort": 80, "Type": "tcp" } ], "Labels": {}, "State": "running", "Status": "Up 9 seconds", "HostConfig": { "NetworkMode": "default" }, "NetworkSettings": { "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "beceac1c78e75bd5f8a13a60e3bf95b76aa45cac2d31978ae77934da17c16cc8", "EndpointID": "2856237f86f145817f3807996c3e89c9ad019d45a29c98cb0ab165abebe11ef9", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:03" } } }, "Mounts": [] }, { "Id": "5ad88cda3edccb5523ddd6831a399f406434ef0fc35dc24caa6cd09ee08b7826", "Names": ["/elated_euler"], "Image": "nginx", "ImageID": "sha256:0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "Command": "nginx -g 'daemon off;'", "Created": 1466734255, "Ports": [{ "PrivatePort": 443, "Type": "tcp" }, { "PrivatePort": 80, "Type": "tcp" } ], "Labels": {}, "State": "running", "Status": "Up 10 minutes", "HostConfig": { "NetworkMode": "default" }, "NetworkSettings": { "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "beceac1c78e75bd5f8a13a60e3bf95b76aa45cac2d31978ae77934da17c16cc8", 
"EndpointID": "80646c52b170bffbde82cffd0fff2c7096ecf55b47ec297481127b2a4e622416", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02" } } }, "Mounts": [] } ]apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/mac_124/version.json0000664000175000017500000000041713153541406030006 0ustar kamikami00000000000000{ "Version": "1.12.0-rc2", "ApiVersion": "1.24", "GitCommit": "a7119de", "GoVersion": "go1.6.2", "Os": "linux", "Arch": "amd64", "KernelVersion": "4.4.13-moby", "Experimental": true, "BuildTime": "2016-06-17T22:09:20.440355664+00:00" }apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/linux_124/0000775000175000017500000000000013160535110026014 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/linux_124/search.json0000664000175000017500000001200113153541406030155 0ustar kamikami00000000000000[ { "star_count": 1502, "is_official": true, "name": "mysql", "is_trusted": false, "is_automated": false, "description": "MySQL is a widely used, open-source relational database management system (RDBMS)." }, { "star_count": 80, "is_official": false, "name": "mysql/mysql-server", "is_trusted": true, "is_automated": true, "description": "Optimized MySQL Server Docker images. Created, maintained and supported by the MySQL team at Oracle" }, { "star_count": 31, "is_official": false, "name": "centurylink/mysql", "is_trusted": true, "is_automated": true, "description": "Image containing mysql. Optimized to be linked to another image/container." 
}, { "star_count": 6, "is_official": false, "name": "appcontainers/mysql", "is_trusted": true, "is_automated": true, "description": "CentOS/Ubuntu/Debian based customizible MySQL 5.5 Container - 284MB/283MB/245MB - Updated 12/14/2015" }, { "star_count": 2, "is_official": false, "name": "alterway/mysql", "is_trusted": true, "is_automated": true, "description": "Docker Mysql" }, { "star_count": 0, "is_official": false, "name": "tozd/mysql", "is_trusted": true, "is_automated": true, "description": "MySQL (MariaDB fork) Docker image." }, { "star_count": 0, "is_official": false, "name": "wenzizone/mysql", "is_trusted": true, "is_automated": true, "description": "mysql" }, { "star_count": 0, "is_official": false, "name": "dockerizedrupal/mysql", "is_trusted": true, "is_automated": true, "description": "docker-mysql" }, { "star_count": 2, "is_official": false, "name": "azukiapp/mysql", "is_trusted": true, "is_automated": true, "description": "Docker image to run MySQL by Azuki - http://azk.io" }, { "star_count": 1, "is_official": false, "name": "phpmentors/mysql", "is_trusted": true, "is_automated": true, "description": "MySQL server image" }, { "star_count": 0, "is_official": false, "name": "lancehudson/docker-mysql", "is_trusted": true, "is_automated": true, "description": "MySQL is a widely used, open-source relational database management system (RDBMS)." }, { "star_count": 1, "is_official": false, "name": "bahmni/mysql", "is_trusted": true, "is_automated": true, "description": "Mysql container for bahmni. 
Contains the openmrs database" }, { "star_count": 2, "is_official": false, "name": "yfix/mysql", "is_trusted": true, "is_automated": true, "description": "Yfix docker built mysql" }, { "star_count": 23, "is_official": false, "name": "sameersbn/mysql", "is_trusted": true, "is_automated": true, "description": "" }, { "star_count": 0, "is_official": false, "name": "nanobox/mysql", "is_trusted": true, "is_automated": true, "description": "MySQL service for nanobox.io" }, { "star_count": 0, "is_official": false, "name": "withinboredom/mysql", "is_trusted": true, "is_automated": true, "description": "A MySQL container using s6 and Consul -- built on tatum/mysql" }, { "star_count": 4, "is_official": false, "name": "marvambass/mysql", "is_trusted": true, "is_automated": true, "description": "MySQL Server based on Ubuntu 14.04" }, { "star_count": 14, "is_official": false, "name": "google/mysql", "is_trusted": true, "is_automated": true, "description": "MySQL server for Google Compute Engine" }, { "star_count": 1, "is_official": false, "name": "frodenas/mysql", "is_trusted": true, "is_automated": true, "description": "A Docker Image for MySQL" }, { "star_count": 0, "is_official": false, "name": "ahmet2mir/mysql", "is_trusted": true, "is_automated": true, "description": "This is a Debian based image with MySQL server installed listening on port 3306. 
" }, { "star_count": 25, "is_official": false, "name": "wnameless/mysql-phpmyadmin", "is_trusted": true, "is_automated": true, "description": "MySQL + phpMyAdmin\nhttps://index.docker.io/u/wnameless/mysql-phpmyadmin/" }, { "star_count": 0, "is_official": false, "name": "drupaldocker/mysql", "is_trusted": true, "is_automated": true, "description": "MySQL for Drupal" }, { "star_count": 0, "is_official": false, "name": "tetraweb/mysql", "is_trusted": true, "is_automated": true, "description": "" }, { "star_count": 1, "is_official": false, "name": "boomtownroi/mysql-dev", "is_trusted": true, "is_automated": true, "description": "A mysql box with consul integration for development. Based on tatum box" }, { "star_count": 5, "is_official": false, "name": "ioggstream/mysql", "is_trusted": true, "is_automated": true, "description": "MySQL Image with Master-Slave replication" } ]apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/linux_124/create_container.json0000664000175000017500000000014213153541406032220 0ustar kamikami00000000000000{ "Id": "a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303", "Warnings": null }apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/linux_124/create_image.txt0000664000175000017500000012202113153541406031167 0ustar kamikami00000000000000{"status":"Pulling from library/ubuntu","id":"12.04"} {"status":"Pulling fs layer","progressDetail":{},"id":"36cef014d5d4"} {"status":"Pulling fs layer","progressDetail":{},"id":"0d99ad4de1d2"} {"status":"Pulling fs layer","progressDetail":{},"id":"3e32dbf1ab94"} {"status":"Pulling fs layer","progressDetail":{},"id":"44710c456ffc"} {"status":"Pulling fs layer","progressDetail":{},"id":"56e70ac3b314"} {"status":"Waiting","progressDetail":{},"id":"44710c456ffc"} {"status":"Waiting","progressDetail":{},"id":"56e70ac3b314"} {"status":"Downloading","progressDetail":{"current":419,"total":419},"progress":"[==================================================\u003e] 419 B/419 
B","id":"3e32dbf1ab94"} {"status":"Verifying Checksum","progressDetail":{},"id":"3e32dbf1ab94"} {"status":"Download complete","progressDetail":{},"id":"3e32dbf1ab94"} {"status":"Downloading","progressDetail":{"current":16384,"total":57935},"progress":"[==============\u003e ] 16.38 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Downloading","progressDetail":{"current":32768,"total":57935},"progress":"[============================\u003e ] 32.77 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Downloading","progressDetail":{"current":57935,"total":57935},"progress":"[==================================================\u003e] 57.94 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Verifying Checksum","progressDetail":{},"id":"0d99ad4de1d2"} {"status":"Download complete","progressDetail":{},"id":"0d99ad4de1d2"} {"status":"Downloading","progressDetail":{"current":162,"total":162},"progress":"[==================================================\u003e] 162 B/162 B","id":"56e70ac3b314"} {"status":"Verifying Checksum","progressDetail":{},"id":"56e70ac3b314"} {"status":"Download complete","progressDetail":{},"id":"56e70ac3b314"} {"status":"Downloading","progressDetail":{"current":682,"total":682},"progress":"[==================================================\u003e] 682 B/682 B","id":"44710c456ffc"} {"status":"Verifying Checksum","progressDetail":{},"id":"44710c456ffc"} {"status":"Download complete","progressDetail":{},"id":"44710c456ffc"} {"status":"Downloading","progressDetail":{"current":392563,"total":39081844},"progress":"[\u003e ] 392.6 kB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":783955,"total":39081844},"progress":"[=\u003e ] 784 kB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":1178995,"total":39081844},"progress":"[=\u003e ] 1.179 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":1572211,"total":39081844},"progress":"[==\u003e ] 1.572 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":1965427,"total":39081844},"progress":"[==\u003e ] 1.965 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":2358643,"total":39081844},"progress":"[===\u003e ] 2.359 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":2751859,"total":39081844},"progress":"[===\u003e ] 2.752 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":3145075,"total":39081844},"progress":"[====\u003e ] 3.145 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":3538291,"total":39081844},"progress":"[====\u003e ] 3.538 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":3931507,"total":39081844},"progress":"[=====\u003e ] 3.932 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":4324723,"total":39081844},"progress":"[=====\u003e ] 4.325 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":4717939,"total":39081844},"progress":"[======\u003e ] 4.718 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":5111155,"total":39081844},"progress":"[======\u003e ] 5.111 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":5504371,"total":39081844},"progress":"[=======\u003e ] 5.504 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":5897587,"total":39081844},"progress":"[=======\u003e ] 5.898 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":6290803,"total":39081844},"progress":"[========\u003e ] 6.291 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":6684019,"total":39081844},"progress":"[========\u003e ] 6.684 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":7077235,"total":39081844},"progress":"[=========\u003e ] 
7.077 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":7470451,"total":39081844},"progress":"[=========\u003e ] 7.47 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":7863667,"total":39081844},"progress":"[==========\u003e ] 7.864 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":8256883,"total":39081844},"progress":"[==========\u003e ] 8.257 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":8650099,"total":39081844},"progress":"[===========\u003e ] 8.65 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":9043315,"total":39081844},"progress":"[===========\u003e ] 9.043 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":9436531,"total":39081844},"progress":"[============\u003e ] 9.437 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":9829747,"total":39081844},"progress":"[============\u003e ] 9.83 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":10222963,"total":39081844},"progress":"[=============\u003e ] 10.22 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":10616179,"total":39081844},"progress":"[=============\u003e ] 10.62 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":11009395,"total":39081844},"progress":"[==============\u003e ] 11.01 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":11402611,"total":39081844},"progress":"[==============\u003e ] 11.4 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":11795827,"total":39081844},"progress":"[===============\u003e ] 11.8 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":12189043,"total":39081844},"progress":"[===============\u003e ] 12.19 MB/39.08 
MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":12582259,"total":39081844},"progress":"[================\u003e ] 12.58 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":12975475,"total":39081844},"progress":"[================\u003e ] 12.98 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":13368691,"total":39081844},"progress":"[=================\u003e ] 13.37 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":13761907,"total":39081844},"progress":"[=================\u003e ] 13.76 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":14155123,"total":39081844},"progress":"[==================\u003e ] 14.16 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":14548339,"total":39081844},"progress":"[==================\u003e ] 14.55 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":14941555,"total":39081844},"progress":"[===================\u003e ] 14.94 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":15334771,"total":39081844},"progress":"[===================\u003e ] 15.33 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":15727987,"total":39081844},"progress":"[====================\u003e ] 15.73 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":16121203,"total":39081844},"progress":"[====================\u003e ] 16.12 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":16514419,"total":39081844},"progress":"[=====================\u003e ] 16.51 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":16907635,"total":39081844},"progress":"[=====================\u003e ] 16.91 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":17300851,"total":39081844},"progress":"[======================\u003e ] 17.3 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":17694067,"total":39081844},"progress":"[======================\u003e ] 17.69 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":18087283,"total":39081844},"progress":"[=======================\u003e ] 18.09 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":18480499,"total":39081844},"progress":"[=======================\u003e ] 18.48 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":18873715,"total":39081844},"progress":"[========================\u003e ] 18.87 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":19266931,"total":39081844},"progress":"[========================\u003e ] 19.27 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":19660147,"total":39081844},"progress":"[=========================\u003e ] 19.66 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":20053363,"total":39081844},"progress":"[=========================\u003e ] 20.05 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":20446579,"total":39081844},"progress":"[==========================\u003e ] 20.45 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":20839795,"total":39081844},"progress":"[==========================\u003e ] 20.84 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":21233011,"total":39081844},"progress":"[===========================\u003e ] 21.23 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":21626227,"total":39081844},"progress":"[===========================\u003e ] 21.63 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":22019443,"total":39081844},"progress":"[============================\u003e ] 22.02 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":22412659,"total":39081844},"progress":"[============================\u003e ] 22.41 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":22805875,"total":39081844},"progress":"[=============================\u003e ] 22.81 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":23199091,"total":39081844},"progress":"[=============================\u003e ] 23.2 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":23592307,"total":39081844},"progress":"[==============================\u003e ] 23.59 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":23985523,"total":39081844},"progress":"[==============================\u003e ] 23.99 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":24378739,"total":39081844},"progress":"[===============================\u003e ] 24.38 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":24771955,"total":39081844},"progress":"[===============================\u003e ] 24.77 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":25165171,"total":39081844},"progress":"[================================\u003e ] 25.17 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":25558387,"total":39081844},"progress":"[================================\u003e ] 25.56 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":25951603,"total":39081844},"progress":"[=================================\u003e ] 25.95 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":26344819,"total":39081844},"progress":"[=================================\u003e ] 26.34 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":26738035,"total":39081844},"progress":"[==================================\u003e ] 26.74 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":27131251,"total":39081844},"progress":"[==================================\u003e ] 27.13 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":27524467,"total":39081844},"progress":"[===================================\u003e ] 27.52 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":27917683,"total":39081844},"progress":"[===================================\u003e ] 27.92 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":28310899,"total":39081844},"progress":"[====================================\u003e ] 28.31 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":28704115,"total":39081844},"progress":"[====================================\u003e ] 28.7 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":29097331,"total":39081844},"progress":"[=====================================\u003e ] 29.1 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":29490547,"total":39081844},"progress":"[=====================================\u003e ] 29.49 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":29883763,"total":39081844},"progress":"[======================================\u003e ] 29.88 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":30276979,"total":39081844},"progress":"[======================================\u003e ] 30.28 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":30670195,"total":39081844},"progress":"[=======================================\u003e ] 30.67 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":31063411,"total":39081844},"progress":"[=======================================\u003e ] 31.06 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":31456627,"total":39081844},"progress":"[========================================\u003e ] 31.46 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":31849843,"total":39081844},"progress":"[========================================\u003e ] 31.85 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":32243059,"total":39081844},"progress":"[=========================================\u003e ] 32.24 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":32636275,"total":39081844},"progress":"[=========================================\u003e ] 32.64 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":33029491,"total":39081844},"progress":"[==========================================\u003e ] 33.03 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":33422707,"total":39081844},"progress":"[==========================================\u003e ] 33.42 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":33815923,"total":39081844},"progress":"[===========================================\u003e ] 33.82 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":34209139,"total":39081844},"progress":"[===========================================\u003e ] 34.21 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":34602355,"total":39081844},"progress":"[============================================\u003e ] 34.6 MB/39.08 
MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":34995571,"total":39081844},"progress":"[============================================\u003e ] 35 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":35388787,"total":39081844},"progress":"[=============================================\u003e ] 35.39 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":35782003,"total":39081844},"progress":"[=============================================\u003e ] 35.78 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":36175219,"total":39081844},"progress":"[==============================================\u003e ] 36.18 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":36568435,"total":39081844},"progress":"[==============================================\u003e ] 36.57 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":36961651,"total":39081844},"progress":"[===============================================\u003e ] 36.96 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":37354867,"total":39081844},"progress":"[===============================================\u003e ] 37.35 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":37748083,"total":39081844},"progress":"[================================================\u003e ] 37.75 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":38141299,"total":39081844},"progress":"[================================================\u003e ] 38.14 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Downloading","progressDetail":{"current":38534515,"total":39081844},"progress":"[=================================================\u003e ] 38.53 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Downloading","progressDetail":{"current":38927731,"total":39081844},"progress":"[=================================================\u003e ] 38.93 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Download complete","progressDetail":{},"id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":393216,"total":39081844},"progress":"[\u003e ] 393.2 kB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":786432,"total":39081844},"progress":"[=\u003e ] 786.4 kB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":1179648,"total":39081844},"progress":"[=\u003e ] 1.18 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":1572864,"total":39081844},"progress":"[==\u003e ] 1.573 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":1966080,"total":39081844},"progress":"[==\u003e ] 1.966 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":2359296,"total":39081844},"progress":"[===\u003e ] 2.359 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":2752512,"total":39081844},"progress":"[===\u003e ] 2.753 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":3145728,"total":39081844},"progress":"[====\u003e ] 3.146 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":3538944,"total":39081844},"progress":"[====\u003e ] 3.539 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":3932160,"total":39081844},"progress":"[=====\u003e ] 3.932 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":4325376,"total":39081844},"progress":"[=====\u003e ] 4.325 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":4718592,"total":39081844},"progress":"[======\u003e ] 4.719 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":5111808,"total":39081844},"progress":"[======\u003e ] 5.112 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":5505024,"total":39081844},"progress":"[=======\u003e ] 5.505 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":5898240,"total":39081844},"progress":"[=======\u003e ] 5.898 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":6291456,"total":39081844},"progress":"[========\u003e ] 6.291 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":6684672,"total":39081844},"progress":"[========\u003e ] 6.685 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":7077888,"total":39081844},"progress":"[=========\u003e ] 7.078 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":7471104,"total":39081844},"progress":"[=========\u003e ] 7.471 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":7864320,"total":39081844},"progress":"[==========\u003e ] 7.864 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":8257536,"total":39081844},"progress":"[==========\u003e ] 8.258 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":8650752,"total":39081844},"progress":"[===========\u003e ] 8.651 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":9043968,"total":39081844},"progress":"[===========\u003e ] 9.044 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":9437184,"total":39081844},"progress":"[============\u003e ] 9.437 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":9830400,"total":39081844},"progress":"[============\u003e ] 9.83 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":10223616,"total":39081844},"progress":"[=============\u003e ] 10.22 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":10616832,"total":39081844},"progress":"[=============\u003e ] 10.62 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":11010048,"total":39081844},"progress":"[==============\u003e ] 11.01 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":11403264,"total":39081844},"progress":"[==============\u003e ] 11.4 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":11796480,"total":39081844},"progress":"[===============\u003e ] 11.8 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":12189696,"total":39081844},"progress":"[===============\u003e ] 12.19 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":12582912,"total":39081844},"progress":"[================\u003e ] 12.58 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":12976128,"total":39081844},"progress":"[================\u003e ] 12.98 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":13369344,"total":39081844},"progress":"[=================\u003e ] 13.37 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":13762560,"total":39081844},"progress":"[=================\u003e ] 13.76 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":14155776,"total":39081844},"progress":"[==================\u003e ] 14.16 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":14548992,"total":39081844},"progress":"[==================\u003e ] 14.55 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":14942208,"total":39081844},"progress":"[===================\u003e ] 14.94 MB/39.08 
MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":15335424,"total":39081844},"progress":"[===================\u003e ] 15.34 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":15728640,"total":39081844},"progress":"[====================\u003e ] 15.73 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":16121856,"total":39081844},"progress":"[====================\u003e ] 16.12 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":16515072,"total":39081844},"progress":"[=====================\u003e ] 16.52 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":16908288,"total":39081844},"progress":"[=====================\u003e ] 16.91 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":17301504,"total":39081844},"progress":"[======================\u003e ] 17.3 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":17694720,"total":39081844},"progress":"[======================\u003e ] 17.69 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":18087936,"total":39081844},"progress":"[=======================\u003e ] 18.09 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":18481152,"total":39081844},"progress":"[=======================\u003e ] 18.48 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":18874368,"total":39081844},"progress":"[========================\u003e ] 18.87 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":19267584,"total":39081844},"progress":"[========================\u003e ] 19.27 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":19660800,"total":39081844},"progress":"[=========================\u003e ] 19.66 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":20054016,"total":39081844},"progress":"[=========================\u003e ] 20.05 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":20447232,"total":39081844},"progress":"[==========================\u003e ] 20.45 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":20840448,"total":39081844},"progress":"[==========================\u003e ] 20.84 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":21233664,"total":39081844},"progress":"[===========================\u003e ] 21.23 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":21626880,"total":39081844},"progress":"[===========================\u003e ] 21.63 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":22020096,"total":39081844},"progress":"[============================\u003e ] 22.02 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":22413312,"total":39081844},"progress":"[============================\u003e ] 22.41 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":22806528,"total":39081844},"progress":"[=============================\u003e ] 22.81 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":23199744,"total":39081844},"progress":"[=============================\u003e ] 23.2 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":23592960,"total":39081844},"progress":"[==============================\u003e ] 23.59 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":23986176,"total":39081844},"progress":"[==============================\u003e ] 23.99 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":24379392,"total":39081844},"progress":"[===============================\u003e ] 24.38 MB/39.08 
MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":24772608,"total":39081844},"progress":"[===============================\u003e ] 24.77 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":25165824,"total":39081844},"progress":"[================================\u003e ] 25.17 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":25559040,"total":39081844},"progress":"[================================\u003e ] 25.56 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":25952256,"total":39081844},"progress":"[=================================\u003e ] 25.95 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":26345472,"total":39081844},"progress":"[=================================\u003e ] 26.35 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":26738688,"total":39081844},"progress":"[==================================\u003e ] 26.74 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":27131904,"total":39081844},"progress":"[==================================\u003e ] 27.13 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":27525120,"total":39081844},"progress":"[===================================\u003e ] 27.53 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":27918336,"total":39081844},"progress":"[===================================\u003e ] 27.92 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":28311552,"total":39081844},"progress":"[====================================\u003e ] 28.31 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":28704768,"total":39081844},"progress":"[====================================\u003e ] 28.7 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":29097984,"total":39081844},"progress":"[=====================================\u003e ] 29.1 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":29491200,"total":39081844},"progress":"[=====================================\u003e ] 29.49 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":29884416,"total":39081844},"progress":"[======================================\u003e ] 29.88 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":30277632,"total":39081844},"progress":"[======================================\u003e ] 30.28 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":30670848,"total":39081844},"progress":"[=======================================\u003e ] 30.67 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":31064064,"total":39081844},"progress":"[=======================================\u003e ] 31.06 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":31457280,"total":39081844},"progress":"[========================================\u003e ] 31.46 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":31850496,"total":39081844},"progress":"[========================================\u003e ] 31.85 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":32243712,"total":39081844},"progress":"[=========================================\u003e ] 32.24 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":32636928,"total":39081844},"progress":"[=========================================\u003e ] 32.64 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":33030144,"total":39081844},"progress":"[==========================================\u003e ] 33.03 MB/39.08 MB","id":"36cef014d5d4"} 
{"status":"Extracting","progressDetail":{"current":33423360,"total":39081844},"progress":"[==========================================\u003e ] 33.42 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":33816576,"total":39081844},"progress":"[===========================================\u003e ] 33.82 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":34209792,"total":39081844},"progress":"[===========================================\u003e ] 34.21 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":34603008,"total":39081844},"progress":"[============================================\u003e ] 34.6 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":34996224,"total":39081844},"progress":"[============================================\u003e ] 35 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":35389440,"total":39081844},"progress":"[=============================================\u003e ] 35.39 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":35782656,"total":39081844},"progress":"[=============================================\u003e ] 35.78 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":36175872,"total":39081844},"progress":"[==============================================\u003e ] 36.18 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":36569088,"total":39081844},"progress":"[==============================================\u003e ] 36.57 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":36962304,"total":39081844},"progress":"[===============================================\u003e ] 36.96 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":37355520,"total":39081844},"progress":"[===============================================\u003e ] 37.36 
MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":37748736,"total":39081844},"progress":"[================================================\u003e ] 37.75 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":38141952,"total":39081844},"progress":"[================================================\u003e ] 38.14 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":38535168,"total":39081844},"progress":"[=================================================\u003e ] 38.54 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":38928384,"total":39081844},"progress":"[=================================================\u003e ] 38.93 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":39081844,"total":39081844},"progress":"[==================================================\u003e] 39.08 MB/39.08 MB","id":"36cef014d5d4"} {"status":"Pull complete","progressDetail":{},"id":"36cef014d5d4"} {"status":"Extracting","progressDetail":{"current":32768,"total":57935},"progress":"[============================\u003e ] 32.77 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Extracting","progressDetail":{"current":57935,"total":57935},"progress":"[==================================================\u003e] 57.94 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Extracting","progressDetail":{"current":57935,"total":57935},"progress":"[==================================================\u003e] 57.94 kB/57.94 kB","id":"0d99ad4de1d2"} {"status":"Pull complete","progressDetail":{},"id":"0d99ad4de1d2"} {"status":"Extracting","progressDetail":{"current":419,"total":419},"progress":"[==================================================\u003e] 419 B/419 B","id":"3e32dbf1ab94"} {"status":"Extracting","progressDetail":{"current":419,"total":419},"progress":"[==================================================\u003e] 419 B/419 B","id":"3e32dbf1ab94"} 
{"status":"Pull complete","progressDetail":{},"id":"3e32dbf1ab94"} {"status":"Extracting","progressDetail":{"current":682,"total":682},"progress":"[==================================================\u003e] 682 B/682 B","id":"44710c456ffc"} {"status":"Extracting","progressDetail":{"current":682,"total":682},"progress":"[==================================================\u003e] 682 B/682 B","id":"44710c456ffc"} {"status":"Pull complete","progressDetail":{},"id":"44710c456ffc"} {"status":"Extracting","progressDetail":{"current":162,"total":162},"progress":"[==================================================\u003e] 162 B/162 B","id":"56e70ac3b314"} {"status":"Extracting","progressDetail":{"current":162,"total":162},"progress":"[==================================================\u003e] 162 B/162 B","id":"56e70ac3b314"} {"status":"Pull complete","progressDetail":{},"id":"56e70ac3b314"} {"status":"Digest: sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"} {"status":"Status: Downloaded newer image for ubuntu:12.04"}apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/linux_124/containers.json0000664000175000017500000000647213153541406031074 0ustar kamikami00000000000000[ { "Id": "160936dc54fe8c332095676d9379003534b8cddd7565fa63018996e06dae1b6b", "Names": [ "/hubot" ], "Image": "stackstorm/hubot", "ImageID": "05c5761707b3970a9bf17c00886176add79ac087b4d6a500ac87985bf8ec07b1", "Command": "/app/bin/hubot", "Created": 1450130345, "Ports": [], "Labels": {}, "Status": "Exited (137) 11 minutes ago", "HostConfig": { "NetworkMode": "bridge" } }, { "Id": "f159072147ee7d253e21ec8fd2778a27ac29d7fc5f865641900d16665b46215a", "Names": [ "/mongo" ], "Image": "mongo", "ImageID": "cf55d61f5307b7a18a45980971d6cfd40b737dd661879c4a6b3f2aecc3bc37b0", "Command": "/entrypoint.sh mongod", "Created": 1450130332, "Ports": [], "Labels": {}, "Status": "Exited (14) 2 hours ago", "HostConfig": { "NetworkMode": "bridge" } }, { "Id": 
"e687b33f9ced0153104308e6ff7a2138b8cc026fa4085d31da831a02ed0dc03d", "Names": [ "/rabbitmq" ], "Image": "rabbitmq", "ImageID": "448afeda0388b18c6f3be18c7aaece29e0f8dbdfab30364e678c382bab1037c5", "Command": "/docker-entrypoint.sh rabbitmq-server", "Created": 1450130331, "Ports": [ { "IP": "0.0.0.0", "PrivatePort": 5672, "PublicPort": 5672, "Type": "tcp" }, { "PrivatePort": 25672, "Type": "tcp" }, { "PrivatePort": 4369, "Type": "tcp" }, { "PrivatePort": 5671, "Type": "tcp" } ], "Labels": {}, "Status": "Exited (137) 11 minutes ago", "HostConfig": { "NetworkMode": "bridge" } }, { "Id": "b82c16423c6dbb7cd1564f8fc413c822df45cc0c7aa35c24683a1329af6ec102", "Names": [ "/fervent_bhabha" ], "Image": "rabbitmq", "ImageID": "448afeda0388b18c6f3be18c7aaece29e0f8dbdfab30364e678c382bab1037c5", "Command": "/docker-entrypoint.sh rabbitmq-server", "Created": 1450059506, "Ports": [ { "PrivatePort": 4369, "Type": "tcp" }, { "PrivatePort": 5671, "Type": "tcp" }, { "IP": "0.0.0.0", "PrivatePort": 5672, "PublicPort": 5672, "Type": "tcp" }, { "PrivatePort": 25672, "Type": "tcp" } ], "Labels": {}, "Status": "Dead", "HostConfig": { "NetworkMode": "bridge" } }, { "Id": "8cc5481aa4621578f8dd2c942d74e27e75170c6899ea012db7a44ea5f1ba2069", "Names": [ "/suspicious_swirles" ], "Image": "mongo", "ImageID": "cf55d61f5307b7a18a45980971d6cfd40b737dd661879c4a6b3f2aecc3bc37b0", "Command": "/entrypoint.sh mongod", "Created": 1450059505, "Ports": [ { "IP": "0.0.0.0", "PrivatePort": 27017, "PublicPort": 27017, "Type": "tcp" } ], "Labels": {}, "Status": "Dead", "HostConfig": { "NetworkMode": "bridge" } }, { "Id": "598b3e4d15a406390baaa2947f910e7b52b810a4120028692ed309247f2e8346", "Names": [ "/mongodata" ], "Image": "mongo", "ImageID": "cf55d61f5307b7a18a45980971d6cfd40b737dd661879c4a6b3f2aecc3bc37b0", "Command": "/entrypoint.sh /bin/true", "Created": 1449637213, "Ports": [], "Labels": {}, "Status": "Created", "HostConfig": { "NetworkMode": "default" } } 
]apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/linux_124/version.json0000664000175000017500000000033713153541406030406 0ustar kamikami00000000000000{ "Version": "1.9.1", "ApiVersion": "1.21", "GitCommit": "a34a1d5", "GoVersion": "go1.4.3", "Os": "linux", "Arch": "amd64", "KernelVersion": "3.13.0-46-generic", "BuildTime": "Fri Nov 20 17:56:04 UTC 2015" }apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/linux_124/logs.txt0000664000175000017500000000005713153541406027532 0ustar kamikami00000000000000/entrypoint.sh: line 19: exec: None: not found apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/linux_124/images.json0000664000175000017500000000247213153541406030170 0ustar kamikami00000000000000[ { "Id": "cf55d61f5307b7a18a45980971d6cfd40b737dd661879c4a6b3f2aecc3bc37b0", "ParentId": "3e408cde1b7f6276b9ead7b8111d80a367f9223dfbbd4102ea89a5fc42947960", "RepoTags": [ "mongo:latest" ], "RepoDigests": [], "Created": 1449618009, "Size": 0, "VirtualSize": 316957672, "Labels": null }, { "Id": "05c5761707b3970a9bf17c00886176add79ac087b4d6a500ac87985bf8ec07b1", "ParentId": "be7965ce1bef5d2e3b27efb3f4fe2253683bc7144d2ebae614e9e7155066c833", "RepoTags": [ "stackstorm/hubot:latest" ], "RepoDigests": [], "Created": 1449466772, "Size": 0, "VirtualSize": 550102318, "Labels": {} }, { "Id": "448afeda0388b18c6f3be18c7aaece29e0f8dbdfab30364e678c382bab1037c5", "ParentId": "67edbf589f9af9b2c6f87e8481ec0299c50bfce5f9b98b95316c7235494c7bea", "RepoTags": [ "rabbitmq:latest" ], "RepoDigests": [], "Created": 1449312753, "Size": 0, "VirtualSize": 304310861, "Labels": null }, { "Id": "9da5438fedb2e9a1e11a3361c4a53e0801ed1f8f4c014d83a5a514f0c60892bf", "ParentId": "64ccc5e9d20c638849eadddab4f23204c3fcdd62d497cdbd0ecf44d863b086c8", "RepoTags": [ "mongo:2.4.14" ], "RepoDigests": [], "Created": 1449299455, "Size": 0, "VirtualSize": 344445131, "Labels": null } 
]apache-libcloud-2.2.1/libcloud/test/container/fixtures/docker/linux_124/container_a68.json0000664000175000017500000001017213153541406031357 0ustar kamikami00000000000000{ "Id": "a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303", "Created": "2015-12-23T01:05:40.56937184Z", "Path": "/entrypoint.sh", "Args": [ "None" ], "State": { "Status": "running", "Running": false, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 0, "ExitCode": 127, "Error": "", "StartedAt": "2015-12-23T01:06:29.018395755Z", "FinishedAt": "2015-12-23T01:06:30.144487212Z" }, "Image": "cf55d61f5307b7a18a45980971d6cfd40b737dd661879c4a6b3f2aecc3bc37b0", "ResolvConfPath": "/var/lib/docker/containers/a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303/resolv.conf", "HostnamePath": "/var/lib/docker/containers/a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303/hostname", "HostsPath": "/var/lib/docker/containers/a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303/hosts", "LogPath": "/var/lib/docker/containers/a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303/a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303-json.log", "Name": "/gigantic_goldberg", "RestartCount": 0, "Driver": "aufs", "ExecDriver": "native-0.2", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "", "ExecIDs": null, "HostConfig": { "Binds": null, "ContainerIDFile": "", "LxcConf": null, "Memory": 0, "MemoryReservation": 0, "MemorySwap": 0, "KernelMemory": 0, "CpuShares": 0, "CpuPeriod": 0, "CpusetCpus": "", "CpusetMems": "", "CpuQuota": 0, "BlkioWeight": 0, "OomKillDisable": false, "MemorySwappiness": null, "Privileged": false, "PortBindings": {}, "Links": null, "PublishAllPorts": true, "Dns": null, "DnsOptions": null, "DnsSearch": null, "ExtraHosts": null, "VolumesFrom": null, "Devices": null, "NetworkMode": "default", "IpcMode": "", "PidMode": "", "UTSMode": "", "CapAdd": null, "CapDrop": null, "GroupAdd": null, 
"RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "SecurityOpt": null, "ReadonlyRootfs": false, "Ulimits": null, "LogConfig": { "Type": "json-file", "Config": {} }, "CgroupParent": "", "ConsoleSize": [ 0, 0 ], "VolumeDriver": "" }, "GraphDriver": { "Name": "aufs", "Data": null }, "Mounts": [ { "Name": "b1a70d8e1ebd7d5865e59ff91cf06357e3ef4d829af44c31675c2d0a24894444", "Source": "/var/lib/docker/volumes/b1a70d8e1ebd7d5865e59ff91cf06357e3ef4d829af44c31675c2d0a24894444/_data", "Destination": "/data/db", "Driver": "local", "Mode": "", "RW": true } ], "Config": { "Hostname": "a68c1872c746", "Domainname": "", "User": "", "AttachStdin": true, "AttachStdout": true, "AttachStderr": true, "ExposedPorts": { "27017/tcp": {} }, "Tty": true, "OpenStdin": true, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "GPG_KEYS=DFFA3DCF326E302C4787673A01C4E7FAAAB2461C \t42F3E95A2C4F08279C4960ADD68FA50FEA312927", "MONGO_MAJOR=3.2", "MONGO_VERSION=3.2.0" ], "Cmd": [ "None" ], "Image": "mongo:latest", "Volumes": { "/data/db": {} }, "WorkingDir": "", "Entrypoint": [ "/entrypoint.sh" ], "OnBuild": null, "Labels": {} }, "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": null, "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "", "Networks": { "bridge": { "EndpointID": "", "Gateway": "", "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "" } } } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/0000775000175000017500000000000013160535110024442 5ustar 
kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/ex_deploy_stack.json0000664000175000017500000000303313153541406030520 0ustar kamikami00000000000000{ "id": "1e9", "type": "environment", "links": { "self": "http://172.30.0.100:8080/v1/environments/1e9", "account": "http://172.30.0.100:8080/v1/environments/1e9/account", "services": "http://172.30.0.100:8080/v1/environments/1e9/services", "composeConfig": "http://172.30.0.100:8080/v1/environments/1e9/composeconfig" }, "actions": { "upgrade": "http://172.30.0.100:8080/v1/environments/1e9/?action=upgrade", "update": "http://172.30.0.100:8080/v1/environments/1e9/?action=update", "remove": "http://172.30.0.100:8080/v1/environments/1e9/?action=remove", "addoutputs": "http://172.30.0.100:8080/v1/environments/1e9/?action=addoutputs", "activateservices": "http://172.30.0.100:8080/v1/environments/1e9/?action=activateservices", "deactivateservices": "http://172.30.0.100:8080/v1/environments/1e9/?action=deactivateservices", "exportconfig": "http://172.30.0.100:8080/v1/environments/1e9/?action=exportconfig" }, "name": "newstack", "state": "active", "accountId": "1a5", "created": "2016-10-06T14:12:34Z", "createdTS": 1475763154000, "description": null, "dockerCompose": null, "environment": {"root_password": "password"}, "externalId": null, "healthState": "healthy", "kind": "environment", "outputs": null, "previousEnvironment": null, "previousExternalId": null, "rancherCompose": null, "removed": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "uuid": "7f8b1836-5619-4e19-adfe-6967fe55bda7" }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/list_containers.json0000664000175000017500000003346713153541406030561 0ustar kamikami00000000000000{ "type": "collection", "resourceType": "container", "links": { "self": "http://172.30.0.100:8080/v1/containers" }, "createTypes": { "container": "http://172.30.0.100:8080/v1/containers", 
"launchConfig": "http://172.30.0.100:8080/v1/launchconfigs", "virtualMachine": "http://172.30.0.100:8080/v1/virtualmachines" }, "actions": {}, "data": [ { "id": "1i1", "type": "container", "links": { "self": "http://172.30.0.100:8080/v1/containers/1i1", "account": "http://172.30.0.100:8080/v1/containers/1i1/account", "credentials": "http://172.30.0.100:8080/v1/containers/1i1/credentials", "healthcheckInstanceHostMaps": "http://172.30.0.100:8080/v1/containers/1i1/healthcheckinstancehostmaps", "hosts": "http://172.30.0.100:8080/v1/containers/1i1/hosts", "instanceLabels": "http://172.30.0.100:8080/v1/containers/1i1/instancelabels", "instanceLinks": "http://172.30.0.100:8080/v1/containers/1i1/instancelinks", "instances": "http://172.30.0.100:8080/v1/containers/1i1/instances", "mounts": "http://172.30.0.100:8080/v1/containers/1i1/mounts", "ports": "http://172.30.0.100:8080/v1/containers/1i1/ports", "serviceEvents": "http://172.30.0.100:8080/v1/containers/1i1/serviceevents", "serviceExposeMaps": "http://172.30.0.100:8080/v1/containers/1i1/serviceexposemaps", "services": "http://172.30.0.100:8080/v1/containers/1i1/services", "targetInstanceLinks": "http://172.30.0.100:8080/v1/containers/1i1/targetinstancelinks", "volumes": "http://172.30.0.100:8080/v1/containers/1i1/volumes", "stats": "http://172.30.0.100:8080/v1/containers/1i1/stats", "containerStats": "http://172.30.0.100:8080/v1/containers/1i1/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/containers/1i1/?action=update", "error": "http://172.30.0.100:8080/v1/containers/1i1/?action=error", "remove": "http://172.30.0.100:8080/v1/containers/1i1/?action=remove", "start": "http://172.30.0.100:8080/v1/containers/1i1/?action=start", "logs": "http://172.30.0.100:8080/v1/containers/1i1/?action=logs", "setlabels": "http://172.30.0.100:8080/v1/containers/1i1/?action=setlabels" }, "name": "Concrete5_CMSMysql_1", "state": "stopped", "accountId": "1a5", "blkioDeviceOptions": null, "build": null, "capAdd": 
null, "capDrop": null, "command": null, "cpuSet": null, "cpuShares": null, "createIndex": 1, "created": "2016-09-22T17:58:55Z", "createdTS": 1474567135000, "dataVolumeMounts": {}, "dataVolumes": [ "CMS_DB_DATA:/var/lib/mysql" ], "dataVolumesFrom": null, "deploymentUnitUuid": "761675e2-7fc7-4fbf-a825-132429a44349", "description": null, "devices": null, "dns": [ "169.254.169.250" ], "dnsSearch": [ "concrete5.rancher.internal", "cmsmysql.concrete5.rancher.internal", "rancher.internal" ], "dockerPorts": [], "domainName": null, "entryPoint": null, "environment": { "MYSQL_DATABASE": "CMS_DB", "MYSQL_PASSWORD": "password", "MYSQL_ROOT_PASSWORD": "password", "MYSQL_USER": "CMS_USER" }, "expose": null, "externalId": "957136960bd1b51acf5a6c0079d0e35b8d5c14f54722a3063ceb868e85fd3758", "extraHosts": null, "firstRunning": "2016-09-22T17:59:40Z", "firstRunningTS": 1474567180000, "healthCheck": null, "healthState": null, "hostId": "1h1", "hostname": null, "imageUuid": "docker:mysql", "kind": "container", "labels": { "io.rancher.project.name": "Concrete5", "io.rancher.container.pull_image": "always", "io.rancher.service.deployment.unit": "761675e2-7fc7-4fbf-a825-132429a44349", "io.rancher.service.launch.config": "io.rancher.service.primary.launch.config", "io.rancher.project_service.name": "Concrete5/CMSMysql", "io.rancher.stack.name": "Concrete5", "io.rancher.stack_service.name": "Concrete5/CMSMysql", "io.rancher.service.hash": "c5d6bacef4be47a5ca5d1517a1d33319d024cdde", "io.rancher.container.uuid": "a1140e7c-9260-423e-a7d0-8f5f7bb8e946", "io.rancher.container.name": "Concrete5_CMSMysql_1", "io.rancher.container.ip": "10.42.80.96/16" }, "logConfig": {}, "lxcConf": null, "memory": null, "memorySwap": null, "nativeContainer": false, "networkContainerId": null, "networkMode": "managed", "pidMode": null, "ports": null, "primaryIpAddress": "10.42.80.96", "privileged": false, "publishAllPorts": false, "readOnly": false, "registryCredentialId": null, "removed": null, "requestedHostId": 
null, "restartPolicy": null, "securityOpt": null, "startCount": 4, "startOnCreate": true, "stdinOpen": true, "systemContainer": null, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "tty": true, "user": null, "uuid": "a1140e7c-9260-423e-a7d0-8f5f7bb8e946", "version": "0", "volumeDriver": null, "workingDir": null }, { "id": "1i2", "type": "container", "links": { "self": "http://172.30.0.100:8080/v1/containers/1i2", "account": "http://172.30.0.100:8080/v1/containers/1i2/account", "credentials": "http://172.30.0.100:8080/v1/containers/1i2/credentials", "healthcheckInstanceHostMaps": "http://172.30.0.100:8080/v1/containers/1i2/healthcheckinstancehostmaps", "hosts": "http://172.30.0.100:8080/v1/containers/1i2/hosts", "instanceLabels": "http://172.30.0.100:8080/v1/containers/1i2/instancelabels", "instanceLinks": "http://172.30.0.100:8080/v1/containers/1i2/instancelinks", "instances": "http://172.30.0.100:8080/v1/containers/1i2/instances", "mounts": "http://172.30.0.100:8080/v1/containers/1i2/mounts", "ports": "http://172.30.0.100:8080/v1/containers/1i2/ports", "serviceEvents": "http://172.30.0.100:8080/v1/containers/1i2/serviceevents", "serviceExposeMaps": "http://172.30.0.100:8080/v1/containers/1i2/serviceexposemaps", "services": "http://172.30.0.100:8080/v1/containers/1i2/services", "targetInstanceLinks": "http://172.30.0.100:8080/v1/containers/1i2/targetinstancelinks", "volumes": "http://172.30.0.100:8080/v1/containers/1i2/volumes", "stats": "http://172.30.0.100:8080/v1/containers/1i2/stats", "containerStats": "http://172.30.0.100:8080/v1/containers/1i2/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/containers/1i2/?action=update", "stop": "http://172.30.0.100:8080/v1/containers/1i2/?action=stop", "restart": "http://172.30.0.100:8080/v1/containers/1i2/?action=restart", "migrate": "http://172.30.0.100:8080/v1/containers/1i2/?action=migrate", "logs": "http://172.30.0.100:8080/v1/containers/1i2/?action=logs", 
"setlabels": "http://172.30.0.100:8080/v1/containers/1i2/?action=setlabels", "execute": "http://172.30.0.100:8080/v1/containers/1i2/?action=execute", "proxy": "http://172.30.0.100:8080/v1/containers/1i2/?action=proxy" }, "name": "Network Agent", "state": "running", "accountId": "1a5", "blkioDeviceOptions": null, "build": null, "capAdd": null, "capDrop": null, "command": null, "cpuSet": null, "cpuShares": null, "createIndex": null, "created": "2016-09-22T17:58:57Z", "createdTS": 1474567137000, "dataVolumeMounts": {}, "dataVolumes": [ "/var/lib/rancher/etc:/var/lib/rancher/etc:ro" ], "dataVolumesFrom": null, "deploymentUnitUuid": null, "description": null, "devices": null, "dns": null, "dnsSearch": null, "domainName": null, "entryPoint": null, "environment": null, "expose": null, "externalId": "129c67adc9fa084fbc8e1f963db0180896b88af3dca69c7e8fe0493284e4651c", "extraHosts": null, "firstRunning": "2016-09-22T17:59:11Z", "firstRunningTS": 1474567151000, "healthCheck": null, "healthState": null, "hostId": "1h1", "hostname": null, "imageUuid": "docker:rancher/agent-instance:v0.8.3", "kind": "container", "labels": { "io.rancher.container.system": "NetworkAgent", "io.rancher.container.uuid": "b0f88089-d28b-4388-93a7-889b750cd7cb", "io.rancher.container.name": "Network Agent", "io.rancher.container.agent_id": "2", "io.rancher.container.ip": "10.42.247.188/16" }, "logConfig": null, "lxcConf": null, "memory": null, "memorySwap": null, "nativeContainer": false, "networkContainerId": null, "networkIds": [], "networkMode": null, "pidMode": null, "ports": null, "primaryIpAddress": "10.42.247.188", "privileged": true, "publishAllPorts": false, "readOnly": false, "registryCredentialId": null, "removed": null, "requestedHostId": null, "restartPolicy": null, "securityOpt": null, "startCount": 3, "startOnCreate": true, "stdinOpen": false, "systemContainer": "NetworkAgent", "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "tty": false, "user": null, 
"uuid": "b0f88089-d28b-4388-93a7-889b750cd7cb", "version": "0", "volumeDriver": null, "workingDir": null } ], "sortLinks": { "accountId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=accountId", "agentId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=agentId", "allocationState": "http://172.30.0.100:8080/v1/containers?limit=2&sort=allocationState", "compute": "http://172.30.0.100:8080/v1/containers?limit=2&sort=compute", "createIndex": "http://172.30.0.100:8080/v1/containers?limit=2&sort=createIndex", "created": "http://172.30.0.100:8080/v1/containers?limit=2&sort=created", "deploymentUnitUuid": "http://172.30.0.100:8080/v1/containers?limit=2&sort=deploymentUnitUuid", "description": "http://172.30.0.100:8080/v1/containers?limit=2&sort=description", "dnsInternal": "http://172.30.0.100:8080/v1/containers?limit=2&sort=dnsInternal", "dnsSearchInternal": "http://172.30.0.100:8080/v1/containers?limit=2&sort=dnsSearchInternal", "domain": "http://172.30.0.100:8080/v1/containers?limit=2&sort=domain", "externalId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=externalId", "firstRunning": "http://172.30.0.100:8080/v1/containers?limit=2&sort=firstRunning", "healthState": "http://172.30.0.100:8080/v1/containers?limit=2&sort=healthState", "healthUpdated": "http://172.30.0.100:8080/v1/containers?limit=2&sort=healthUpdated", "hostname": "http://172.30.0.100:8080/v1/containers?limit=2&sort=hostname", "id": "http://172.30.0.100:8080/v1/containers?limit=2&sort=id", "imageId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=imageId", "instanceTriggeredStop": "http://172.30.0.100:8080/v1/containers?limit=2&sort=instanceTriggeredStop", "kind": "http://172.30.0.100:8080/v1/containers?limit=2&sort=kind", "memoryMb": "http://172.30.0.100:8080/v1/containers?limit=2&sort=memoryMb", "name": "http://172.30.0.100:8080/v1/containers?limit=2&sort=name", "nativeContainer": "http://172.30.0.100:8080/v1/containers?limit=2&sort=nativeContainer", "networkContainerId": 
"http://172.30.0.100:8080/v1/containers?limit=2&sort=networkContainerId", "offeringId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=offeringId", "registryCredentialId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=registryCredentialId", "removeTime": "http://172.30.0.100:8080/v1/containers?limit=2&sort=removeTime", "removed": "http://172.30.0.100:8080/v1/containers?limit=2&sort=removed", "serviceIndexId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=serviceIndexId", "startCount": "http://172.30.0.100:8080/v1/containers?limit=2&sort=startCount", "state": "http://172.30.0.100:8080/v1/containers?limit=2&sort=state", "systemContainer": "http://172.30.0.100:8080/v1/containers?limit=2&sort=systemContainer", "token": "http://172.30.0.100:8080/v1/containers?limit=2&sort=token", "userdata": "http://172.30.0.100:8080/v1/containers?limit=2&sort=userdata", "uuid": "http://172.30.0.100:8080/v1/containers?limit=2&sort=uuid", "version": "http://172.30.0.100:8080/v1/containers?limit=2&sort=version", "zoneId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=zoneId" }, "pagination": { "first": null, "previous": null, "next": "http://172.30.0.100:8080/v1/containers?limit=2&marker=m2", "limit": 2, "total": null, "partial": true }, "sort": null, "filters": { "accountId": null, "agentId": null, "allocationState": null, "compute": null, "createIndex": null, "created": null, "deploymentUnitUuid": null, "description": null, "dnsInternal": null, "dnsSearchInternal": null, "domain": null, "externalId": null, "firstRunning": null, "healthState": null, "healthUpdated": null, "hostname": null, "id": null, "imageId": null, "instanceTriggeredStop": null, "kind": null, "memoryMb": null, "name": null, "nativeContainer": null, "networkContainerId": null, "offeringId": null, "registryCredentialId": null, "removeTime": null, "removed": null, "serviceIndexId": null, "startCount": null, "state": null, "systemContainer": null, "token": null, "userdata": null, "uuid": null, 
"version": null, "zoneId": null }, "createDefaults": {} }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/ex_activate_service.json0000664000175000017500000001322313153541406031361 0ustar kamikami00000000000000{ "id": "1s6", "type": "service", "links": { "self": "http://172.30.0.100:8080/v1/services/1s6", "account": "http://172.30.0.100:8080/v1/services/1s6/account", "consumedbyservices": "http://172.30.0.100:8080/v1/services/1s6/consumedbyservices", "consumedservices": "http://172.30.0.100:8080/v1/services/1s6/consumedservices", "environment": "http://172.30.0.100:8080/v1/services/1s6/environment", "instances": "http://172.30.0.100:8080/v1/services/1s6/instances", "serviceExposeMaps": "http://172.30.0.100:8080/v1/services/1s6/serviceexposemaps", "containerStats": "http://172.30.0.100:8080/v1/services/1s6/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/services/1s6/?action=update", "restart": "http://172.30.0.100:8080/v1/services/1s6/?action=restart", "remove": "http://172.30.0.100:8080/v1/services/1s6/?action=remove", "setservicelinks": "http://172.30.0.100:8080/v1/services/1s6/?action=setservicelinks", "removeservicelink": "http://172.30.0.100:8080/v1/services/1s6/?action=removeservicelink", "upgrade": "http://172.30.0.100:8080/v1/services/1s6/?action=upgrade", "addservicelink": "http://172.30.0.100:8080/v1/services/1s6/?action=addservicelink", "deactivate": "http://172.30.0.100:8080/v1/services/1s6/?action=deactivate" }, "name": "123", "state": "active", "accountId": "1a5", "assignServiceIpAddress": false, "createIndex": 1, "created": "2016-09-22T18:28:04Z", "createdTS": 1474568884000, "currentScale": 1, "description": null, "environmentId": "1e2", "externalId": null, "fqdn": null, "healthState": "healthy", "kind": "service", "launchConfig": { "capAdd": [], "capDrop": [], "count": null, "cpuSet": null, "cpuShares": null, "dataVolumes": [], "dataVolumesFrom": [], "description": null, "devices": [], "dns": [], "dnsSearch": [], 
"domainName": null, "hostname": null, "imageUuid": "docker:ubuntu:trusty", "kind": "container", "labels": { "io.rancher.container.pull_image": "always" }, "logConfig": { "config": {}, "driver": "" }, "memory": null, "memoryMb": null, "memorySwap": null, "networkMode": "managed", "pidMode": null, "ports": [], "privileged": false, "publishAllPorts": false, "readOnly": false, "requestedIpAddress": null, "startOnCreate": true, "stdinOpen": true, "tty": true, "user": null, "userdata": null, "version": "0bdea468-c3e9-4562-951c-d543958e966a", "volumeDriver": null, "workingDir": null, "dataVolumesFromLaunchConfigs": [], "networkLaunchConfig": null, "vcpu": 1 }, "metadata": null, "publicEndpoints": null, "removed": null, "retainIp": null, "scale": 1, "scalePolicy": null, "secondaryLaunchConfigs": [], "selectorContainer": null, "selectorLink": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "upgrade": { "inServiceStrategy": { "batchSize": 1, "intervalMillis": 2000, "launchConfig": { "capAdd": [], "capDrop": [], "count": null, "cpuSet": null, "cpuShares": null, "dataVolumes": [], "dataVolumesFrom": [], "description": null, "devices": [], "dns": [], "dnsSearch": [], "domainName": null, "hostname": null, "imageUuid": "docker:ubuntu:trusty", "kind": "container", "labels": { "io.rancher.container.pull_image": "always" }, "logConfig": { "config": {}, "driver": "" }, "memory": null, "memoryMb": null, "memorySwap": null, "networkMode": "managed", "pidMode": null, "ports": [], "privileged": false, "publishAllPorts": false, "readOnly": false, "requestedIpAddress": null, "startOnCreate": true, "stdinOpen": true, "tty": true, "user": null, "userdata": null, "version": "0bdea468-c3e9-4562-951c-d543958e966a", "volumeDriver": null, "workingDir": null, "dataVolumesFromLaunchConfigs": [], "networkLaunchConfig": null, "vcpu": 1 }, "previousLaunchConfig": { "capAdd": [], "capDrop": [], "count": null, "cpuSet": null, "cpuShares": 
null, "dataVolumes": [], "dataVolumesFrom": [], "description": null, "devices": [], "dns": [], "dnsSearch": [], "domainName": null, "hostname": null, "imageUuid": "docker:ubuntu:trusty", "kind": "container", "labels": { "io.rancher.container.pull_image": "always" }, "logConfig": { "config": {}, "driver": "" }, "memory": null, "memoryMb": null, "memorySwap": null, "networkMode": "managed", "pidMode": null, "ports": [], "privileged": false, "publishAllPorts": false, "readOnly": false, "requestedIpAddress": null, "startOnCreate": true, "stdinOpen": true, "tty": true, "user": null, "userdata": null, "version": "0", "volumeDriver": null, "workingDir": null, "dataVolumesFromLaunchConfigs": [], "networkLaunchConfig": null, "vcpu": 1 }, "previousSecondaryLaunchConfigs": [], "secondaryLaunchConfigs": [], "startFirst": true }, "toServiceStrategy": null }, "uuid": "c0ae4d08-e20a-45ef-9fb9-ad9f7cdeeb15", "vip": null }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/ex_search_containers.json0000664000175000017500000002163213153541406031536 0ustar kamikami00000000000000{ "type": "collection", "resourceType": "container", "links": { "self": "http://172.30.0.100:8080/v1/containers" }, "createTypes": { "container": "http://172.30.0.100:8080/v1/containers", "launchConfig": "http://172.30.0.100:8080/v1/launchconfigs", "virtualMachine": "http://172.30.0.100:8080/v1/virtualmachines" }, "actions": {}, "data": [ { "id": "1i2", "type": "container", "links": { "self": "http://172.30.0.100:8080/v1/containers/1i2", "account": "http://172.30.0.100:8080/v1/containers/1i2/account", "credentials": "http://172.30.0.100:8080/v1/containers/1i2/credentials", "healthcheckInstanceHostMaps": "http://172.30.0.100:8080/v1/containers/1i2/healthcheckinstancehostmaps", "hosts": "http://172.30.0.100:8080/v1/containers/1i2/hosts", "instanceLabels": "http://172.30.0.100:8080/v1/containers/1i2/instancelabels", "instanceLinks": "http://172.30.0.100:8080/v1/containers/1i2/instancelinks", 
"instances": "http://172.30.0.100:8080/v1/containers/1i2/instances", "mounts": "http://172.30.0.100:8080/v1/containers/1i2/mounts", "ports": "http://172.30.0.100:8080/v1/containers/1i2/ports", "serviceEvents": "http://172.30.0.100:8080/v1/containers/1i2/serviceevents", "serviceExposeMaps": "http://172.30.0.100:8080/v1/containers/1i2/serviceexposemaps", "services": "http://172.30.0.100:8080/v1/containers/1i2/services", "targetInstanceLinks": "http://172.30.0.100:8080/v1/containers/1i2/targetinstancelinks", "volumes": "http://172.30.0.100:8080/v1/containers/1i2/volumes", "stats": "http://172.30.0.100:8080/v1/containers/1i2/stats", "containerStats": "http://172.30.0.100:8080/v1/containers/1i2/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/containers/1i2/?action=update", "stop": "http://172.30.0.100:8080/v1/containers/1i2/?action=stop", "restart": "http://172.30.0.100:8080/v1/containers/1i2/?action=restart", "migrate": "http://172.30.0.100:8080/v1/containers/1i2/?action=migrate", "logs": "http://172.30.0.100:8080/v1/containers/1i2/?action=logs", "setlabels": "http://172.30.0.100:8080/v1/containers/1i2/?action=setlabels", "execute": "http://172.30.0.100:8080/v1/containers/1i2/?action=execute", "proxy": "http://172.30.0.100:8080/v1/containers/1i2/?action=proxy" }, "name": "Network Agent", "state": "running", "accountId": "1a5", "blkioDeviceOptions": null, "build": null, "capAdd": null, "capDrop": null, "command": null, "cpuSet": null, "cpuShares": null, "createIndex": null, "created": "2016-09-22T17:58:57Z", "createdTS": 1474567137000, "dataVolumeMounts": {}, "dataVolumes": [ "/var/lib/rancher/etc:/var/lib/rancher/etc:ro" ], "dataVolumesFrom": null, "deploymentUnitUuid": null, "description": null, "devices": null, "dns": null, "dnsSearch": null, "domainName": null, "entryPoint": null, "environment": null, "expose": null, "externalId": "129c67adc9fa084fbc8e1f963db0180896b88af3dca69c7e8fe0493284e4651c", "extraHosts": null, "firstRunning": 
"2016-09-22T17:59:11Z", "firstRunningTS": 1474567151000, "healthCheck": null, "healthState": null, "hostId": "1h1", "hostname": null, "imageUuid": "docker:rancher/agent-instance:v0.8.3", "kind": "container", "labels": { "io.rancher.container.system": "NetworkAgent", "io.rancher.container.uuid": "b0f88089-d28b-4388-93a7-889b750cd7cb", "io.rancher.container.name": "Network Agent", "io.rancher.container.agent_id": "2", "io.rancher.container.ip": "10.42.247.188/16" }, "logConfig": null, "lxcConf": null, "memory": null, "memorySwap": null, "nativeContainer": false, "networkContainerId": null, "networkIds": [], "networkMode": null, "pidMode": null, "ports": null, "primaryIpAddress": "10.42.247.188", "privileged": true, "publishAllPorts": false, "readOnly": false, "registryCredentialId": null, "removed": null, "requestedHostId": null, "restartPolicy": null, "securityOpt": null, "startCount": 3, "startOnCreate": true, "stdinOpen": false, "systemContainer": "NetworkAgent", "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "tty": false, "user": null, "uuid": "b0f88089-d28b-4388-93a7-889b750cd7cb", "version": "0", "volumeDriver": null, "workingDir": null } ], "sortLinks": { "accountId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=accountId", "agentId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=agentId", "allocationState": "http://172.30.0.100:8080/v1/containers?limit=2&sort=allocationState", "compute": "http://172.30.0.100:8080/v1/containers?limit=2&sort=compute", "createIndex": "http://172.30.0.100:8080/v1/containers?limit=2&sort=createIndex", "created": "http://172.30.0.100:8080/v1/containers?limit=2&sort=created", "deploymentUnitUuid": "http://172.30.0.100:8080/v1/containers?limit=2&sort=deploymentUnitUuid", "description": "http://172.30.0.100:8080/v1/containers?limit=2&sort=description", "dnsInternal": "http://172.30.0.100:8080/v1/containers?limit=2&sort=dnsInternal", "dnsSearchInternal": 
"http://172.30.0.100:8080/v1/containers?limit=2&sort=dnsSearchInternal", "domain": "http://172.30.0.100:8080/v1/containers?limit=2&sort=domain", "externalId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=externalId", "firstRunning": "http://172.30.0.100:8080/v1/containers?limit=2&sort=firstRunning", "healthState": "http://172.30.0.100:8080/v1/containers?limit=2&sort=healthState", "healthUpdated": "http://172.30.0.100:8080/v1/containers?limit=2&sort=healthUpdated", "hostname": "http://172.30.0.100:8080/v1/containers?limit=2&sort=hostname", "id": "http://172.30.0.100:8080/v1/containers?limit=2&sort=id", "imageId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=imageId", "instanceTriggeredStop": "http://172.30.0.100:8080/v1/containers?limit=2&sort=instanceTriggeredStop", "kind": "http://172.30.0.100:8080/v1/containers?limit=2&sort=kind", "memoryMb": "http://172.30.0.100:8080/v1/containers?limit=2&sort=memoryMb", "name": "http://172.30.0.100:8080/v1/containers?limit=2&sort=name", "nativeContainer": "http://172.30.0.100:8080/v1/containers?limit=2&sort=nativeContainer", "networkContainerId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=networkContainerId", "offeringId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=offeringId", "registryCredentialId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=registryCredentialId", "removeTime": "http://172.30.0.100:8080/v1/containers?limit=2&sort=removeTime", "removed": "http://172.30.0.100:8080/v1/containers?limit=2&sort=removed", "serviceIndexId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=serviceIndexId", "startCount": "http://172.30.0.100:8080/v1/containers?limit=2&sort=startCount", "state": "http://172.30.0.100:8080/v1/containers?limit=2&sort=state", "systemContainer": "http://172.30.0.100:8080/v1/containers?limit=2&sort=systemContainer", "token": "http://172.30.0.100:8080/v1/containers?limit=2&sort=token", "userdata": "http://172.30.0.100:8080/v1/containers?limit=2&sort=userdata", 
"uuid": "http://172.30.0.100:8080/v1/containers?limit=2&sort=uuid", "version": "http://172.30.0.100:8080/v1/containers?limit=2&sort=version", "zoneId": "http://172.30.0.100:8080/v1/containers?limit=2&sort=zoneId" }, "pagination": { "first": null, "previous": null, "next": "http://172.30.0.100:8080/v1/containers?limit=2&marker=m2", "limit": 2, "total": null, "partial": true }, "sort": null, "filters": { "accountId": null, "agentId": null, "allocationState": null, "compute": null, "createIndex": null, "created": null, "deploymentUnitUuid": null, "description": null, "dnsInternal": null, "dnsSearchInternal": null, "domain": null, "externalId": null, "firstRunning": null, "healthState": null, "healthUpdated": null, "hostname": null, "id": null, "imageId": null, "instanceTriggeredStop": null, "kind": null, "memoryMb": null, "name": null, "nativeContainer": null, "networkContainerId": null, "offeringId": null, "registryCredentialId": null, "removeTime": null, "removed": null, "serviceIndexId": null, "startCount": null, "state": null, "systemContainer": null, "token": null, "userdata": null, "uuid": null, "version": null, "zoneId": null }, "createDefaults": {} }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/ex_search_services.json0000664000175000017500000002750013153541406031214 0ustar kamikami00000000000000{ "type": "collection", "resourceType": "service", "links": { "self": "http://172.30.0.100:8080/v1/services" }, "createTypes": { "dnsService": "http://172.30.0.100:8080/v1/dnsservices", "externalService": "http://172.30.0.100:8080/v1/externalservices", "loadBalancerService": "http://172.30.0.100:8080/v1/loadbalancerservices", "service": "http://172.30.0.100:8080/v1/services" }, "actions": {}, "data": [ { "id": "1s3", "type": "service", "links": { "self": "http://172.30.0.100:8080/v1/services/1s3", "account": "http://172.30.0.100:8080/v1/services/1s3/account", "consumedbyservices": "http://172.30.0.100:8080/v1/services/1s3/consumedbyservices", 
"consumedservices": "http://172.30.0.100:8080/v1/services/1s3/consumedservices", "environment": "http://172.30.0.100:8080/v1/services/1s3/environment", "instances": "http://172.30.0.100:8080/v1/services/1s3/instances", "serviceExposeMaps": "http://172.30.0.100:8080/v1/services/1s3/serviceexposemaps", "containerStats": "http://172.30.0.100:8080/v1/services/1s3/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/services/1s3/?action=update", "restart": "http://172.30.0.100:8080/v1/services/1s3/?action=restart", "remove": "http://172.30.0.100:8080/v1/services/1s3/?action=remove", "setservicelinks": "http://172.30.0.100:8080/v1/services/1s3/?action=setservicelinks", "removeservicelink": "http://172.30.0.100:8080/v1/services/1s3/?action=removeservicelink", "upgrade": "http://172.30.0.100:8080/v1/services/1s3/?action=upgrade", "addservicelink": "http://172.30.0.100:8080/v1/services/1s3/?action=addservicelink", "deactivate": "http://172.30.0.100:8080/v1/services/1s3/?action=deactivate" }, "name": "ghost", "state": "active", "accountId": "1a5", "assignServiceIpAddress": false, "createIndex": 1, "created": "2016-09-22T18:00:26Z", "createdTS": 1474567226000, "currentScale": 1, "description": null, "environmentId": "1e2", "externalId": null, "fqdn": null, "healthState": "healthy", "kind": "service", "launchConfig": { "imageUuid": "docker:ghost", "kind": "container", "labels": { "io.rancher.service.hash": "af9b5ddd2891271b12a88129e2bc37ff57998182" }, "logConfig": {}, "networkMode": "managed", "ports": [ "80:2368/tcp" ], "privileged": false, "publishAllPorts": false, "readOnly": false, "startOnCreate": true, "stdinOpen": false, "tty": false, "version": "0", "vcpu": 1 }, "metadata": { "io.rancher.service.hash": "f49280e1f709117b76693b638834791e4f4ef0fd" }, "publicEndpoints": [ { "ipAddress": "172.30.0.101", "port": 80, "serviceId": "1s3", "hostId": "1h1", "instanceId": "1i5" } ], "removed": null, "retainIp": null, "scale": 1, "scalePolicy": null, 
"secondaryLaunchConfigs": [], "selectorContainer": null, "selectorLink": null, "startOnCreate": false, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "upgrade": null, "uuid": "c2ce7050-e5b5-42c7-a9fb-8ed8d33a4884", "vip": null }, { "id": "1s6", "type": "service", "links": { "self": "http://172.30.0.100:8080/v1/services/1s6", "account": "http://172.30.0.100:8080/v1/services/1s6/account", "consumedbyservices": "http://172.30.0.100:8080/v1/services/1s6/consumedbyservices", "consumedservices": "http://172.30.0.100:8080/v1/services/1s6/consumedservices", "environment": "http://172.30.0.100:8080/v1/services/1s6/environment", "instances": "http://172.30.0.100:8080/v1/services/1s6/instances", "serviceExposeMaps": "http://172.30.0.100:8080/v1/services/1s6/serviceexposemaps", "containerStats": "http://172.30.0.100:8080/v1/services/1s6/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/services/1s6/?action=update", "restart": "http://172.30.0.100:8080/v1/services/1s6/?action=restart", "remove": "http://172.30.0.100:8080/v1/services/1s6/?action=remove", "setservicelinks": "http://172.30.0.100:8080/v1/services/1s6/?action=setservicelinks", "removeservicelink": "http://172.30.0.100:8080/v1/services/1s6/?action=removeservicelink", "upgrade": "http://172.30.0.100:8080/v1/services/1s6/?action=upgrade", "addservicelink": "http://172.30.0.100:8080/v1/services/1s6/?action=addservicelink", "deactivate": "http://172.30.0.100:8080/v1/services/1s6/?action=deactivate" }, "name": "123", "state": "active", "accountId": "1a5", "assignServiceIpAddress": false, "createIndex": 1, "created": "2016-09-22T18:28:04Z", "createdTS": 1474568884000, "currentScale": 1, "description": null, "environmentId": "1e2", "externalId": null, "fqdn": null, "healthState": "healthy", "kind": "service", "launchConfig": { "capAdd": [], "capDrop": [], "count": null, "cpuSet": null, "cpuShares": null, "dataVolumes": [], "dataVolumesFrom": [], "description": null, 
"devices": [], "dns": [], "dnsSearch": [], "domainName": null, "hostname": null, "imageUuid": "docker:ubuntu:trusty", "kind": "container", "labels": { "io.rancher.container.pull_image": "always" }, "logConfig": { "config": {}, "driver": "" }, "memory": null, "memoryMb": null, "memorySwap": null, "networkMode": "managed", "pidMode": null, "ports": [], "privileged": false, "publishAllPorts": false, "readOnly": false, "requestedIpAddress": null, "startOnCreate": true, "stdinOpen": true, "tty": true, "user": null, "userdata": null, "version": "0bdea468-c3e9-4562-951c-d543958e966a", "volumeDriver": null, "workingDir": null, "dataVolumesFromLaunchConfigs": [], "networkLaunchConfig": null, "vcpu": 1 }, "metadata": null, "publicEndpoints": null, "removed": null, "retainIp": null, "scale": 1, "scalePolicy": null, "secondaryLaunchConfigs": [], "selectorContainer": null, "selectorLink": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "upgrade": { "inServiceStrategy": { "batchSize": 1, "intervalMillis": 2000, "launchConfig": { "capAdd": [], "capDrop": [], "count": null, "cpuSet": null, "cpuShares": null, "dataVolumes": [], "dataVolumesFrom": [], "description": null, "devices": [], "dns": [], "dnsSearch": [], "domainName": null, "hostname": null, "imageUuid": "docker:ubuntu:trusty", "kind": "container", "labels": { "io.rancher.container.pull_image": "always" }, "logConfig": { "config": {}, "driver": "" }, "memory": null, "memoryMb": null, "memorySwap": null, "networkMode": "managed", "pidMode": null, "ports": [], "privileged": false, "publishAllPorts": false, "readOnly": false, "requestedIpAddress": null, "startOnCreate": true, "stdinOpen": true, "tty": true, "user": null, "userdata": null, "version": "0bdea468-c3e9-4562-951c-d543958e966a", "volumeDriver": null, "workingDir": null, "dataVolumesFromLaunchConfigs": [], "networkLaunchConfig": null, "vcpu": 1 }, "previousLaunchConfig": { "capAdd": [], "capDrop": [], 
"count": null, "cpuSet": null, "cpuShares": null, "dataVolumes": [], "dataVolumesFrom": [], "description": null, "devices": [], "dns": [], "dnsSearch": [], "domainName": null, "hostname": null, "imageUuid": "docker:ubuntu:trusty", "kind": "container", "labels": { "io.rancher.container.pull_image": "always" }, "logConfig": { "config": {}, "driver": "" }, "memory": null, "memoryMb": null, "memorySwap": null, "networkMode": "managed", "pidMode": null, "ports": [], "privileged": false, "publishAllPorts": false, "readOnly": false, "requestedIpAddress": null, "startOnCreate": true, "stdinOpen": true, "tty": true, "user": null, "userdata": null, "version": "0", "volumeDriver": null, "workingDir": null, "dataVolumesFromLaunchConfigs": [], "networkLaunchConfig": null, "vcpu": 1 }, "previousSecondaryLaunchConfigs": [], "secondaryLaunchConfigs": [], "startFirst": true }, "toServiceStrategy": null }, "uuid": "c0ae4d08-e20a-45ef-9fb9-ad9f7cdeeb15", "vip": null } ], "sortLinks": { "accountId": "http://172.30.0.100:8080/v1/services?limit=4&sort=accountId", "createIndex": "http://172.30.0.100:8080/v1/services?limit=4&sort=createIndex", "created": "http://172.30.0.100:8080/v1/services?limit=4&sort=created", "description": "http://172.30.0.100:8080/v1/services?limit=4&sort=description", "environmentId": "http://172.30.0.100:8080/v1/services?limit=4&sort=environmentId", "externalId": "http://172.30.0.100:8080/v1/services?limit=4&sort=externalId", "healthState": "http://172.30.0.100:8080/v1/services?limit=4&sort=healthState", "id": "http://172.30.0.100:8080/v1/services?limit=4&sort=id", "kind": "http://172.30.0.100:8080/v1/services?limit=4&sort=kind", "name": "http://172.30.0.100:8080/v1/services?limit=4&sort=name", "removeTime": "http://172.30.0.100:8080/v1/services?limit=4&sort=removeTime", "removed": "http://172.30.0.100:8080/v1/services?limit=4&sort=removed", "selectorContainer": "http://172.30.0.100:8080/v1/services?limit=4&sort=selectorContainer", "selectorLink": 
"http://172.30.0.100:8080/v1/services?limit=4&sort=selectorLink", "state": "http://172.30.0.100:8080/v1/services?limit=4&sort=state", "uuid": "http://172.30.0.100:8080/v1/services?limit=4&sort=uuid", "vip": "http://172.30.0.100:8080/v1/services?limit=4&sort=vip" }, "pagination": { "first": null, "previous": null, "next": "http://172.30.0.100:8080/v1/services?limit=4&marker=m4", "limit": 4, "total": null, "partial": true }, "sort": null, "filters": { "accountId": null, "createIndex": null, "created": null, "description": null, "environmentId": null, "externalId": null, "healthState": null, "id": null, "kind": null, "name": null, "removeTime": null, "removed": null, "selectorContainer": null, "selectorLink": null, "state": null, "uuid": null, "vip": null }, "createDefaults": {} }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/deploy_container.json0000664000175000017500000000734213153541406030710 0ustar kamikami00000000000000{ "id": "1i31", "type": "container", "links": { "self": "http://172.30.0.100:8080/v1/containers/1i31", "account": "http://172.30.0.100:8080/v1/containers/1i31/account", "credentials": "http://172.30.0.100:8080/v1/containers/1i31/credentials", "healthcheckInstanceHostMaps": "http://172.30.0.100:8080/v1/containers/1i31/healthcheckinstancehostmaps", "hosts": "http://172.30.0.100:8080/v1/containers/1i31/hosts", "instanceLabels": "http://172.30.0.100:8080/v1/containers/1i31/instancelabels", "instanceLinks": "http://172.30.0.100:8080/v1/containers/1i31/instancelinks", "instances": "http://172.30.0.100:8080/v1/containers/1i31/instances", "mounts": "http://172.30.0.100:8080/v1/containers/1i31/mounts", "ports": "http://172.30.0.100:8080/v1/containers/1i31/ports", "serviceEvents": "http://172.30.0.100:8080/v1/containers/1i31/serviceevents", "serviceExposeMaps": "http://172.30.0.100:8080/v1/containers/1i31/serviceexposemaps", "services": "http://172.30.0.100:8080/v1/containers/1i31/services", "targetInstanceLinks": 
"http://172.30.0.100:8080/v1/containers/1i31/targetinstancelinks", "volumes": "http://172.30.0.100:8080/v1/containers/1i31/volumes", "stats": "http://172.30.0.100:8080/v1/containers/1i31/stats", "containerStats": "http://172.30.0.100:8080/v1/containers/1i31/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/containers/1i31/?action=update", "error": "http://172.30.0.100:8080/v1/containers/1i31/?action=error", "remove": "http://172.30.0.100:8080/v1/containers/1i31/?action=remove", "start": "http://172.30.0.100:8080/v1/containers/1i31/?action=start", "logs": "http://172.30.0.100:8080/v1/containers/1i31/?action=logs", "setlabels": "http://172.30.0.100:8080/v1/containers/1i31/?action=setlabels" }, "name": "newcontainer", "state": "running", "accountId": "1a5", "blkioDeviceOptions": null, "build": null, "capAdd": null, "capDrop": null, "command": null, "cpuSet": null, "cpuShares": null, "createIndex": null, "created": "2016-10-06T15:55:58Z", "createdTS": 1475769358000, "dataVolumeMounts": {}, "dataVolumes": [], "dataVolumesFrom": null, "deploymentUnitUuid": null, "description": null, "devices": null, "dns": [ "169.254.169.250" ], "dnsSearch": [ "rancher.internal" ], "dockerPorts": [], "domainName": null, "entryPoint": null, "environment": { "STORAGE_TYPE": "file" }, "expose": null, "externalId": "ab334bd25d25db7b94fdcead8f5c023b05bed424f56243187aa90f5ef7f07b09", "extraHosts": null, "firstRunning": "2016-10-06T15:56:00Z", "firstRunningTS": 1475769360000, "healthCheck": null, "healthState": null, "hostId": "1h1", "hostname": null, "imageUuid": "docker:rlister/hastebin:latest", "kind": "container", "labels": { "io.rancher.container.uuid": "6d3dcf5f-28b8-4e60-9bf1-618b76a9a805", "io.rancher.container.name": "newcontainer", "io.rancher.container.ip": "10.42.204.104/16" }, "logConfig": null, "lxcConf": null, "memory": null, "memorySwap": null, "nativeContainer": false, "networkContainerId": null, "networkMode": "managed", "pidMode": null, "ports": null, 
"primaryIpAddress": "10.42.204.104", "privileged": false, "publishAllPorts": false, "readOnly": false, "registryCredentialId": null, "removed": null, "requestedHostId": null, "restartPolicy": null, "securityOpt": null, "startCount": 1, "startOnCreate": true, "stdinOpen": false, "systemContainer": null, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "tty": false, "user": null, "uuid": "6d3dcf5f-28b8-4e60-9bf1-618b76a9a805", "version": "0", "volumeDriver": null, "workingDir": null }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/ex_destroy_service.json0000664000175000017500000000414713153541406031257 0ustar kamikami00000000000000{ "id": "1s13", "type": "service", "links": { "self": "http://172.30.0.100:8080/v1/services/1s13", "account": "http://172.30.0.100:8080/v1/services/1s13/account", "consumedbyservices": "http://172.30.0.100:8080/v1/services/1s13/consumedbyservices", "consumedservices": "http://172.30.0.100:8080/v1/services/1s13/consumedservices", "environment": "http://172.30.0.100:8080/v1/services/1s13/environment", "instances": "http://172.30.0.100:8080/v1/services/1s13/instances", "serviceExposeMaps": "http://172.30.0.100:8080/v1/services/1s13/serviceexposemaps", "containerStats": "http://172.30.0.100:8080/v1/services/1s13/containerstats" }, "actions": { "setservicelinks": "http://172.30.0.100:8080/v1/services/1s13/?action=setservicelinks", "removeservicelink": "http://172.30.0.100:8080/v1/services/1s13/?action=removeservicelink", "addservicelink": "http://172.30.0.100:8080/v1/services/1s13/?action=addservicelink" }, "name": "newservice", "state": "removing", "accountId": "1a5", "assignServiceIpAddress": false, "createIndex": 1, "created": "2016-10-06T15:31:35Z", "createdTS": 1475767895000, "currentScale": 1, "description": null, "environmentId": "1e6", "externalId": null, "fqdn": null, "healthState": "unhealthy", "kind": "service", "launchConfig": { "environment": { "root_password": "password" }, 
"imageUuid": "docker:rlister/hastebin:latest", "kind": "container", "networkMode": "managed", "privileged": false, "publishAllPorts": false, "readOnly": false, "startOnCreate": true, "stdinOpen": false, "tty": false, "version": "0", "vcpu": 1 }, "metadata": null, "publicEndpoints": null, "removed": "2016-10-06T15:45:03Z", "removedTS": 1475768703000, "retainIp": null, "scale": 1, "scalePolicy": null, "secondaryLaunchConfigs": [], "selectorContainer": null, "selectorLink": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "upgrade": null, "uuid": "de2cd54f-9936-49fb-a41a-35653c4510f7", "vip": null }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/start_container.json0000664000175000017500000000734413153541406030553 0ustar kamikami00000000000000{ "id": "1i31", "type": "container", "links": { "self": "http://172.30.0.100:8080/v1/containers/1i31", "account": "http://172.30.0.100:8080/v1/containers/1i31/account", "credentials": "http://172.30.0.100:8080/v1/containers/1i31/credentials", "healthcheckInstanceHostMaps": "http://172.30.0.100:8080/v1/containers/1i31/healthcheckinstancehostmaps", "hosts": "http://172.30.0.100:8080/v1/containers/1i31/hosts", "instanceLabels": "http://172.30.0.100:8080/v1/containers/1i31/instancelabels", "instanceLinks": "http://172.30.0.100:8080/v1/containers/1i31/instancelinks", "instances": "http://172.30.0.100:8080/v1/containers/1i31/instances", "mounts": "http://172.30.0.100:8080/v1/containers/1i31/mounts", "ports": "http://172.30.0.100:8080/v1/containers/1i31/ports", "serviceEvents": "http://172.30.0.100:8080/v1/containers/1i31/serviceevents", "serviceExposeMaps": "http://172.30.0.100:8080/v1/containers/1i31/serviceexposemaps", "services": "http://172.30.0.100:8080/v1/containers/1i31/services", "targetInstanceLinks": "http://172.30.0.100:8080/v1/containers/1i31/targetinstancelinks", "volumes": "http://172.30.0.100:8080/v1/containers/1i31/volumes", "stats": 
"http://172.30.0.100:8080/v1/containers/1i31/stats", "containerStats": "http://172.30.0.100:8080/v1/containers/1i31/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/containers/1i31/?action=update", "error": "http://172.30.0.100:8080/v1/containers/1i31/?action=error", "remove": "http://172.30.0.100:8080/v1/containers/1i31/?action=remove", "start": "http://172.30.0.100:8080/v1/containers/1i31/?action=start", "logs": "http://172.30.0.100:8080/v1/containers/1i31/?action=logs", "setlabels": "http://172.30.0.100:8080/v1/containers/1i31/?action=setlabels" }, "name": "newcontainer", "state": "starting", "accountId": "1a5", "blkioDeviceOptions": null, "build": null, "capAdd": null, "capDrop": null, "command": null, "cpuSet": null, "cpuShares": null, "createIndex": null, "created": "2016-10-06T15:55:58Z", "createdTS": 1475769358000, "dataVolumeMounts": {}, "dataVolumes": [], "dataVolumesFrom": null, "deploymentUnitUuid": null, "description": null, "devices": null, "dns": [ "169.254.169.250" ], "dnsSearch": [ "rancher.internal" ], "dockerPorts": [], "domainName": null, "entryPoint": null, "environment": { "STORAGE_TYPE": "file" }, "expose": null, "externalId": "ab334bd25d25db7b94fdcead8f5c023b05bed424f56243187aa90f5ef7f07b09", "extraHosts": null, "firstRunning": "2016-10-06T15:56:00Z", "firstRunningTS": 1475769360000, "healthCheck": null, "healthState": null, "hostId": "1h1", "hostname": null, "imageUuid": "docker:rlister/hastebin:latest", "kind": "container", "labels": { "io.rancher.container.uuid": "6d3dcf5f-28b8-4e60-9bf1-618b76a9a805", "io.rancher.container.name": "newcontainer", "io.rancher.container.ip": "10.42.204.104/16" }, "logConfig": null, "lxcConf": null, "memory": null, "memorySwap": null, "nativeContainer": false, "networkContainerId": null, "networkMode": "managed", "pidMode": null, "ports": null, "primaryIpAddress": "10.42.204.104", "privileged": false, "publishAllPorts": false, "readOnly": false, "registryCredentialId": null, "removed": 
null, "requestedHostId": null, "restartPolicy": null, "securityOpt": null, "startCount": 1, "startOnCreate": true, "stdinOpen": false, "systemContainer": null, "transitioning": "yes", "transitioningMessage": null, "transitioningProgress": null, "tty": false, "user": null, "uuid": "6d3dcf5f-28b8-4e60-9bf1-618b76a9a805", "version": "0", "volumeDriver": null, "workingDir": null }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/ex_destroy_stack.json0000664000175000017500000000256613153541406030727 0ustar kamikami00000000000000{ "id": "1e10", "type": "environment", "links": { "self": "http://172.30.0.100:8080/v1/environments/1e10", "account": "http://172.30.0.100:8080/v1/environments/1e10/account", "services": "http://172.30.0.100:8080/v1/environments/1e10/services", "composeConfig": "http://172.30.0.100:8080/v1/environments/1e10/composeconfig" }, "actions": { "addoutputs": "http://172.30.0.100:8080/v1/environments/1e10/?action=addoutputs", "activateservices": "http://172.30.0.100:8080/v1/environments/1e10/?action=activateservices", "deactivateservices": "http://172.30.0.100:8080/v1/environments/1e10/?action=deactivateservices", "exportconfig": "http://172.30.0.100:8080/v1/environments/1e10/?action=exportconfig" }, "name": "newstack1", "state": "removing", "accountId": "1a5", "created": "2016-10-06T14:15:41Z", "createdTS": 1475763341000, "description": null, "dockerCompose": null, "environment": { "root_password": "password" }, "externalId": null, "healthState": "healthy", "kind": "environment", "outputs": null, "previousEnvironment": null, "previousExternalId": null, "rancherCompose": null, "removed": "2016-10-06T14:56:16Z", "removedTS": 1475765776000, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "uuid": "289f380f-d00a-4faf-b69f-53a559dbfd05" }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/ex_deploy_service.json0000664000175000017500000000414513153541406031060 0ustar 
kamikami00000000000000{ "id": "1s13", "type": "service", "links": { "self": "http://172.30.0.100:8080/v1/services/1s13", "account": "http://172.30.0.100:8080/v1/services/1s13/account", "consumedbyservices": "http://172.30.0.100:8080/v1/services/1s13/consumedbyservices", "consumedservices": "http://172.30.0.100:8080/v1/services/1s13/consumedservices", "environment": "http://172.30.0.100:8080/v1/services/1s13/environment", "instances": "http://172.30.0.100:8080/v1/services/1s13/instances", "serviceExposeMaps": "http://172.30.0.100:8080/v1/services/1s13/serviceexposemaps", "containerStats": "http://172.30.0.100:8080/v1/services/1s13/containerstats" }, "actions": { "setservicelinks": "http://172.30.0.100:8080/v1/services/1s13/?action=setservicelinks", "removeservicelink": "http://172.30.0.100:8080/v1/services/1s13/?action=removeservicelink", "addservicelink": "http://172.30.0.100:8080/v1/services/1s13/?action=addservicelink" }, "name": "newservice", "state": "active", "accountId": "1a5", "assignServiceIpAddress": false, "createIndex": 1, "created": "2016-10-06T15:31:35Z", "createdTS": 1475767895000, "currentScale": 1, "description": null, "environmentId": "1e6", "externalId": null, "fqdn": null, "healthState": "unhealthy", "kind": "service", "launchConfig": { "environment": { "root_password": "password" }, "imageUuid": "docker:rlister/hastebin:latest", "kind": "container", "networkMode": "managed", "privileged": false, "publishAllPorts": false, "readOnly": false, "startOnCreate": true, "stdinOpen": false, "tty": false, "version": "0", "vcpu": 1 }, "metadata": null, "publicEndpoints": null, "removed": "2016-10-06T15:45:03Z", "removedTS": 1475768703000, "retainIp": null, "scale": 1, "scalePolicy": null, "secondaryLaunchConfigs": [], "selectorContainer": null, "selectorLink": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "upgrade": null, "uuid": "de2cd54f-9936-49fb-a41a-35653c4510f7", "vip": null 
}apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/ex_activate_stack.json0000664000175000017500000001453313153541406031033 0ustar kamikami00000000000000{ "id": "1e1", "type": "environment", "links": { "self": "http://172.30.0.100:8080/v1/environments/1e1", "account": "http://172.30.0.100:8080/v1/environments/1e1/account", "services": "http://172.30.0.100:8080/v1/environments/1e1/services", "composeConfig": "http://172.30.0.100:8080/v1/environments/1e1/composeconfig" }, "actions": { "upgrade": "http://172.30.0.100:8080/v1/environments/1e1/?action=upgrade", "update": "http://172.30.0.100:8080/v1/environments/1e1/?action=update", "remove": "http://172.30.0.100:8080/v1/environments/1e1/?action=remove", "addoutputs": "http://172.30.0.100:8080/v1/environments/1e1/?action=addoutputs", "activateservices": "http://172.30.0.100:8080/v1/environments/1e1/?action=activateservices", "deactivateservices": "http://172.30.0.100:8080/v1/environments/1e1/?action=deactivateservices", "exportconfig": "http://172.30.0.100:8080/v1/environments/1e1/?action=exportconfig" }, "name": "Concrete5", "state": "active", "accountId": "1a5", "created": "2016-09-22T17:58:53Z", "createdTS": 1474567133000, "description": null, "dockerCompose": "CMSMysql:\r\n environment:\r\n MYSQL_ROOT_PASSWORD: ${root_password}\r\n MYSQL_DATABASE: ${db_name}\r\n MYSQL_USER: ${db_username}\r\n MYSQL_PASSWORD: ${db_password}\r\n labels:\r\n io.rancher.container.pull_image: always\r\n tty: true\r\n image: mysql\r\n volumes:\r\n - ${db_data_location}:/var/lib/mysql\r\n stdin_open: true\r\n volume_driver: ${volume_driver}\r\n\r\nCMSConfig:\r\n image: opensaas/concrete5\r\n tty: true\r\n stdin_open: true\r\n links:\r\n - CMSMysql:mysql\r\n volumes:\r\n - ${cms_application_data}:/var/www/html/application\r\n - ${cms_packages_data}:/var/www/html/packages\r\n labels:\r\n io.rancher.container.hostname_override: container_name\r\n io.rancher.container.start_once: true\r\n volume_driver: ${volume_driver}\r\n 
command: bash -c \"chown -R www-data. application; chown -R www-data. packages; sleep 2m; php -f concrete/bin/concrete5.php c5:install --db-server=mysql --db-username=${db_username} --db-password=${db_password} --db-database=${db_name} --site=${cms_sitename} --admin-email=${cms_admin_email} --admin-password=${cms_admin_password} -n -vvv\"\r\n\r\nConcrete5App:\r\n labels:\r\n io.rancher.container.pull_image: always\r\n io.rancher.sidekicks: CMSConfig\r\n tty: true\r\n links:\r\n - CMSMysql:mysql\r\n image: opensaas/concrete5\r\n volumes:\r\n - ${cms_application_data}:/var/www/html/application\r\n - ${cms_packages_data}:/var/www/html/packages\r\n volume_driver: ${volume_driver}\r\n stdin_open: true", "environment": { "root_password": "password", "db_name": "CMS_DB", "db_username": "CMS_USER", "db_password": "password", "db_data_location": "CMS_DB_DATA", "volume_driver": "", "cms_application_data": "CMS_APP_DATA", "cms_packages_data": "CMS_PACK_DATA", "cms_admin_email": "admin@example.com", "cms_admin_password": "password", "cms_sitename": "MySite" }, "externalId": "catalog://community:Concrete5:1", "healthState": "unhealthy", "kind": "environment", "outputs": null, "previousEnvironment": null, "previousExternalId": null, "rancherCompose": ".catalog:\r\n name: \"Concrete5\"\r\n version: \"v0.2\"\r\n description: \"Concrete5 CMS\"\r\n uuid: Concrete5-1\r\n minimum_rancher_version: v0.51.0\r\n questions:\r\n - variable: root_password\r\n description: \"MySQL root password - keep this password complex and secure\"\r\n label: \"MySQL Root Password\"\r\n required: true\r\n default: \"password\"\r\n type: \"string\"\r\n - variable: db_name\r\n description: \"MySQL Database Name - to use in the Concrete5 setup\"\r\n label: \"MySQL Database Name\"\r\n required: true\r\n default: \"CMS_DB\"\r\n type: \"string\"\r\n - variable: db_username\r\n description: \"MySQL Username - to use in the Concrete5 setup\"\r\n label: \"MySQL Username\"\r\n required: true\r\n default: 
\"CMS_USER\"\r\n type: \"string\"\r\n - variable: db_password\r\n description: \"MySQL password for the above user - to use in the Concrete5 setup\"\r\n label: \"MySQL Password\"\r\n required: true\r\n default: \"password\"\r\n type: \"string\"\r\n - variable: db_data_location\r\n description: \"Location on the host for the database files\"\r\n label: \"DB Data Location\"\r\n required: true\r\n default: \"CMS_DB_DATA\"\r\n type: \"string\"\r\n - variable: volume_driver\r\n description: \"Volume Driver for the persistant data locations requires docker 1.7\"\r\n label: \"Volume Driver\"\r\n type: \"string\"\r\n - variable: cms_application_data\r\n description: \"Concrcte5 application folder for persistant data storage\"\r\n label: \"Application Folder\"\r\n required: true\r\n default: \"CMS_APP_DATA\"\r\n type: \"string\"\r\n - variable: cms_packages_data\r\n description: \"Concrcte5 packages folder for persistant data storage\"\r\n label: \"Packages Folder\"\r\n required: true\r\n default: \"CMS_PACK_DATA\"\r\n type: \"string\"\r\n - variable: cms_admin_email\r\n description: \"Concrcete5 admin email address\"\r\n label: \"Admin Email\"\r\n required: true\r\n default: \"admin@example.com\"\r\n type: \"string\"\r\n - variable: cms_admin_password\r\n description: \"Concrcete5 admin password\"\r\n label: \"Admin Password\"\r\n required: true\r\n default: \"password\"\r\n type: \"string\"\r\n - variable: cms_sitename\r\n description: \"Concrcete5 Sitename\"\r\n label: \"Sitename\"\r\n required: true\r\n default: \"MySite\"\r\n type: \"string\"\r\nCMSMysql:\r\n scale: 1\r\nConcrete5App:\r\n scale: 1\r\n\r\n", "removed": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "uuid": "e129ed27-3823-4e2e-af62-4565d68995d4" }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/stop_container.json0000664000175000017500000000734413153541406030403 0ustar kamikami00000000000000{ "id": "1i31", "type": 
"container", "links": { "self": "http://172.30.0.100:8080/v1/containers/1i31", "account": "http://172.30.0.100:8080/v1/containers/1i31/account", "credentials": "http://172.30.0.100:8080/v1/containers/1i31/credentials", "healthcheckInstanceHostMaps": "http://172.30.0.100:8080/v1/containers/1i31/healthcheckinstancehostmaps", "hosts": "http://172.30.0.100:8080/v1/containers/1i31/hosts", "instanceLabels": "http://172.30.0.100:8080/v1/containers/1i31/instancelabels", "instanceLinks": "http://172.30.0.100:8080/v1/containers/1i31/instancelinks", "instances": "http://172.30.0.100:8080/v1/containers/1i31/instances", "mounts": "http://172.30.0.100:8080/v1/containers/1i31/mounts", "ports": "http://172.30.0.100:8080/v1/containers/1i31/ports", "serviceEvents": "http://172.30.0.100:8080/v1/containers/1i31/serviceevents", "serviceExposeMaps": "http://172.30.0.100:8080/v1/containers/1i31/serviceexposemaps", "services": "http://172.30.0.100:8080/v1/containers/1i31/services", "targetInstanceLinks": "http://172.30.0.100:8080/v1/containers/1i31/targetinstancelinks", "volumes": "http://172.30.0.100:8080/v1/containers/1i31/volumes", "stats": "http://172.30.0.100:8080/v1/containers/1i31/stats", "containerStats": "http://172.30.0.100:8080/v1/containers/1i31/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/containers/1i31/?action=update", "error": "http://172.30.0.100:8080/v1/containers/1i31/?action=error", "remove": "http://172.30.0.100:8080/v1/containers/1i31/?action=remove", "start": "http://172.30.0.100:8080/v1/containers/1i31/?action=start", "logs": "http://172.30.0.100:8080/v1/containers/1i31/?action=logs", "setlabels": "http://172.30.0.100:8080/v1/containers/1i31/?action=setlabels" }, "name": "newcontainer", "state": "stopping", "accountId": "1a5", "blkioDeviceOptions": null, "build": null, "capAdd": null, "capDrop": null, "command": null, "cpuSet": null, "cpuShares": null, "createIndex": null, "created": "2016-10-06T15:55:58Z", "createdTS": 1475769358000, 
"dataVolumeMounts": {}, "dataVolumes": [], "dataVolumesFrom": null, "deploymentUnitUuid": null, "description": null, "devices": null, "dns": [ "169.254.169.250" ], "dnsSearch": [ "rancher.internal" ], "dockerPorts": [], "domainName": null, "entryPoint": null, "environment": { "STORAGE_TYPE": "file" }, "expose": null, "externalId": "ab334bd25d25db7b94fdcead8f5c023b05bed424f56243187aa90f5ef7f07b09", "extraHosts": null, "firstRunning": "2016-10-06T15:56:00Z", "firstRunningTS": 1475769360000, "healthCheck": null, "healthState": null, "hostId": "1h1", "hostname": null, "imageUuid": "docker:rlister/hastebin:latest", "kind": "container", "labels": { "io.rancher.container.uuid": "6d3dcf5f-28b8-4e60-9bf1-618b76a9a805", "io.rancher.container.name": "newcontainer", "io.rancher.container.ip": "10.42.204.104/16" }, "logConfig": null, "lxcConf": null, "memory": null, "memorySwap": null, "nativeContainer": false, "networkContainerId": null, "networkMode": "managed", "pidMode": null, "ports": null, "primaryIpAddress": "10.42.204.104", "privileged": false, "publishAllPorts": false, "readOnly": false, "registryCredentialId": null, "removed": null, "requestedHostId": null, "restartPolicy": null, "securityOpt": null, "startCount": 1, "startOnCreate": true, "stdinOpen": false, "systemContainer": null, "transitioning": "yes", "transitioningMessage": null, "transitioningProgress": null, "tty": false, "user": null, "uuid": "6d3dcf5f-28b8-4e60-9bf1-618b76a9a805", "version": "0", "volumeDriver": null, "workingDir": null }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/ex_list_services.json0000664000175000017500000004576613153541406030740 0ustar kamikami00000000000000{ "type": "collection", "resourceType": "service", "links": { "self": "http://172.30.0.100:8080/v1/services" }, "createTypes": { "dnsService": "http://172.30.0.100:8080/v1/dnsservices", "externalService": "http://172.30.0.100:8080/v1/externalservices", "loadBalancerService": 
"http://172.30.0.100:8080/v1/loadbalancerservices", "service": "http://172.30.0.100:8080/v1/services" }, "actions": {}, "data": [ { "id": "1s1", "type": "service", "links": { "self": "http://172.30.0.100:8080/v1/services/1s1", "account": "http://172.30.0.100:8080/v1/services/1s1/account", "consumedbyservices": "http://172.30.0.100:8080/v1/services/1s1/consumedbyservices", "consumedservices": "http://172.30.0.100:8080/v1/services/1s1/consumedservices", "environment": "http://172.30.0.100:8080/v1/services/1s1/environment", "instances": "http://172.30.0.100:8080/v1/services/1s1/instances", "serviceExposeMaps": "http://172.30.0.100:8080/v1/services/1s1/serviceexposemaps", "containerStats": "http://172.30.0.100:8080/v1/services/1s1/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/services/1s1/?action=update", "remove": "http://172.30.0.100:8080/v1/services/1s1/?action=remove", "setservicelinks": "http://172.30.0.100:8080/v1/services/1s1/?action=setservicelinks", "removeservicelink": "http://172.30.0.100:8080/v1/services/1s1/?action=removeservicelink", "activate": "http://172.30.0.100:8080/v1/services/1s1/?action=activate", "upgrade": "http://172.30.0.100:8080/v1/services/1s1/?action=upgrade", "addservicelink": "http://172.30.0.100:8080/v1/services/1s1/?action=addservicelink" }, "name": "CMSMysql", "state": "inactive", "accountId": "1a5", "assignServiceIpAddress": false, "createIndex": 1, "created": "2016-09-22T17:58:54Z", "createdTS": 1474567134000, "currentScale": 1, "description": null, "environmentId": "1e1", "externalId": null, "fqdn": null, "healthState": "unhealthy", "kind": "service", "launchConfig": { "dataVolumes": [ "CMS_DB_DATA:/var/lib/mysql" ], "environment": { "MYSQL_DATABASE": "CMS_DB", "MYSQL_PASSWORD": "password", "MYSQL_ROOT_PASSWORD": "password", "MYSQL_USER": "CMS_USER" }, "imageUuid": "docker:mysql", "kind": "container", "labels": { "io.rancher.container.pull_image": "always", "io.rancher.service.hash": 
"c5d6bacef4be47a5ca5d1517a1d33319d024cdde" }, "logConfig": {}, "networkMode": "managed", "privileged": false, "publishAllPorts": false, "readOnly": false, "startOnCreate": true, "stdinOpen": true, "tty": true, "version": "0", "vcpu": 1 }, "metadata": { "io.rancher.service.hash": "41ccfd7f8023f2efcc758a925a9f461d607c990d" }, "publicEndpoints": null, "removed": null, "retainIp": null, "scale": 1, "scalePolicy": null, "secondaryLaunchConfigs": [], "selectorContainer": null, "selectorLink": null, "startOnCreate": false, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "upgrade": null, "uuid": "85f4e9fc-c311-45a0-9a1a-a9da229aaa2a", "vip": null }, { "id": "1s2", "type": "service", "links": { "self": "http://172.30.0.100:8080/v1/services/1s2", "account": "http://172.30.0.100:8080/v1/services/1s2/account", "consumedbyservices": "http://172.30.0.100:8080/v1/services/1s2/consumedbyservices", "consumedservices": "http://172.30.0.100:8080/v1/services/1s2/consumedservices", "environment": "http://172.30.0.100:8080/v1/services/1s2/environment", "instances": "http://172.30.0.100:8080/v1/services/1s2/instances", "serviceExposeMaps": "http://172.30.0.100:8080/v1/services/1s2/serviceexposemaps", "containerStats": "http://172.30.0.100:8080/v1/services/1s2/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/services/1s2/?action=update", "remove": "http://172.30.0.100:8080/v1/services/1s2/?action=remove", "setservicelinks": "http://172.30.0.100:8080/v1/services/1s2/?action=setservicelinks", "removeservicelink": "http://172.30.0.100:8080/v1/services/1s2/?action=removeservicelink", "activate": "http://172.30.0.100:8080/v1/services/1s2/?action=activate", "upgrade": "http://172.30.0.100:8080/v1/services/1s2/?action=upgrade", "addservicelink": "http://172.30.0.100:8080/v1/services/1s2/?action=addservicelink" }, "name": "Concrete5App", "state": "inactive", "accountId": "1a5", "assignServiceIpAddress": false, "createIndex": 2, "created": 
"2016-09-22T17:58:54Z", "createdTS": 1474567134000, "currentScale": 1, "description": null, "environmentId": "1e1", "externalId": null, "fqdn": null, "healthState": "degraded", "kind": "service", "launchConfig": { "dataVolumes": [ "CMS_APP_DATA:/var/www/html/application", "CMS_PACK_DATA:/var/www/html/packages" ], "imageUuid": "docker:opensaas/concrete5", "kind": "container", "labels": { "io.rancher.container.pull_image": "always", "io.rancher.service.hash": "6033469e7596fd16896eb87b3a99f50af2dbf522", "io.rancher.sidekicks": "CMSConfig" }, "logConfig": {}, "networkMode": "managed", "privileged": false, "publishAllPorts": false, "readOnly": false, "startOnCreate": true, "stdinOpen": true, "tty": true, "version": "0", "vcpu": 1 }, "metadata": { "io.rancher.service.hash": "037726f2f6d389022953a652a5d3e5775415d0b0" }, "publicEndpoints": null, "removed": null, "retainIp": null, "scale": 1, "scalePolicy": null, "secondaryLaunchConfigs": [ { "command": [ "bash", "-c", "chown -R www-data. application; chown -R www-data. 
packages; sleep 2m; php -f concrete/bin/concrete5.php c5:install --db-server=mysql --db-username=CMS_USER --db-password=password --db-database=CMS_DB --site=MySite --admin-email=admin@example.com --admin-password=password -n -vvv" ], "dataVolumes": [ "CMS_APP_DATA:/var/www/html/application", "CMS_PACK_DATA:/var/www/html/packages" ], "imageUuid": "docker:opensaas/concrete5", "labels": { "io.rancher.container.hostname_override": "container_name", "io.rancher.container.start_once": "true", "io.rancher.service.hash": "db08c70dbb11ff2955a5cac93c2c6e5e1ac95e7e" }, "logConfig": {}, "name": "CMSConfig", "networkMode": "managed", "startOnCreate": true, "stdinOpen": true, "tty": true, "kind": "container", "privileged": false, "publishAllPorts": false, "readOnly": false, "vcpu": 1, "version": "0" } ], "selectorContainer": null, "selectorLink": null, "startOnCreate": false, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "upgrade": null, "uuid": "1d824bd6-e18b-4ac4-8ea1-5cd0a3c7e234", "vip": null }, { "id": "1s3", "type": "service", "links": { "self": "http://172.30.0.100:8080/v1/services/1s3", "account": "http://172.30.0.100:8080/v1/services/1s3/account", "consumedbyservices": "http://172.30.0.100:8080/v1/services/1s3/consumedbyservices", "consumedservices": "http://172.30.0.100:8080/v1/services/1s3/consumedservices", "environment": "http://172.30.0.100:8080/v1/services/1s3/environment", "instances": "http://172.30.0.100:8080/v1/services/1s3/instances", "serviceExposeMaps": "http://172.30.0.100:8080/v1/services/1s3/serviceexposemaps", "containerStats": "http://172.30.0.100:8080/v1/services/1s3/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/services/1s3/?action=update", "restart": "http://172.30.0.100:8080/v1/services/1s3/?action=restart", "remove": "http://172.30.0.100:8080/v1/services/1s3/?action=remove", "setservicelinks": "http://172.30.0.100:8080/v1/services/1s3/?action=setservicelinks", "removeservicelink": 
"http://172.30.0.100:8080/v1/services/1s3/?action=removeservicelink", "upgrade": "http://172.30.0.100:8080/v1/services/1s3/?action=upgrade", "addservicelink": "http://172.30.0.100:8080/v1/services/1s3/?action=addservicelink", "deactivate": "http://172.30.0.100:8080/v1/services/1s3/?action=deactivate" }, "name": "ghost", "state": "active", "accountId": "1a5", "assignServiceIpAddress": false, "createIndex": 1, "created": "2016-09-22T18:00:26Z", "createdTS": 1474567226000, "currentScale": 1, "description": null, "environmentId": "1e2", "externalId": null, "fqdn": null, "healthState": "healthy", "kind": "service", "launchConfig": { "imageUuid": "docker:ghost", "kind": "container", "labels": { "io.rancher.service.hash": "af9b5ddd2891271b12a88129e2bc37ff57998182" }, "logConfig": {}, "networkMode": "managed", "ports": [ "80:2368/tcp" ], "privileged": false, "publishAllPorts": false, "readOnly": false, "startOnCreate": true, "stdinOpen": false, "tty": false, "version": "0", "vcpu": 1 }, "metadata": { "io.rancher.service.hash": "f49280e1f709117b76693b638834791e4f4ef0fd" }, "publicEndpoints": [ { "ipAddress": "172.30.0.101", "port": 80, "serviceId": "1s3", "hostId": "1h1", "instanceId": "1i5" } ], "removed": null, "retainIp": null, "scale": 1, "scalePolicy": null, "secondaryLaunchConfigs": [], "selectorContainer": null, "selectorLink": null, "startOnCreate": false, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "upgrade": null, "uuid": "c2ce7050-e5b5-42c7-a9fb-8ed8d33a4884", "vip": null }, { "id": "1s6", "type": "service", "links": { "self": "http://172.30.0.100:8080/v1/services/1s6", "account": "http://172.30.0.100:8080/v1/services/1s6/account", "consumedbyservices": "http://172.30.0.100:8080/v1/services/1s6/consumedbyservices", "consumedservices": "http://172.30.0.100:8080/v1/services/1s6/consumedservices", "environment": "http://172.30.0.100:8080/v1/services/1s6/environment", "instances": 
"http://172.30.0.100:8080/v1/services/1s6/instances", "serviceExposeMaps": "http://172.30.0.100:8080/v1/services/1s6/serviceexposemaps", "containerStats": "http://172.30.0.100:8080/v1/services/1s6/containerstats" }, "actions": { "update": "http://172.30.0.100:8080/v1/services/1s6/?action=update", "restart": "http://172.30.0.100:8080/v1/services/1s6/?action=restart", "remove": "http://172.30.0.100:8080/v1/services/1s6/?action=remove", "setservicelinks": "http://172.30.0.100:8080/v1/services/1s6/?action=setservicelinks", "removeservicelink": "http://172.30.0.100:8080/v1/services/1s6/?action=removeservicelink", "upgrade": "http://172.30.0.100:8080/v1/services/1s6/?action=upgrade", "addservicelink": "http://172.30.0.100:8080/v1/services/1s6/?action=addservicelink", "deactivate": "http://172.30.0.100:8080/v1/services/1s6/?action=deactivate" }, "name": "123", "state": "active", "accountId": "1a5", "assignServiceIpAddress": false, "createIndex": 1, "created": "2016-09-22T18:28:04Z", "createdTS": 1474568884000, "currentScale": 1, "description": null, "environmentId": "1e2", "externalId": null, "fqdn": null, "healthState": "healthy", "kind": "service", "launchConfig": { "capAdd": [], "capDrop": [], "count": null, "cpuSet": null, "cpuShares": null, "dataVolumes": [], "dataVolumesFrom": [], "description": null, "devices": [], "dns": [], "dnsSearch": [], "domainName": null, "hostname": null, "imageUuid": "docker:ubuntu:trusty", "kind": "container", "labels": { "io.rancher.container.pull_image": "always" }, "logConfig": { "config": {}, "driver": "" }, "memory": null, "memoryMb": null, "memorySwap": null, "networkMode": "managed", "pidMode": null, "ports": [], "privileged": false, "publishAllPorts": false, "readOnly": false, "requestedIpAddress": null, "startOnCreate": true, "stdinOpen": true, "tty": true, "user": null, "userdata": null, "version": "0bdea468-c3e9-4562-951c-d543958e966a", "volumeDriver": null, "workingDir": null, "dataVolumesFromLaunchConfigs": [], 
"networkLaunchConfig": null, "vcpu": 1 }, "metadata": null, "publicEndpoints": null, "removed": null, "retainIp": null, "scale": 1, "scalePolicy": null, "secondaryLaunchConfigs": [], "selectorContainer": null, "selectorLink": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "upgrade": { "inServiceStrategy": { "batchSize": 1, "intervalMillis": 2000, "launchConfig": { "capAdd": [], "capDrop": [], "count": null, "cpuSet": null, "cpuShares": null, "dataVolumes": [], "dataVolumesFrom": [], "description": null, "devices": [], "dns": [], "dnsSearch": [], "domainName": null, "hostname": null, "imageUuid": "docker:ubuntu:trusty", "kind": "container", "labels": { "io.rancher.container.pull_image": "always" }, "logConfig": { "config": {}, "driver": "" }, "memory": null, "memoryMb": null, "memorySwap": null, "networkMode": "managed", "pidMode": null, "ports": [], "privileged": false, "publishAllPorts": false, "readOnly": false, "requestedIpAddress": null, "startOnCreate": true, "stdinOpen": true, "tty": true, "user": null, "userdata": null, "version": "0bdea468-c3e9-4562-951c-d543958e966a", "volumeDriver": null, "workingDir": null, "dataVolumesFromLaunchConfigs": [], "networkLaunchConfig": null, "vcpu": 1 }, "previousLaunchConfig": { "capAdd": [], "capDrop": [], "count": null, "cpuSet": null, "cpuShares": null, "dataVolumes": [], "dataVolumesFrom": [], "description": null, "devices": [], "dns": [], "dnsSearch": [], "domainName": null, "hostname": null, "imageUuid": "docker:ubuntu:trusty", "kind": "container", "labels": { "io.rancher.container.pull_image": "always" }, "logConfig": { "config": {}, "driver": "" }, "memory": null, "memoryMb": null, "memorySwap": null, "networkMode": "managed", "pidMode": null, "ports": [], "privileged": false, "publishAllPorts": false, "readOnly": false, "requestedIpAddress": null, "startOnCreate": true, "stdinOpen": true, "tty": true, "user": null, "userdata": null, "version": "0", 
"volumeDriver": null, "workingDir": null, "dataVolumesFromLaunchConfigs": [], "networkLaunchConfig": null, "vcpu": 1 }, "previousSecondaryLaunchConfigs": [], "secondaryLaunchConfigs": [], "startFirst": true }, "toServiceStrategy": null }, "uuid": "c0ae4d08-e20a-45ef-9fb9-ad9f7cdeeb15", "vip": null } ], "sortLinks": { "accountId": "http://172.30.0.100:8080/v1/services?limit=4&sort=accountId", "createIndex": "http://172.30.0.100:8080/v1/services?limit=4&sort=createIndex", "created": "http://172.30.0.100:8080/v1/services?limit=4&sort=created", "description": "http://172.30.0.100:8080/v1/services?limit=4&sort=description", "environmentId": "http://172.30.0.100:8080/v1/services?limit=4&sort=environmentId", "externalId": "http://172.30.0.100:8080/v1/services?limit=4&sort=externalId", "healthState": "http://172.30.0.100:8080/v1/services?limit=4&sort=healthState", "id": "http://172.30.0.100:8080/v1/services?limit=4&sort=id", "kind": "http://172.30.0.100:8080/v1/services?limit=4&sort=kind", "name": "http://172.30.0.100:8080/v1/services?limit=4&sort=name", "removeTime": "http://172.30.0.100:8080/v1/services?limit=4&sort=removeTime", "removed": "http://172.30.0.100:8080/v1/services?limit=4&sort=removed", "selectorContainer": "http://172.30.0.100:8080/v1/services?limit=4&sort=selectorContainer", "selectorLink": "http://172.30.0.100:8080/v1/services?limit=4&sort=selectorLink", "state": "http://172.30.0.100:8080/v1/services?limit=4&sort=state", "uuid": "http://172.30.0.100:8080/v1/services?limit=4&sort=uuid", "vip": "http://172.30.0.100:8080/v1/services?limit=4&sort=vip" }, "pagination": { "first": null, "previous": null, "next": "http://172.30.0.100:8080/v1/services?limit=4&marker=m4", "limit": 4, "total": null, "partial": true }, "sort": null, "filters": { "accountId": null, "createIndex": null, "created": null, "description": null, "environmentId": null, "externalId": null, "healthState": null, "id": null, "kind": null, "name": null, "removeTime": null, "removed": null, 
"selectorContainer": null, "selectorLink": null, "state": null, "uuid": null, "vip": null }, "createDefaults": {} }apache-libcloud-2.2.1/libcloud/test/container/fixtures/rancher/ex_list_stacks.json0000664000175000017500000004224213153541406030367 0ustar kamikami00000000000000{ "type": "collection", "resourceType": "environment", "links": { "self": "http://172.30.0.100:8080/v1/environments" }, "createTypes": { "composeProject": "http://172.30.0.100:8080/v1/composeprojects", "environment": "http://172.30.0.100:8080/v1/environments" }, "actions": {}, "data": [ { "id": "1e1", "type": "environment", "links": { "self": "http://172.30.0.100:8080/v1/environments/1e1", "account": "http://172.30.0.100:8080/v1/environments/1e1/account", "services": "http://172.30.0.100:8080/v1/environments/1e1/services", "composeConfig": "http://172.30.0.100:8080/v1/environments/1e1/composeconfig" }, "actions": { "upgrade": "http://172.30.0.100:8080/v1/environments/1e1/?action=upgrade", "update": "http://172.30.0.100:8080/v1/environments/1e1/?action=update", "remove": "http://172.30.0.100:8080/v1/environments/1e1/?action=remove", "addoutputs": "http://172.30.0.100:8080/v1/environments/1e1/?action=addoutputs", "activateservices": "http://172.30.0.100:8080/v1/environments/1e1/?action=activateservices", "deactivateservices": "http://172.30.0.100:8080/v1/environments/1e1/?action=deactivateservices", "exportconfig": "http://172.30.0.100:8080/v1/environments/1e1/?action=exportconfig" }, "name": "Concrete5", "state": "active", "accountId": "1a5", "created": "2016-09-22T17:58:53Z", "createdTS": 1474567133000, "description": null, "dockerCompose": "CMSMysql:\r\n environment:\r\n MYSQL_ROOT_PASSWORD: ${root_password}\r\n MYSQL_DATABASE: ${db_name}\r\n MYSQL_USER: ${db_username}\r\n MYSQL_PASSWORD: ${db_password}\r\n labels:\r\n io.rancher.container.pull_image: always\r\n tty: true\r\n image: mysql\r\n volumes:\r\n - ${db_data_location}:/var/lib/mysql\r\n stdin_open: true\r\n volume_driver: 
${volume_driver}\r\n\r\nCMSConfig:\r\n image: opensaas/concrete5\r\n tty: true\r\n stdin_open: true\r\n links:\r\n - CMSMysql:mysql\r\n volumes:\r\n - ${cms_application_data}:/var/www/html/application\r\n - ${cms_packages_data}:/var/www/html/packages\r\n labels:\r\n io.rancher.container.hostname_override: container_name\r\n io.rancher.container.start_once: true\r\n volume_driver: ${volume_driver}\r\n command: bash -c \"chown -R www-data. application; chown -R www-data. packages; sleep 2m; php -f concrete/bin/concrete5.php c5:install --db-server=mysql --db-username=${db_username} --db-password=${db_password} --db-database=${db_name} --site=${cms_sitename} --admin-email=${cms_admin_email} --admin-password=${cms_admin_password} -n -vvv\"\r\n\r\nConcrete5App:\r\n labels:\r\n io.rancher.container.pull_image: always\r\n io.rancher.sidekicks: CMSConfig\r\n tty: true\r\n links:\r\n - CMSMysql:mysql\r\n image: opensaas/concrete5\r\n volumes:\r\n - ${cms_application_data}:/var/www/html/application\r\n - ${cms_packages_data}:/var/www/html/packages\r\n volume_driver: ${volume_driver}\r\n stdin_open: true", "environment": { "root_password": "password", "db_name": "CMS_DB", "db_username": "CMS_USER", "db_password": "password", "db_data_location": "CMS_DB_DATA", "volume_driver": "", "cms_application_data": "CMS_APP_DATA", "cms_packages_data": "CMS_PACK_DATA", "cms_admin_email": "admin@example.com", "cms_admin_password": "password", "cms_sitename": "MySite" }, "externalId": "catalog://community:Concrete5:1", "healthState": "healthy", "kind": "environment", "outputs": null, "previousEnvironment": null, "previousExternalId": null, "rancherCompose": ".catalog:\r\n name: \"Concrete5\"\r\n version: \"v0.2\"\r\n description: \"Concrete5 CMS\"\r\n uuid: Concrete5-1\r\n minimum_rancher_version: v0.51.0\r\n questions:\r\n - variable: root_password\r\n description: \"MySQL root password - keep this password complex and secure\"\r\n label: \"MySQL Root Password\"\r\n required: true\r\n 
default: \"password\"\r\n type: \"string\"\r\n - variable: db_name\r\n description: \"MySQL Database Name - to use in the Concrete5 setup\"\r\n label: \"MySQL Database Name\"\r\n required: true\r\n default: \"CMS_DB\"\r\n type: \"string\"\r\n - variable: db_username\r\n description: \"MySQL Username - to use in the Concrete5 setup\"\r\n label: \"MySQL Username\"\r\n required: true\r\n default: \"CMS_USER\"\r\n type: \"string\"\r\n - variable: db_password\r\n description: \"MySQL password for the above user - to use in the Concrete5 setup\"\r\n label: \"MySQL Password\"\r\n required: true\r\n default: \"password\"\r\n type: \"string\"\r\n - variable: db_data_location\r\n description: \"Location on the host for the database files\"\r\n label: \"DB Data Location\"\r\n required: true\r\n default: \"CMS_DB_DATA\"\r\n type: \"string\"\r\n - variable: volume_driver\r\n description: \"Volume Driver for the persistant data locations requires docker 1.7\"\r\n label: \"Volume Driver\"\r\n type: \"string\"\r\n - variable: cms_application_data\r\n description: \"Concrcte5 application folder for persistant data storage\"\r\n label: \"Application Folder\"\r\n required: true\r\n default: \"CMS_APP_DATA\"\r\n type: \"string\"\r\n - variable: cms_packages_data\r\n description: \"Concrcte5 packages folder for persistant data storage\"\r\n label: \"Packages Folder\"\r\n required: true\r\n default: \"CMS_PACK_DATA\"\r\n type: \"string\"\r\n - variable: cms_admin_email\r\n description: \"Concrcete5 admin email address\"\r\n label: \"Admin Email\"\r\n required: true\r\n default: \"admin@example.com\"\r\n type: \"string\"\r\n - variable: cms_admin_password\r\n description: \"Concrcete5 admin password\"\r\n label: \"Admin Password\"\r\n required: true\r\n default: \"password\"\r\n type: \"string\"\r\n - variable: cms_sitename\r\n description: \"Concrcete5 Sitename\"\r\n label: \"Sitename\"\r\n required: true\r\n default: \"MySite\"\r\n type: \"string\"\r\nCMSMysql:\r\n scale: 
1\r\nConcrete5App:\r\n scale: 1\r\n\r\n", "removed": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "uuid": "e129ed27-3823-4e2e-af62-4565d68995d4" }, { "id": "1e2", "type": "environment", "links": { "self": "http://172.30.0.100:8080/v1/environments/1e2", "account": "http://172.30.0.100:8080/v1/environments/1e2/account", "services": "http://172.30.0.100:8080/v1/environments/1e2/services", "composeConfig": "http://172.30.0.100:8080/v1/environments/1e2/composeconfig" }, "actions": { "upgrade": "http://172.30.0.100:8080/v1/environments/1e2/?action=upgrade", "update": "http://172.30.0.100:8080/v1/environments/1e2/?action=update", "remove": "http://172.30.0.100:8080/v1/environments/1e2/?action=remove", "addoutputs": "http://172.30.0.100:8080/v1/environments/1e2/?action=addoutputs", "activateservices": "http://172.30.0.100:8080/v1/environments/1e2/?action=activateservices", "deactivateservices": "http://172.30.0.100:8080/v1/environments/1e2/?action=deactivateservices", "exportconfig": "http://172.30.0.100:8080/v1/environments/1e2/?action=exportconfig" }, "name": "ghost", "state": "active", "accountId": "1a5", "created": "2016-09-22T18:00:25Z", "createdTS": 1474567225000, "description": null, "dockerCompose": "ghost:\n image: ghost\n ports:\n - ${public_port}:2368\n", "environment": { "public_port": "80" }, "externalId": "catalog://community:ghost:0", "healthState": "healthy", "kind": "environment", "outputs": null, "previousEnvironment": null, "previousExternalId": null, "rancherCompose": ".catalog:\n name: \"Ghost\"\n version: \"v0.1-educaas\"\n description: \"Blog powered by Ghost. 
Requires no database\"\n uuid: ghost-0\n minimum_rancher_version: v0.51.0\n questions:\n - variable: public_port\n description: \"public port to access the blog\"\n label: \"Public Port\"\n required: true\n default: \"80\"\n type: \"int\"\nghost:\n", "removed": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "uuid": "4c335202-2ca7-41f1-8702-4984518566ec" }, { "id": "1e5", "type": "environment", "links": { "self": "http://172.30.0.100:8080/v1/environments/1e5", "account": "http://172.30.0.100:8080/v1/environments/1e5/account", "services": "http://172.30.0.100:8080/v1/environments/1e5/services", "composeConfig": "http://172.30.0.100:8080/v1/environments/1e5/composeconfig" }, "actions": { "upgrade": "http://172.30.0.100:8080/v1/environments/1e5/?action=upgrade", "update": "http://172.30.0.100:8080/v1/environments/1e5/?action=update", "remove": "http://172.30.0.100:8080/v1/environments/1e5/?action=remove", "addoutputs": "http://172.30.0.100:8080/v1/environments/1e5/?action=addoutputs", "activateservices": "http://172.30.0.100:8080/v1/environments/1e5/?action=activateservices", "deactivateservices": "http://172.30.0.100:8080/v1/environments/1e5/?action=deactivateservices", "exportconfig": "http://172.30.0.100:8080/v1/environments/1e5/?action=exportconfig" }, "name": "heyk", "state": "active", "accountId": "1a5", "created": "2016-09-29T20:40:14Z", "createdTS": 1475181614000, "description": null, "dockerCompose": null, "environment": null, "externalId": null, "healthState": "healthy", "kind": "environment", "outputs": null, "previousEnvironment": null, "previousExternalId": null, "rancherCompose": null, "removed": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "uuid": "d5a39304-9247-4a31-8662-7dba98238105" }, { "id": "1e6", "type": "environment", "links": { "self": "http://172.30.0.100:8080/v1/environments/1e6", "account": 
"http://172.30.0.100:8080/v1/environments/1e6/account", "services": "http://172.30.0.100:8080/v1/environments/1e6/services", "composeConfig": "http://172.30.0.100:8080/v1/environments/1e6/composeconfig" }, "actions": { "upgrade": "http://172.30.0.100:8080/v1/environments/1e6/?action=upgrade", "update": "http://172.30.0.100:8080/v1/environments/1e6/?action=update", "remove": "http://172.30.0.100:8080/v1/environments/1e6/?action=remove", "addoutputs": "http://172.30.0.100:8080/v1/environments/1e6/?action=addoutputs", "activateservices": "http://172.30.0.100:8080/v1/environments/1e6/?action=activateservices", "deactivateservices": "http://172.30.0.100:8080/v1/environments/1e6/?action=deactivateservices", "exportconfig": "http://172.30.0.100:8080/v1/environments/1e6/?action=exportconfig" }, "name": "ilikegurls", "state": "active", "accountId": "1a5", "created": "2016-10-06T00:40:23Z", "createdTS": 1475714423000, "description": null, "dockerCompose": null, "environment": null, "externalId": null, "healthState": "healthy", "kind": "environment", "outputs": null, "previousEnvironment": null, "previousExternalId": null, "rancherCompose": null, "removed": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "uuid": "19101511-6dcb-49f7-a8e1-063bcded1956" }, { "id": "1e7", "type": "environment", "links": { "self": "http://172.30.0.100:8080/v1/environments/1e7", "account": "http://172.30.0.100:8080/v1/environments/1e7/account", "services": "http://172.30.0.100:8080/v1/environments/1e7/services", "composeConfig": "http://172.30.0.100:8080/v1/environments/1e7/composeconfig" }, "actions": { "upgrade": "http://172.30.0.100:8080/v1/environments/1e7/?action=upgrade", "update": "http://172.30.0.100:8080/v1/environments/1e7/?action=update", "remove": "http://172.30.0.100:8080/v1/environments/1e7/?action=remove", "addoutputs": "http://172.30.0.100:8080/v1/environments/1e7/?action=addoutputs", "activateservices": 
"http://172.30.0.100:8080/v1/environments/1e7/?action=activateservices", "deactivateservices": "http://172.30.0.100:8080/v1/environments/1e7/?action=deactivateservices", "exportconfig": "http://172.30.0.100:8080/v1/environments/1e7/?action=exportconfig" }, "name": "maybethisyear", "state": "active", "accountId": "1a5", "created": "2016-10-06T00:41:17Z", "createdTS": 1475714477000, "description": null, "dockerCompose": null, "environment": null, "externalId": null, "healthState": "healthy", "kind": "environment", "outputs": null, "previousEnvironment": null, "previousExternalId": null, "rancherCompose": null, "removed": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "uuid": "250c66f6-cd36-4c12-8767-deb5c35d0a05" }, { "id": "1e8", "type": "environment", "links": { "self": "http://172.30.0.100:8080/v1/environments/1e8", "account": "http://172.30.0.100:8080/v1/environments/1e8/account", "services": "http://172.30.0.100:8080/v1/environments/1e8/services", "composeConfig": "http://172.30.0.100:8080/v1/environments/1e8/composeconfig" }, "actions": { "upgrade": "http://172.30.0.100:8080/v1/environments/1e8/?action=upgrade", "update": "http://172.30.0.100:8080/v1/environments/1e8/?action=update", "remove": "http://172.30.0.100:8080/v1/environments/1e8/?action=remove", "addoutputs": "http://172.30.0.100:8080/v1/environments/1e8/?action=addoutputs", "activateservices": "http://172.30.0.100:8080/v1/environments/1e8/?action=activateservices", "deactivateservices": "http://172.30.0.100:8080/v1/environments/1e8/?action=deactivateservices", "exportconfig": "http://172.30.0.100:8080/v1/environments/1e8/?action=exportconfig" }, "name": "oh-another", "state": "active", "accountId": "1a5", "created": "2016-10-06T00:51:34Z", "createdTS": 1475715094000, "description": null, "dockerCompose": null, "environment": null, "externalId": null, "healthState": "healthy", "kind": "environment", "outputs": null, 
"previousEnvironment": null, "previousExternalId": null, "rancherCompose": null, "removed": null, "startOnCreate": true, "transitioning": "no", "transitioningMessage": null, "transitioningProgress": null, "uuid": "a777dd5e-e524-43c8-8783-d7bb14bcffb6" } ], "sortLinks": { "accountId": "http://172.30.0.100:8080/v1/environments?limit=6&sort=accountId", "created": "http://172.30.0.100:8080/v1/environments?limit=6&sort=created", "description": "http://172.30.0.100:8080/v1/environments?limit=6&sort=description", "externalId": "http://172.30.0.100:8080/v1/environments?limit=6&sort=externalId", "healthState": "http://172.30.0.100:8080/v1/environments?limit=6&sort=healthState", "id": "http://172.30.0.100:8080/v1/environments?limit=6&sort=id", "kind": "http://172.30.0.100:8080/v1/environments?limit=6&sort=kind", "name": "http://172.30.0.100:8080/v1/environments?limit=6&sort=name", "removeTime": "http://172.30.0.100:8080/v1/environments?limit=6&sort=removeTime", "removed": "http://172.30.0.100:8080/v1/environments?limit=6&sort=removed", "state": "http://172.30.0.100:8080/v1/environments?limit=6&sort=state", "uuid": "http://172.30.0.100:8080/v1/environments?limit=6&sort=uuid" }, "pagination": { "first": null, "previous": null, "next": "http://172.30.0.100:8080/v1/environments?limit=6&marker=m6", "limit": 6, "total": null, "partial": true }, "sort": null, "filters": { "accountId": null, "created": null, "description": null, "externalId": null, "healthState": [ { "modifier": "eq", "value": "healthy" } ], "id": null, "kind": null, "name": null, "removeTime": null, "removed": null, "state": null, "uuid": null }, "createDefaults": {} }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/0000775000175000017500000000000013160535110023572 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/stoptask.json0000664000175000017500000000257612701023453026351 0ustar kamikami00000000000000{ "task": { "clusterArn": 
"arn:aws:ecs:us-east-1:012345678910:cluster/default", "containerInstanceArn": "arn:aws:ecs:us-east-1:012345678910:container-instance/8db248d6-16a7-42b5-b9f9-43d3b1ad9430", "containers": [ { "containerArn": "arn:aws:ecs:us-east-1:012345678910:container/05a5528c-77f6-4e5b-8f9a-2b0a1928a926", "lastStatus": "RUNNING", "name": "mysql", "networkBindings": [], "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/a126249b-b7e4-4b06-9d8f-1b56e75a99b5" }, { "containerArn": "arn:aws:ecs:us-east-1:012345678910:container/37234a82-77f6-41d7-b54b-591f1e278093", "lastStatus": "RUNNING", "name": "wordpress", "networkBindings": [ { "bindIP": "0.0.0.0", "containerPort": 80, "hostPort": 80 } ], "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/a126249b-b7e4-4b06-9d8f-1b56e75a99b5" } ], "desiredStatus": "STOPPED", "lastStatus": "RUNNING", "overrides": { "containerOverrides": [ { "name": "mysql" }, { "name": "wordpress" } ] }, "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/a126249b-b7e4-4b06-9d8f-1b56e75a99b5", "taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:11" } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/deletecluster.json0000664000175000017500000000050212701023453027330 0ustar kamikami00000000000000{ "cluster": { "activeServicesCount": 0, "clusterArn": "arn:aws:ecs:ap-southeast-2:647433528374:cluster/my-cluster", "clusterName": "my-cluster", "pendingTasksCount": 0, "registeredContainerInstancesCount": 0, "runningTasksCount": 0, "status": "INACTIVE" } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/listimages.json0000664000175000017500000000025512701023453026632 0ustar kamikami00000000000000{ "imageIds": [{ "imageDigest": "sha256:9bacaf947ed397fcc9afb7359a1a8eaa1f6944ba8cd4ddca1c69bdcf4acf12a2", "imageTag": "latest" } ] }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/createcluster.json0000664000175000017500000000050012701023453027327 0ustar kamikami00000000000000{ "cluster": { "activeServicesCount": 0, 
"clusterArn": "arn:aws:ecs:ap-southeast-2:647433528374:cluster/my-cluster", "clusterName": "my-cluster", "pendingTasksCount": 0, "registeredContainerInstancesCount": 0, "runningTasksCount": 0, "status": "ACTIVE" } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/listtasks.json0000664000175000017500000000054612701023453026515 0ustar kamikami00000000000000{ "taskArns": [ "arn:aws:ecs:us-east-1:012345678910:task/0b69d5c0-d655-4695-98cd-5d2d526d9d5a", "arn:aws:ecs:us-east-1:012345678910:task/51a01bdf-d00e-487e-ab14-7645330b6207", "arn:aws:ecs:us-east-1:012345678910:task/b0b28bb8-2be3-4810-b52b-88df129d893c", "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe" ] }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/runtask.json0000664000175000017500000000222512701023453026157 0ustar kamikami00000000000000{ "failures": [], "tasks": [{ "clusterArn": "arn:aws:ecs:ap-southeast-2:647433528374:cluster/default", "containerInstanceArn": "arn:aws:ecs:ap-southeast-2:647433528374:container-instance/13b83f4b-d557-48a6-a4d7-2b0e8068e62b", "containers": [{ "containerArn": "arn:aws:ecs:ap-southeast-2:647433528374:container/e443d10f-dea3-481e-8a1e-966b9ad4e498", "lastStatus": "PENDING", "name": "my-simple-app", "taskArn": "arn:aws:ecs:ap-southeast-2:647433528374:task/b7c76236-b96f-4de1-93c8-9da3c30ccc23" } ], "createdAt": 1.45181726008E9, "desiredStatus": "RUNNING", "lastStatus": "PENDING", "overrides": { "containerOverrides": [{ "name": "my-simple-app" } ] }, "taskArn": "arn:aws:ecs:ap-southeast-2:647433528374:task/b7c76236-b96f-4de1-93c8-9da3c30ccc23", "taskDefinitionArn": "arn:aws:ecs:ap-southeast-2:647433528374:task-definition/my-simple-app:1" } ] }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/listclusters.json0000664000175000017500000000021012701023453027220 0ustar 
kamikami00000000000000{"clusterArns":["arn:aws:ecs:ap-southeast-2:647433528374:cluster/my-cluster","arn:aws:ecs:ap-southeast-2:647433528374:cluster/default"]}apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/createservice.json0000664000175000017500000000156512701023453027322 0ustar kamikami00000000000000{ "service": { "clusterArn": "arn:aws:ecs:us-east-1:012345678910:cluster/default", "deploymentConfiguration": { "maximumPercent": 200, "minimumHealthyPercent": 100 }, "deployments": [ { "createdAt": 1430326887.362, "desiredCount": 10, "id": "ecs-svc/9223370606527888445", "pendingCount": 0, "runningCount": 0, "status": "PRIMARY", "taskDefinition": "arn:aws:ecs:us-east-1:012345678910:task-definition/ecs-demo:1", "updatedAt": 1430326887.362 } ], "desiredCount": 10, "events": [], "loadBalancers": [], "pendingCount": 0, "runningCount": 0, "serviceArn": "arn:aws:ecs:us-east-1:012345678910:service/test", "serviceName": "test", "status": "ACTIVE", "taskDefinition": "arn:aws:ecs:us-east-1:012345678910:task-definition/ecs-demo:1" } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/listservices.json0000664000175000017500000000023412701023453027205 0ustar kamikami00000000000000{ "serviceArns": [ "arn:aws:ecs:us-east-1:012345678910:service/hello_world", "arn:aws:ecs:us-east-1:012345678910:service/ecs-simple-service" ] }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/describeservices.json0000664000175000017500000000173512701023453030021 0ustar kamikami00000000000000{ "failures": [], "services": [ { "clusterArn": "arn:aws:ecs:us-west-2:012345678910:cluster/telemetry", "deploymentConfiguration": { "maximumPercent": 200, "minimumHealthyPercent": 100 }, "deployments": [ { "createdAt": 1432829320.611, "desiredCount": 4, "id": "ecs-svc/9223370604025455196", "pendingCount": 0, "runningCount": 4, "status": "PRIMARY", "taskDefinition": "arn:aws:ecs:us-west-2:012345678910:task-definition/hpcc-t2-medium:1", "updatedAt": 1432829320.611 } ], "desiredCount": 4, 
"events": [], "loadBalancers": [], "pendingCount": 0, "runningCount": 4, "serviceArn": "arn:aws:ecs:us-west-2:012345678910:service/bunker-buster", "serviceName": "test", "status": "ACTIVE", "taskDefinition": "arn:aws:ecs:us-west-2:012345678910:task-definition/hpcc-t2-medium:1" } ] }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/deleteservice.json0000664000175000017500000000156712701023453027323 0ustar kamikami00000000000000{ "service": { "clusterArn": "arn:aws:ecs:us-east-1:012345678910:cluster/default", "deploymentConfiguration": { "maximumPercent": 200, "minimumHealthyPercent": 100 }, "deployments": [ { "createdAt": 1430320735.285, "desiredCount": 0, "id": "ecs-svc/9223370606534040511", "pendingCount": 0, "runningCount": 0, "status": "PRIMARY", "taskDefinition": "arn:aws:ecs:us-east-1:012345678910:task-definition/sleep360:27", "updatedAt": 1430320735.285 } ], "desiredCount": 0, "events": [], "loadBalancers": [], "pendingCount": 0, "runningCount": 0, "serviceArn": "arn:aws:ecs:us-east-1:012345678910:service/test", "serviceName": "test", "status": "DRAINING", "taskDefinition": "arn:aws:ecs:us-east-1:012345678910:task-definition/sleep360:27" } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/getauthorizationtoken.json0000664000175000017500000000032712701023453031132 0ustar kamikami00000000000000{ "authorizationData": [ { "authorizationToken": "QVdTOkNpQzErSHF1ZXZPcUR...", "expiresAt": 1448878779.809, "proxyEndpoint": "https://012345678910.dkr.ecr.us-east-1.amazonaws.com" } ] }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/describerepositories.json0000664000175000017500000000032612701023453030720 0ustar kamikami00000000000000{ "repositories": [{ "registryId": "647433528374", "repositoryArn": "arn:aws:ecr:us-east-1:647433528374:repository/my-images", "repositoryName": "my-images" } ] }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/describeclusters.json0000664000175000017500000000050212701023453030031 0ustar 
kamikami00000000000000{ "clusters": [ { "activeServicesCount": 1, "clusterArn": "arn:aws:ecs:us-east-1:012345678910:cluster/default", "clusterName": "default", "pendingTasksCount": 0, "registeredContainerInstancesCount": 0, "runningTasksCount": 0, "status": "ACTIVE" } ], "failures": [] }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/registertaskdefinition.json0000664000175000017500000000115212701023453031246 0ustar kamikami00000000000000{ "taskDefinition": { "containerDefinitions": [{ "cpu": 10, "environment": [], "essential": true, "image": "simple-app", "memory": 500, "mountPoints": [], "name": "my-simple-app", "portMappings": [], "volumesFrom": [] } ], "family": "my-simple-app", "revision": 1, "status": "ACTIVE", "taskDefinitionArn": "arn:aws:ecs:ap-southeast-2:647433528374:task-definition/my-simple-app:1", "volumes": [] } }apache-libcloud-2.2.1/libcloud/test/container/fixtures/ecs/describetasks.json0000664000175000017500000000302512701023453027315 0ustar kamikami00000000000000{ "failures": [], "tasks": [{ "clusterArn": "arn:aws:ecs:ap-southeast-2:647433528374:cluster/default", "containerInstanceArn": "arn:aws:ecs:ap-southeast-2:647433528374:container-instance/13b83f4b-d557-48a6-a4d7-2b0e8068e62b", "containers": [{ "containerArn": "arn:aws:ecs:ap-southeast-2:647433528374:container/d56d4e2c-9804-42a7-9f2a-6029cb50d4a2", "lastStatus": "RUNNING", "name": "simple-app", "networkBindings": [{ "bindIP": "0.0.0.0", "containerPort": 80, "hostPort": 80, "protocol": "tcp" } ], "taskArn": "arn:aws:ecs:ap-southeast-2:647433528374:task/c15bcab8-39e6-4c28-a47d-27b433269e5c" } ], "createdAt": 1.451468104403E9, "desiredStatus": "RUNNING", "lastStatus": "RUNNING", "overrides": { "containerOverrides": [{ "name": "simple-app" } ] }, "startedAt": 1.45146812139E9, "startedBy": "ecs-svc/9223370585386692588", "taskArn": "arn:aws:ecs:ap-southeast-2:647433528374:task/c15bcab8-39e6-4c28-a47d-27b433269e5c", "taskDefinitionArn": 
"arn:aws:ecs:ap-southeast-2:647433528374:task-definition/console-sample-app-static:1" } ] }apache-libcloud-2.2.1/libcloud/test/container/test_kubernetes.py0000664000175000017500000001162213153541406024740 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys from libcloud.test import unittest from libcloud.container.base import ContainerImage from libcloud.container.drivers.kubernetes import KubernetesContainerDriver from libcloud.utils.py3 import httplib from libcloud.test.secrets import CONTAINER_PARAMS_KUBERNETES from libcloud.test.file_fixtures import ContainerFileFixtures from libcloud.test import MockHttp class KubernetesContainerDriverTestCase(unittest.TestCase): def setUp(self): KubernetesContainerDriver.connectionCls.conn_class = KubernetesMockHttp KubernetesMockHttp.type = None KubernetesMockHttp.use_param = 'a' self.driver = KubernetesContainerDriver(*CONTAINER_PARAMS_KUBERNETES) def test_list_containers(self): containers = self.driver.list_containers() self.assertEqual(len(containers), 1) self.assertEqual(containers[0].id, 'docker://3c48b5cda79bce4c8866f02a3b96a024edb8f660d10e7d1755e9ced49ef47b36') self.assertEqual(containers[0].name, 'hello-world') def test_list_clusters(self): clusters = self.driver.list_clusters() self.assertEqual(len(clusters), 2) self.assertEqual(clusters[0].id, 'default') self.assertEqual(clusters[0].name, 'default') def test_get_cluster(self): cluster = self.driver.get_cluster('default') self.assertEqual(cluster.id, 'default') self.assertEqual(cluster.name, 'default') def test_create_cluster(self): cluster = self.driver.create_cluster('test') self.assertEqual(cluster.id, 'test') self.assertEqual(cluster.name, 'test') def test_destroy_cluster(self): cluster = self.driver.get_cluster('default') result = self.driver.destroy_cluster(cluster) self.assertTrue(result) def test_deploy_container(self): image = ContainerImage( id=None, name='hello-world', path=None, driver=self.driver, version=None ) container = self.driver.deploy_container('hello-world', image=image) self.assertEqual(container.name, 'hello-world') def test_get_container(self): container = self.driver.get_container('docker://3c48b5cda79bce4c8866f02a3b96a024edb8f660d10e7d1755e9ced49ef47b36') assert container.id == 
'docker://3c48b5cda79bce4c8866f02a3b96a024edb8f660d10e7d1755e9ced49ef47b36' class KubernetesMockHttp(MockHttp): fixtures = ContainerFileFixtures('kubernetes') def _api_v1_pods( self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('_api_v1_pods.json') else: raise AssertionError('Unsupported method') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _api_v1_namespaces( self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('_api_v1_namespaces.json') elif method == 'POST': body = self.fixtures.load('_api_v1_namespaces_test.json') else: raise AssertionError('Unsupported method') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _api_v1_namespaces_default( self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('_api_v1_namespaces_default.json') elif method == 'DELETE': body = self.fixtures.load('_api_v1_namespaces_default_DELETE.json') else: raise AssertionError('Unsupported method') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _api_v1_namespaces_default_pods( self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('_api_v1_namespaces_default_pods.json') elif method == 'POST': body = self.fixtures.load('_api_v1_namespaces_default_pods_POST.json') else: raise AssertionError('Unsupported method') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/container/test_ecs.py0000664000175000017500000001712413153541406023346 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from libcloud.test import unittest from libcloud.container.base import ContainerCluster, ContainerImage, Container from libcloud.container.drivers.ecs import ElasticContainerDriver from libcloud.container.utils.docker import RegistryClient from libcloud.utils.py3 import httplib from libcloud.test.secrets import CONTAINER_PARAMS_ECS from libcloud.test.file_fixtures import ContainerFileFixtures from libcloud.test import MockHttp class ElasticContainerDriverTestCase(unittest.TestCase): def setUp(self): ElasticContainerDriver.connectionCls.conn_class = ECSMockHttp ECSMockHttp.type = None ECSMockHttp.use_param = 'a' ElasticContainerDriver.ecrConnectionClass.conn_class = ECSMockHttp self.driver = ElasticContainerDriver(*CONTAINER_PARAMS_ECS) def test_list_clusters(self): clusters = self.driver.list_clusters() self.assertEqual(len(clusters), 1) self.assertEqual(clusters[0].id, 'arn:aws:ecs:us-east-1:012345678910:cluster/default') self.assertEqual(clusters[0].name, 'default') def test_create_cluster(self): cluster = self.driver.create_cluster('my-cluster') self.assertEqual(cluster.name, 'my-cluster') def test_destroy_cluster(self): self.assertTrue( self.driver.destroy_cluster( ContainerCluster( id='arn:aws:ecs:us-east-1:012345678910:cluster/jim', name='jim', driver=self.driver))) def test_list_containers(self): containers = self.driver.list_containers() self.assertEqual(len(containers), 1) def test_list_containers_for_cluster(self): cluster = self.driver.list_clusters()[0] containers = self.driver.list_containers(cluster=cluster) self.assertEqual(len(containers), 
1) def test_deploy_container(self): container = self.driver.deploy_container( name='jim', image=ContainerImage( id=None, name='mysql', path='mysql', version=None, driver=self.driver ) ) self.assertEqual(container.id, 'arn:aws:ecs:ap-southeast-2:647433528374:container/e443d10f-dea3-481e-8a1e-966b9ad4e498') def test_get_container(self): container = self.driver.get_container( 'arn:aws:ecs:us-east-1:012345678910:container/76c980a8-2454-4a9c-acc4-9eb103117273' ) self.assertEqual(container.id, 'arn:aws:ecs:ap-southeast-2:647433528374:container/d56d4e2c-9804-42a7-9f2a-6029cb50d4a2') self.assertEqual(container.name, 'simple-app') self.assertEqual(container.image.name, 'simple-app') def test_start_container(self): container = self.driver.start_container( Container( id=None, name=None, image=None, state=None, ip_addresses=None, driver=self.driver, extra={ 'taskDefinitionArn': '' } ) ) self.assertFalse(container is None) def test_stop_container(self): container = self.driver.stop_container( Container( id=None, name=None, image=None, state=None, ip_addresses=None, driver=self.driver, extra={ 'taskArn': '12345', 'taskDefinitionArn': '123556' } ) ) self.assertFalse(container is None) def test_restart_container(self): container = self.driver.restart_container( Container( id=None, name=None, image=None, state=None, ip_addresses=None, driver=self.driver, extra={ 'taskArn': '12345', 'taskDefinitionArn': '123556' } ) ) self.assertFalse(container is None) def test_list_images(self): images = self.driver.list_images('my-images') self.assertEqual(len(images), 1) self.assertEqual(images[0].name, '647433528374.dkr.ecr.region.amazonaws.com/my-images:latest') def test_ex_create_service(self): cluster = self.driver.list_clusters()[0] task_definition = self.driver.list_containers()[0].extra['taskDefinitionArn'] service = self.driver.ex_create_service(cluster=cluster, name='jim', task_definition=task_definition) self.assertEqual(service['serviceName'], 'test') def 
test_ex_list_service_arns(self): arns = self.driver.ex_list_service_arns() self.assertEqual(len(arns), 2) def test_ex_describe_service(self): arn = self.driver.ex_list_service_arns()[0] service = self.driver.ex_describe_service(arn) self.assertEqual(service['serviceName'], 'test') def test_ex_destroy_service(self): arn = self.driver.ex_list_service_arns()[0] service = self.driver.ex_destroy_service(arn) self.assertEqual(service['status'], 'DRAINING') def test_ex_get_registry_client(self): client = self.driver.ex_get_registry_client('my-images') self.assertIsInstance(client, RegistryClient) class ECSMockHttp(MockHttp): fixtures = ContainerFileFixtures('ecs') fixture_map = { 'DescribeClusters': 'describeclusters.json', 'CreateCluster': 'createcluster.json', 'DeleteCluster': 'deletecluster.json', 'DescribeTasks': 'describetasks.json', 'ListTasks': 'listtasks.json', 'ListClusters': 'listclusters.json', 'RegisterTaskDefinition': 'registertaskdefinition.json', 'RunTask': 'runtask.json', 'StopTask': 'stoptask.json', 'ListImages': 'listimages.json', 'DescribeRepositories': 'describerepositories.json', 'CreateService': 'createservice.json', 'ListServices': 'listservices.json', 'DescribeServices': 'describeservices.json', 'DeleteService': 'deleteservice.json', 'GetAuthorizationToken': 'getauthorizationtoken.json' } def root( self, method, url, body, headers): target = headers['x-amz-target'] if target is not None: type = target.split('.')[-1] if type is None or self.fixture_map.get(type) is None: raise AssertionError('Unsupported request type %s' % (target)) body = self.fixtures.load(self.fixture_map.get(type)) else: raise AssertionError('Unsupported method') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/container/test_gke.py0000664000175000017500000000711313153541406023337 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # 
contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tests for Google Container Engine Driver """ import sys import unittest from libcloud.utils.py3 import httplib from libcloud.container.drivers.gke import GKEContainerDriver, API_VERSION from libcloud.common.google import (GoogleBaseAuthConnection) from libcloud.test.common.test_google import GoogleAuthMockHttp, GoogleTestCase from libcloud.test import MockHttp from libcloud.test.container import TestCaseMixin from libcloud.test.file_fixtures import ContainerFileFixtures from libcloud.test.secrets import GKE_PARAMS, GKE_KEYWORD_PARAMS class GKEContainerDriverTestCase(GoogleTestCase, TestCaseMixin): """ Google Compute Engine Test Class. """ # Mock out a few specific calls that interact with the user, system or # environment. 
datacenter = 'us-central1-a' def setUp(self): GKEMockHttp.test = self GKEContainerDriver.connectionCls.conn_class = GKEMockHttp GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp GKEMockHttp.type = None kwargs = GKE_KEYWORD_PARAMS.copy() kwargs['auth_type'] = 'IA' kwargs['datacenter'] = self.datacenter self.driver = GKEContainerDriver(*GKE_PARAMS, **kwargs) def test_list_images_response(self): config = self.driver.list_clusters(ex_zone="us-central1-a") assert "clusters" in config assert config["clusters"][0]["zone"] == "us-central1-a" def test_server_config(self): config = self.driver.get_server_config() assert "validImageTypes" in config class GKEMockHttp(MockHttp): fixtures = ContainerFileFixtures('gke') json_hdr = {'content-type': 'application/json; charset=UTF-8'} def _get_method_name(self, type, use_param, qs, path): api_path = '/%s' % API_VERSION project_path = '/projects/%s' % GKE_KEYWORD_PARAMS['project'] path = path.replace(api_path, '') # This replace is separate, since there is a call with a different # project name path = path.replace(project_path, '') # The path to get project information is the base path, so use a fake # '/project' path instead if not path: path = '/project' method_name = super(GKEMockHttp, self)._get_method_name( type, use_param, qs, path) return method_name def _zones_us_central1_a_serverconfig(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_instance_serverconfig.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_clusters(self, method, url, body, headers): body = self.fixtures.load( 'zones_us-central1-a_list.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/container/test_rancher.py0000664000175000017500000003202013153541406024206 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or 
more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from libcloud.test import unittest from libcloud.container.base import ContainerImage from libcloud.container.drivers.rancher import RancherContainerDriver from libcloud.utils.py3 import httplib from libcloud.test.secrets import CONTAINER_PARAMS_RANCHER from libcloud.test.file_fixtures import ContainerFileFixtures from libcloud.test import MockHttp # --------------------------------------------------------------------------- # # Mock Classes class RancherMockHttp(MockHttp): fixtures = ContainerFileFixtures('rancher') def _v1_environments(self, method, url, body, headers): if method == 'GET': return (httplib.OK, self.fixtures.load('ex_list_stacks.json'), {}, httplib.responses[httplib.OK]) else: return (httplib.OK, self.fixtures.load('ex_deploy_stack.json'), {}, httplib.responses[httplib.OK]) def _v1_environments_1e9(self, method, url, body, headers): return (httplib.OK, self.fixtures.load('ex_deploy_stack.json'), {}, httplib.responses[httplib.OK]) def _v1_environments_1e10(self, method, url, body, headers): return (httplib.OK, self.fixtures.load('ex_destroy_stack.json'), {}, httplib.responses[httplib.OK]) def _v1_environments_1e1(self, method, url, body, headers): return (httplib.OK, self.fixtures.load('ex_activate_stack.json'), {}, 
httplib.responses[httplib.OK]) def _v1_services(self, method, url, body, headers): if '?healthState=healthy' in url: return (httplib.OK, self.fixtures.load('ex_search_services.json'), {}, httplib.responses[httplib.OK]) elif method == 'GET': return (httplib.OK, self.fixtures.load('ex_list_services.json'), {}, httplib.responses[httplib.OK]) else: return (httplib.OK, self.fixtures.load('ex_deploy_service.json'), {}, httplib.responses[httplib.OK]) def _v1_services_1s13(self, method, url, body, headers): if method == 'GET': return (httplib.OK, self.fixtures.load('ex_deploy_service.json'), {}, httplib.responses[httplib.OK]) elif method == 'DELETE': return (httplib.OK, self.fixtures.load('ex_destroy_service.json'), {}, httplib.responses[httplib.OK]) def _v1_services_1s6(self, method, url, body, headers): return (httplib.OK, self.fixtures.load('ex_activate_service.json'), {}, httplib.responses[httplib.OK]) def _v1_containers(self, method, url, body, headers): if '?state=running' in url: return (httplib.OK, self.fixtures.load('ex_search_containers.json'), {}, httplib.responses[httplib.OK]) elif method == 'POST': return (httplib.OK, self.fixtures.load('deploy_container.json'), {}, httplib.responses[httplib.OK]) return (httplib.OK, self.fixtures.load('list_containers.json'), {}, httplib.responses[httplib.OK]) def _v1_containers_1i31(self, method, url, body, headers): if method == 'GET': return (httplib.OK, self.fixtures.load('deploy_container.json'), {}, httplib.responses[httplib.OK]) elif method == 'DELETE' or '?action=stop' in url: return (httplib.OK, self.fixtures.load('stop_container.json'), {}, httplib.responses[httplib.OK]) elif '?action=start' in url: return (httplib.OK, self.fixtures.load('start_container.json'), {}, httplib.responses[httplib.OK]) else: return (httplib.OK, self.fixtures.load('deploy_container.json'), {}, httplib.responses[httplib.OK]) RancherContainerDriver.connectionCls.conn_class = RancherMockHttp RancherMockHttp.type = None 
RancherMockHttp.use_param = 'a' # --------------------------------------------------------------------------- # # Test Cases class RancherContainerDriverInitTestCase(unittest.TestCase): """ Tests for testing the different permutations of the driver initialization string. """ def test_full_url_string(self): """ Test a 'full' URL string, which contains a scheme, port, and base path. """ path = "http://myhostname:1234/base" driver = RancherContainerDriver(*CONTAINER_PARAMS_RANCHER, host=path) self.assertEqual(driver.secure, False) self.assertEqual(driver.connection.host, "myhostname") self.assertEqual(driver.connection.port, 1234) self.assertEqual(driver.baseuri, "/base") def test_url_string_no_port(self): """ Test a partial URL string, which contains a scheme, and base path. """ path = "http://myhostname/base" driver = RancherContainerDriver(*CONTAINER_PARAMS_RANCHER, host=path, port=1234) self.assertEqual(driver.secure, False) self.assertEqual(driver.connection.host, "myhostname") self.assertEqual(driver.connection.port, 1234) self.assertEqual(driver.baseuri, "/base") def test_url_string_no_scheme(self): """ Test a partial URL string, which contains a port, and base path. """ path = "myhostname:1234/base" driver = RancherContainerDriver(*CONTAINER_PARAMS_RANCHER, host=path) self.assertEqual(driver.secure, True) self.assertEqual(driver.connection.host, "myhostname") self.assertEqual(driver.connection.port, 1234) self.assertEqual(driver.baseuri, "/base") def test_url_string_no_base_path(self): """ Test a partial URL string, which contains a scheme, and a port. 
""" path = "http://myhostname:1234" driver = RancherContainerDriver(*CONTAINER_PARAMS_RANCHER, host=path) self.assertEqual(driver.secure, False) self.assertEqual(driver.connection.host, "myhostname") self.assertEqual(driver.connection.port, 1234) self.assertEqual(driver.baseuri, "/v%s" % driver.version) class RancherContainerDriverTestCase(unittest.TestCase): def setUp(self): self.driver = RancherContainerDriver(*CONTAINER_PARAMS_RANCHER) # Stacks def test_ex_list_stacks(self): stacks = self.driver.ex_list_stacks() self.assertEqual(len(stacks), 6) self.assertEqual(stacks[0]['id'], "1e1") def test_ex_deploy_stack(self): stack = self.driver.ex_deploy_stack(name="newstack", environment={ "root_password": "password" }) self.assertEqual(stack['id'], "1e9") self.assertEqual(stack['environment']['root_password'], "password") def test_ex_get_stack(self): # also uses ex_deploy_stack.json stack = self.driver.ex_get_stack("1e9") self.assertEqual(stack['id'], "1e9") self.assertEqual(stack['environment']['root_password'], "password") def test_ex_search_stacks(self): stacks = self.driver.ex_search_stacks({"healthState": "healthy"}) self.assertEqual(len(stacks), 6) self.assertEqual(stacks[0]['healthState'], "healthy") def test_ex_destroy_stack(self): response = self.driver.ex_destroy_stack("1e10") self.assertEqual(response, True) def test_ex_activate_stack(self): response = self.driver.ex_activate_stack("1e1") self.assertEqual(response, True) def test_ex_deactivate_stack(self): # also uses ex_activate_stack.json response = self.driver.ex_activate_stack("1e1") self.assertEqual(response, True) def test_ex_list_services(self): services = self.driver.ex_list_services() self.assertEqual(len(services), 4) self.assertEqual(services[0]['id'], "1s1") def test_ex_deploy_service(self): image = ContainerImage( id="hastebin", name="hastebin", path="rlister/hastebin", version="latest", driver=None ) service = self.driver.ex_deploy_service(name="newservice", environment_id="1e1", image=image, 
environment={ "root_password": "password" }) self.assertEqual(service['id'], "1s13") self.assertEqual(service['environmentId'], "1e6") self.assertEqual(service['launchConfig']['environment'] ['root_password'], "password") self.assertEqual(service['launchConfig']['imageUuid'], "docker:rlister/hastebin:latest") def test_ex_get_service(self): # also uses ex_deploy_service.json service = self.driver.ex_get_service("1s13") self.assertEqual(service['id'], "1s13") self.assertEqual(service['environmentId'], "1e6") self.assertEqual(service['launchConfig']['environment'] ['root_password'], "password") def test_ex_search_services(self): services = self.driver.ex_search_services({"healthState": "healthy"}) self.assertEqual(len(services), 2) self.assertEqual(services[0]['healthState'], "healthy") def test_ex_destroy_service(self): # Not sure how to do these with returns in mockhttp response = self.driver.ex_destroy_service("1s13") self.assertEqual(response, True) def test_ex_activate_service(self): response = self.driver.ex_activate_service("1s6") self.assertEqual(response, True) def test_ex_deactivate_service(self): # also uses ex_activate_service.json response = self.driver.ex_activate_service("1s6") self.assertEqual(response, True) def test_list_containers(self): containers = self.driver.list_containers() self.assertEqual(len(containers), 2) self.assertEqual(containers[0].id, "1i1") def test_deploy_container(self): container = self.driver.deploy_container( name='newcontainer', image=ContainerImage( id="hastebin", name="hastebin", path="rlister/hastebin", version="latest", driver=None ), environment={"STORAGE_TYPE": "file"}, networkMode="managed" ) self.assertEqual(container.id, '1i31') self.assertEqual(container.name, 'newcontainer') self.assertEqual(container.extra['environment'], {'STORAGE_TYPE': 'file'}) def test_get_container(self): # also uses ex_deploy_container.json container = self.driver.get_container("1i31") self.assertEqual(container.id, '1i31') 
self.assertEqual(container.name, 'newcontainer') self.assertEqual(container.extra['environment'], {'STORAGE_TYPE': 'file'}) def test_start_container(self): container = self.driver.get_container("1i31") started = container.start() self.assertEqual(started.id, "1i31") self.assertEqual(started.name, "newcontainer") self.assertEqual(started.state, "pending") self.assertEqual(started.extra['state'], "starting") def test_stop_container(self): container = self.driver.get_container("1i31") stopped = container.stop() self.assertEqual(stopped.id, "1i31") self.assertEqual(stopped.name, "newcontainer") self.assertEqual(stopped.state, "pending") self.assertEqual(stopped.extra['state'], "stopping") def test_ex_search_containers(self): containers = self.driver.ex_search_containers({"state": "running"}) self.assertEqual(len(containers), 1) def test_destroy_container(self): container = self.driver.get_container("1i31") destroyed = container.destroy() self.assertEqual(destroyed.id, "1i31") self.assertEqual(destroyed.name, "newcontainer") self.assertEqual(destroyed.state, "pending") self.assertEqual(destroyed.extra['state'], "stopping") if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/container/test_docker.py0000664000175000017500000002562113153541406024044 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import sys from libcloud.test import unittest from libcloud.container.base import ContainerImage from libcloud.container.drivers.docker import DockerContainerDriver from libcloud.utils.py3 import httplib from libcloud.test.secrets import CONTAINER_PARAMS_DOCKER from libcloud.test.file_fixtures import ContainerFileFixtures from libcloud.test import MockHttp class DockerContainerDriverTestCase(unittest.TestCase): def setUp(self): # Create a test driver for each version versions = ('linux_124', 'mac_124') self.drivers = [] for version in versions: DockerContainerDriver.connectionCls.conn_class = \ DockerMockHttp DockerMockHttp.type = None DockerMockHttp.use_param = 'a' driver = DockerContainerDriver(*CONTAINER_PARAMS_DOCKER) driver.version = version self.drivers.append(driver) def test_list_images(self): for driver in self.drivers: images = driver.list_images() self.assertEqual(len(images), 4) self.assertIsInstance(images[0], ContainerImage) self.assertEqual(images[0].id, 'cf55d61f5307b7a18a45980971d6cfd40b737dd661879c4a6b3f2aecc3bc37b0') self.assertEqual(images[0].name, 'mongo:latest') def test_install_image(self): for driver in self.drivers: image = driver.install_image('ubuntu:12.04') self.assertTrue(image is not None) self.assertEqual(image.id, '992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787') def test_list_containers(self): for driver in self.drivers: containers = driver.list_containers(all=True) self.assertEqual(len(containers), 6) self.assertEqual(containers[0].id, '160936dc54fe8c332095676d9379003534b8cddd7565fa63018996e06dae1b6b') self.assertEqual(containers[0].name, 'hubot') self.assertEqual(containers[0].image.name, 'stackstorm/hubot') def test_deploy_container(self): for driver in self.drivers: image = driver.list_images()[0] container = driver.deploy_container(image=image, name='test') self.assertEqual(container.id, 
'a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303') self.assertEqual(container.name, 'gigantic_goldberg') def test_get_container(self): for driver in self.drivers: container = driver.get_container('a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303') self.assertEqual(container.id, 'a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303') self.assertEqual(container.name, 'gigantic_goldberg') self.assertEqual(container.state, 'running') def test_start_container(self): for driver in self.drivers: container = driver.get_container('a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303') container.start() def test_stop_container(self): for driver in self.drivers: container = driver.get_container('a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303') container.stop() def test_restart_container(self): for driver in self.drivers: container = driver.get_container('a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303') container.restart() def test_delete_container(self): for driver in self.drivers: container = driver.get_container('a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303') container.destroy() def test_ex_rename_container(self): for driver in self.drivers: container = driver.get_container('a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303') driver.ex_rename_container(container, 'bob') def test_ex_get_logs(self): for driver in self.drivers: container = driver.get_container('a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303') logs = driver.ex_get_logs(container) self.assertTrue(logs is not None) def test_ex_search_images(self): for driver in self.drivers: images = driver.ex_search_images('mysql') self.assertEqual(len(images), 25) self.assertEqual(images[0].name, 'mysql') class DockerMockHttp(MockHttp): fixtures = ContainerFileFixtures('docker') version = None def _version( self, method, url, body, headers): if method == 'GET': body = 
self.fixtures.load('linux_124/version.json') else: raise AssertionError('Unsupported method') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _vlinux_124_images_search( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('linux_124/search.json'), {}, httplib.responses[httplib.OK]) def _vmac_124_images_search( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('mac_124/search.json'), {}, httplib.responses[httplib.OK]) def _vlinux_124_images_json( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('linux_124/images.json'), {}, httplib.responses[httplib.OK]) def _vmac_124_images_json( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('linux_124/images.json'), {}, httplib.responses[httplib.OK]) def _vlinux_124_images_create( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('linux_124/create_image.txt'), {'Content-Type': 'application/json', 'transfer-encoding': 'chunked'}, httplib.responses[httplib.OK]) def _vmac_124_images_create( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('mac_124/create_image.txt'), {'Content-Type': 'application/json', 'transfer-encoding': 'chunked'}, httplib.responses[httplib.OK]) def _vlinux_124_containers_json( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('linux_124/containers.json'), {}, httplib.responses[httplib.OK]) def _vmac_124_containers_json( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('mac_124/containers.json'), {}, httplib.responses[httplib.OK]) def _vlinux_124_containers_create( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('linux_124/create_container.json'), {}, httplib.responses[httplib.OK]) def _vmac_124_containers_create( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('mac_124/create_container.json'), {}, httplib.responses[httplib.OK]) def 
_vlinux_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303( self, method, url, body, headers): return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.OK]) def _vmac_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303( self, method, url, body, headers): return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.OK]) def _vlinux_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_start( self, method, url, body, headers): return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.OK]) def _vmac_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_start( self, method, url, body, headers): return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.OK]) def _vlinux_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_restart( self, method, url, body, headers): return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.OK]) def _vmac_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_restart( self, method, url, body, headers): return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.OK]) def _vlinux_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_rename( self, method, url, body, headers): return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.OK]) def _vmac_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_rename( self, method, url, body, headers): return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.OK]) def _vlinux_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_stop( self, method, url, body, headers): return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.OK]) def _vmac_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_stop( self, method, url, body, headers): return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.OK]) def 
_vlinux_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_json( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('linux_124/container_a68.json'), {}, httplib.responses[httplib.OK]) def _vmac_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_json( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('linux_124/container_a68.json'), {}, httplib.responses[httplib.OK]) def _vlinux_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_logs( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('linux_124/logs.txt'), {'content-type': 'text/plain'}, httplib.responses[httplib.OK]) def _vmac_124_containers_a68c1872c74630522c7aa74b85558b06824c5e672cee334296c50fb209825303_logs( self, method, url, body, headers): return (httplib.OK, self.fixtures.load('linux_124/logs.txt'), {'content-type': 'text/plain'}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/container/test_docker_utils.py0000664000175000017500000000600413153541406025256 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys

from libcloud.test import unittest
from libcloud.container.utils.docker import HubClient
from libcloud.utils.py3 import httplib
from libcloud.test.file_fixtures import ContainerFileFixtures
from libcloud.test import MockHttp


class DockerUtilitiesTestCase(unittest.TestCase):
    """Tests for the Docker Hub client utility (HubClient)."""

    def setUp(self):
        # Route all HTTP traffic through the canned-response mock below.
        HubClient.connectionCls.conn_class = DockerMockHttp
        DockerMockHttp.type = None
        DockerMockHttp.use_param = 'a'
        self.driver = HubClient()

    def test_list_tags(self):
        images = self.driver.list_images('ubuntu', max_count=100)
        first = images[0]
        self.assertEqual(len(images), 88)
        self.assertEqual(first.name,
                         'registry.hub.docker.com/ubuntu:xenial')

    def test_get_repository(self):
        repository = self.driver.get_repository('ubuntu')
        self.assertEqual(repository['name'], 'ubuntu')

    def test_get_image(self):
        image = self.driver.get_image('ubuntu', 'latest')
        expected_path = 'registry.hub.docker.com/ubuntu:latest'
        self.assertEqual(image.id, '2343')
        self.assertEqual(image.name, expected_path)
        self.assertEqual(image.path, expected_path)


class DockerMockHttp(MockHttp):
    """Serves canned Docker Hub v2 API responses from fixture files."""

    fixtures = ContainerFileFixtures('docker_utils')

    def _serve_get(self, method, fixture_name):
        # All mocked Hub endpoints are read-only; anything but GET is a
        # test bug, so fail loudly instead of returning a bogus response.
        if method != 'GET':
            raise AssertionError('Unsupported method')
        payload = self.fixtures.load(fixture_name)
        return (httplib.OK, payload, {}, httplib.responses[httplib.OK])

    def _v2_repositories_library_ubuntu_tags_latest(
            self, method, url, body, headers):
        return self._serve_get(
            method, 'v2_repositories_library_ubuntu_tags_latest.json')

    def _v2_repositories_library_ubuntu_tags(
            self, method, url, body, headers):
        return self._serve_get(
            method, 'v2_repositories_library_ubuntu_tags.json')

    def _v2_repositories_library_ubuntu(
            self, method, url, body, headers):
        return self._serve_get(
            method, 'v2_repositories_library_ubuntu.json')


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-libcloud-2.2.1/libcloud/test/loadbalancer/0000775000175000017500000000000013160535110021574 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_dimensiondata_v2_3.py0000664000175000017500000006410113153541406026666 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys from libcloud.utils.py3 import httplib from libcloud.common.types import InvalidCredsError from libcloud.common.dimensiondata import DimensionDataVIPNode, DimensionDataPool from libcloud.common.dimensiondata import DimensionDataPoolMember from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm from libcloud.loadbalancer.drivers.dimensiondata \ import DimensionDataLBDriver as DimensionData from libcloud.loadbalancer.types import State from libcloud.test import MockHttp, unittest from libcloud.test.file_fixtures import LoadBalancerFileFixtures from libcloud.test.secrets import DIMENSIONDATA_PARAMS class DimensionData_v2_3_Tests(unittest.TestCase): def setUp(self): DimensionData.connectionCls.active_api_version = '2.3' DimensionData.connectionCls.conn_class = DimensionDataMockHttp DimensionDataMockHttp.type = None self.driver = DimensionData(*DIMENSIONDATA_PARAMS) def test_invalid_region(self): with self.assertRaises(ValueError): self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah') def test_invalid_creds(self): DimensionDataMockHttp.type = 'UNAUTHORIZED' with self.assertRaises(InvalidCredsError): self.driver.list_balancers() def test_create_balancer(self): self.driver.ex_set_current_network_domain('1234') members = [] members.append(Member( id=None, ip='1.2.3.4', port=80)) balancer = self.driver.create_balancer( name='test', port=80, protocol='http', algorithm=Algorithm.ROUND_ROBIN, members=members, ex_listener_ip_address='5.6.7.8') self.assertEqual(balancer.name, 'test') self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(balancer.ip, '165.180.12.22') self.assertEqual(balancer.port, 80) self.assertEqual(balancer.extra['pool_id'], '9e6b496d-5261-4542-91aa-b50c7f569c54') self.assertEqual(balancer.extra['network_domain_id'], '1234') self.assertEqual(balancer.extra['listener_ip_address'], '5.6.7.8') def test_create_balancer_with_defaults(self): self.driver.ex_set_current_network_domain('1234') 
balancer = self.driver.create_balancer( name='test', port=None, protocol=None, algorithm=None, members=None) self.assertEqual(balancer.name, 'test') self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(balancer.ip, '165.180.12.22') self.assertEqual(balancer.port, None) self.assertEqual(balancer.extra['pool_id'], '9e6b496d-5261-4542-91aa-b50c7f569c54') self.assertEqual(balancer.extra['network_domain_id'], '1234') def test_create_balancer_no_members(self): self.driver.ex_set_current_network_domain('1234') members = None balancer = self.driver.create_balancer( name='test', port=80, protocol='http', algorithm=Algorithm.ROUND_ROBIN, members=members) self.assertEqual(balancer.name, 'test') self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(balancer.ip, '165.180.12.22') self.assertEqual(balancer.port, 80) self.assertEqual(balancer.extra['pool_id'], '9e6b496d-5261-4542-91aa-b50c7f569c54') self.assertEqual(balancer.extra['network_domain_id'], '1234') def test_create_balancer_empty_members(self): self.driver.ex_set_current_network_domain('1234') members = [] balancer = self.driver.create_balancer( name='test', port=80, protocol='http', algorithm=Algorithm.ROUND_ROBIN, members=members) self.assertEqual(balancer.name, 'test') self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(balancer.ip, '165.180.12.22') self.assertEqual(balancer.port, 80) self.assertEqual(balancer.extra['pool_id'], '9e6b496d-5261-4542-91aa-b50c7f569c54') self.assertEqual(balancer.extra['network_domain_id'], '1234') def test_list_balancers(self): bal = self.driver.list_balancers() self.assertEqual(bal[0].name, 'myProduction.Virtual.Listener') self.assertEqual(bal[0].id, '6115469d-a8bb-445b-bb23-d23b5283f2b9') self.assertEqual(bal[0].port, '8899') self.assertEqual(bal[0].ip, '165.180.12.22') self.assertEqual(bal[0].state, State.RUNNING) def test_balancer_list_members(self): extra = {'pool_id': 
'4d360b1f-bc2c-4ab7-9884-1f03ba2768f7', 'network_domain_id': '1234'} balancer = LoadBalancer( id='234', name='test', state=State.RUNNING, ip='1.2.3.4', port=1234, driver=self.driver, extra=extra ) members = self.driver.balancer_list_members(balancer) self.assertEqual(2, len(members)) self.assertEqual(members[0].ip, '10.0.3.13') self.assertEqual(members[0].id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0') self.assertEqual(members[0].port, 9889) def test_balancer_attach_member(self): extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7', 'network_domain_id': '1234'} balancer = LoadBalancer( id='234', name='test', state=State.RUNNING, ip='1.2.3.4', port=1234, driver=self.driver, extra=extra ) member = Member( id=None, ip='112.12.2.2', port=80, balancer=balancer, extra=None) member = self.driver.balancer_attach_member(balancer, member) self.assertEqual(member.id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0') def test_balancer_attach_member_without_port(self): extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7', 'network_domain_id': '1234'} balancer = LoadBalancer( id='234', name='test', state=State.RUNNING, ip='1.2.3.4', port=1234, driver=self.driver, extra=extra ) member = Member( id=None, ip='112.12.2.2', port=None, balancer=balancer, extra=None) member = self.driver.balancer_attach_member(balancer, member) self.assertEqual(member.id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0') self.assertEqual(member.port, None) def test_balancer_detach_member(self): extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7', 'network_domain_id': '1234'} balancer = LoadBalancer( id='234', name='test', state=State.RUNNING, ip='1.2.3.4', port=1234, driver=self.driver, extra=extra ) member = Member( id='3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0', ip='112.12.2.2', port=80, balancer=balancer, extra=None) result = self.driver.balancer_detach_member(balancer, member) self.assertEqual(result, True) def test_destroy_balancer(self): extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7', 
'network_domain_id': '1234'} balancer = LoadBalancer( id='234', name='test', state=State.RUNNING, ip='1.2.3.4', port=1234, driver=self.driver, extra=extra ) response = self.driver.destroy_balancer(balancer) self.assertEqual(response, True) def test_set_get_network_domain_id(self): self.driver.ex_set_current_network_domain('1234') nwd = self.driver.ex_get_current_network_domain() self.assertEqual(nwd, '1234') def test_ex_create_pool_member(self): pool = DimensionDataPool( id='4d360b1f-bc2c-4ab7-9884-1f03ba2768f7', name='test', description='test', status=State.RUNNING, health_monitor_id=None, load_balance_method=None, service_down_action=None, slow_ramp_time=None ) node = DimensionDataVIPNode( id='2344', name='test', status=State.RUNNING, ip='123.23.3.2' ) member = self.driver.ex_create_pool_member( pool=pool, node=node, port=80 ) self.assertEqual(member.id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0') self.assertEqual(member.name, '10.0.3.13') self.assertEqual(member.ip, '123.23.3.2') def test_ex_create_node(self): node = self.driver.ex_create_node( network_domain_id='12345', name='test', ip='123.12.32.2', ex_description='', connection_limit=25000, connection_rate_limit=2000) self.assertEqual(node.name, 'myProductionNode.1') self.assertEqual(node.id, '9e6b496d-5261-4542-91aa-b50c7f569c54') def test_ex_create_pool(self, ): pool = self.driver.ex_create_pool( network_domain_id='1234', name='test', balancer_method='ROUND_ROBIN', ex_description='test', service_down_action='NONE', slow_ramp_time=30) self.assertEqual(pool.id, '9e6b496d-5261-4542-91aa-b50c7f569c54') self.assertEqual(pool.name, 'test') self.assertEqual(pool.status, State.RUNNING) def test_ex_create_virtual_listener(self): listener = self.driver.ex_create_virtual_listener( network_domain_id='12345', name='test', ex_description='test', port=80, pool=DimensionDataPool( id='1234', name='test', description='test', status=State.RUNNING, health_monitor_id=None, load_balance_method=None, service_down_action=None, 
slow_ramp_time=None )) self.assertEqual(listener.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(listener.name, 'test') def test_ex_create_virtual_listener_unusual_port(self): listener = self.driver.ex_create_virtual_listener( network_domain_id='12345', name='test', ex_description='test', port=8900, pool=DimensionDataPool( id='1234', name='test', description='test', status=State.RUNNING, health_monitor_id=None, load_balance_method=None, service_down_action=None, slow_ramp_time=None )) self.assertEqual(listener.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(listener.name, 'test') def test_ex_create_virtual_listener_without_port(self): listener = self.driver.ex_create_virtual_listener( network_domain_id='12345', name='test', ex_description='test', pool=DimensionDataPool( id='1234', name='test', description='test', status=State.RUNNING, health_monitor_id=None, load_balance_method=None, service_down_action=None, slow_ramp_time=None )) self.assertEqual(listener.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(listener.name, 'test') def test_ex_create_virtual_listener_without_pool(self): listener = self.driver.ex_create_virtual_listener( network_domain_id='12345', name='test', ex_description='test') self.assertEqual(listener.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(listener.name, 'test') def test_get_balancer(self): bal = self.driver.get_balancer('6115469d-a8bb-445b-bb23-d23b5283f2b9') self.assertEqual(bal.name, 'myProduction.Virtual.Listener') self.assertEqual(bal.id, '6115469d-a8bb-445b-bb23-d23b5283f2b9') self.assertEqual(bal.port, '8899') self.assertEqual(bal.ip, '165.180.12.22') self.assertEqual(bal.state, State.RUNNING) def test_list_protocols(self): protocols = self.driver.list_protocols() self.assertNotEqual(0, len(protocols)) def test_ex_get_nodes(self): nodes = self.driver.ex_get_nodes() self.assertEqual(2, len(nodes)) self.assertEqual(nodes[0].name, 'ProductionNode.1') self.assertEqual(nodes[0].id, 
'34de6ed6-46a4-4dae-a753-2f8d3840c6f9') self.assertEqual(nodes[0].ip, '10.10.10.101') def test_ex_get_node(self): node = self.driver.ex_get_node('34de6ed6-46a4-4dae-a753-2f8d3840c6f9') self.assertEqual(node.name, 'ProductionNode.2') self.assertEqual(node.id, '34de6ed6-46a4-4dae-a753-2f8d3840c6f9') self.assertEqual(node.ip, '10.10.10.101') def test_ex_update_node(self): node = self.driver.ex_get_node('34de6ed6-46a4-4dae-a753-2f8d3840c6f9') node.connection_limit = '100' result = self.driver.ex_update_node(node) self.assertEqual(result.connection_limit, '100') def test_ex_destroy_node(self): result = self.driver.ex_destroy_node('34de6ed6-46a4-4dae-a753-2f8d3840c6f9') self.assertTrue(result) def test_ex_set_node_state(self): node = self.driver.ex_get_node('34de6ed6-46a4-4dae-a753-2f8d3840c6f9') result = self.driver.ex_set_node_state(node, False) self.assertEqual(result.connection_limit, '10000') def test_ex_get_pools(self): pools = self.driver.ex_get_pools() self.assertNotEqual(0, len(pools)) self.assertEqual(pools[0].name, 'myDevelopmentPool.1') self.assertEqual(pools[0].id, '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7') def test_ex_get_pool(self): pool = self.driver.ex_get_pool('4d360b1f-bc2c-4ab7-9884-1f03ba2768f7') self.assertEqual(pool.name, 'myDevelopmentPool.1') self.assertEqual(pool.id, '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7') def test_ex_update_pool(self): pool = self.driver.ex_get_pool('4d360b1f-bc2c-4ab7-9884-1f03ba2768f7') pool.slow_ramp_time = '120' result = self.driver.ex_update_pool(pool) self.assertTrue(result) def test_ex_destroy_pool(self): response = self.driver.ex_destroy_pool( pool=DimensionDataPool( id='4d360b1f-bc2c-4ab7-9884-1f03ba2768f7', name='test', description='test', status=State.RUNNING, health_monitor_id=None, load_balance_method=None, service_down_action=None, slow_ramp_time=None)) self.assertTrue(response) def test_get_pool_members(self): members = self.driver.ex_get_pool_members('4d360b1f-bc2c-4ab7-9884-1f03ba2768f7') self.assertEqual(2, 
len(members)) self.assertEqual(members[0].id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0') self.assertEqual(members[0].name, '10.0.3.13') self.assertEqual(members[0].status, 'NORMAL') self.assertEqual(members[0].ip, '10.0.3.13') self.assertEqual(members[0].port, 9889) self.assertEqual(members[0].node_id, '3c207269-e75e-11e4-811f-005056806999') def test_get_pool_member(self): member = self.driver.ex_get_pool_member('3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0') self.assertEqual(member.id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0') self.assertEqual(member.name, '10.0.3.13') self.assertEqual(member.status, 'NORMAL') self.assertEqual(member.ip, '10.0.3.13') self.assertEqual(member.port, 9889) def test_set_pool_member_state(self): member = self.driver.ex_get_pool_member('3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0') result = self.driver.ex_set_pool_member_state(member, True) self.assertTrue(result) def test_ex_destroy_pool_member(self): response = self.driver.ex_destroy_pool_member( member=DimensionDataPoolMember( id='', name='test', status=State.RUNNING, ip='1.2.3.4', port=80, node_id='3c207269-e75e-11e4-811f-005056806999'), destroy_node=False) self.assertTrue(response) def test_ex_destroy_pool_member_with_node(self): response = self.driver.ex_destroy_pool_member( member=DimensionDataPoolMember( id='', name='test', status=State.RUNNING, ip='1.2.3.4', port=80, node_id='34de6ed6-46a4-4dae-a753-2f8d3840c6f9'), destroy_node=True) self.assertTrue(response) def test_ex_get_default_health_monitors(self): monitors = self.driver.ex_get_default_health_monitors( '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7' ) self.assertEqual(len(monitors), 6) self.assertEqual(monitors[0].id, '01683574-d487-11e4-811f-005056806999') self.assertEqual(monitors[0].name, 'CCDEFAULT.Http') self.assertFalse(monitors[0].node_compatible) self.assertTrue(monitors[0].pool_compatible) def test_ex_get_default_persistence_profiles(self): profiles = self.driver.ex_get_default_persistence_profiles( '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7' ) 
self.assertEqual(len(profiles), 4) self.assertEqual(profiles[0].id, 'a34ca024-f3db-11e4-b010-005056806999') self.assertEqual(profiles[0].name, 'CCDEFAULT.Cookie') self.assertEqual(profiles[0].fallback_compatible, False) self.assertEqual(len(profiles[0].compatible_listeners), 1) self.assertEqual(profiles[0].compatible_listeners[0].type, 'PERFORMANCE_LAYER_4') def test_ex_get_default_irules(self): irules = self.driver.ex_get_default_irules( '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7' ) self.assertEqual(len(irules), 4) self.assertEqual(irules[0].id, '2b20cb2c-ffdc-11e4-b010-005056806999') self.assertEqual(irules[0].name, 'CCDEFAULT.HttpsRedirect') self.assertEqual(len(irules[0].compatible_listeners), 1) self.assertEqual(irules[0].compatible_listeners[0].type, 'PERFORMANCE_LAYER_4') class DimensionDataMockHttp(MockHttp): fixtures = LoadBalancerFileFixtures('dimensiondata') def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers): return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED]) def _oec_0_9_myaccount(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_virtualListener(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_virtualListener.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_virtualListener_6115469d_a8bb_445b_bb23_d23b5283f2b9(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_virtualListener_6115469d_a8bb_445b_bb23_d23b5283f2b9.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_pool(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_pool.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_pool_4d360b1f_bc2c_4ab7_9884_1f03ba2768f7(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_pool_4d360b1f_bc2c_4ab7_9884_1f03ba2768f7.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_poolMember(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_poolMember.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_poolMember_3dd806a2_c2c8_4c0c_9a4f_5219ea9266c0(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_poolMember_3dd806a2_c2c8_4c0c_9a4f_5219ea9266c0.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_createPool(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_createPool.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_createNode(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_createNode.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_addPoolMember(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_addPoolMember.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_createVirtualListener(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_createVirtualListener.xml') return 
(httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_removePoolMember(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_removePoolMember.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_deleteVirtualListener(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_deleteVirtualListener.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_deletePool(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_deletePool.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_deleteNode(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_deleteNode.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_node(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_node.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_node_34de6ed6_46a4_4dae_a753_2f8d3840c6f9(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_node_34de6ed6_46a4_4dae_a753_2f8d3840c6f9.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_editNode(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_editNode.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_editPool(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_editPool.xml') return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_editPoolMember(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_editPoolMember.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_defaultHealthMonitor(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_defaultHealthMonitor.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_defaultPersistenceProfile(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_defaultPersistenceProfile.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_defaultIrule(self, method, url, body, headers): body = self.fixtures.load( 'networkDomainVip_defaultIrule.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/loadbalancer/__init__.py0000664000175000017500000000141512701023453023710 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/0000775000175000017500000000000013160535107023453 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/gogrid/0000775000175000017500000000000013160535110024720 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/gogrid/unexpected_error.json0000664000175000017500000000043612701023453031175 0ustar kamikami00000000000000{"summary":{"total":1,"start":0,"returned":1},"status":"failure","method":"/grid/loadbalancer/add","list":[{"message":"An unexpected server error has occured. Please email this error to apisupport@gogrid.com. Error Message : null","object":"error","errorcode":"UnexpectedException"}]} apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json0000664000175000017500000001337312701023453031240 0ustar kamikami00000000000000{ "list": [ { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 23530, "name": "test2", "object": "loadbalancer", "os": { "description": "The F5 Load Balancer.", "id": 1, "name": "F5", "object": "option" }, "persistence": { "description": "", "id": 1, "name": "None", "object": "option" }, "realiplist": [ { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868108, "ip": "10.0.0.75", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 }, { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868109, "ip": "10.0.0.76", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": 
"Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 }, { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868110, "ip": "10.0.0.77", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 }, { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868111, "ip": "10.0.0.78", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 } ], "state": { "description": "Loadbalancer is enabled and on.", "id": 1, "name": "On", "object": "option" }, "type": { "description": "", "id": 1, "name": "Round Robin", "object": "option" }, "virtualip": { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868101, "ip": "10.0.0.68", "object": "ip", "public": true, "state": { "description": "IP is reserved or in use", "id": 2, "name": "Assigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 } } ], "method": "/grid/loadbalancer/edit", "status": "success", "summary": { "numpages": 0, "returned": 1, "start": 0, "total": 1 } } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/gogrid/ip_list.json0000664000175000017500000002256312701023453027270 0ustar kamikami00000000000000{ "list": [ { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868101, "ip": "10.0.0.68", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, 
"subnet": "10.0.0.64/255.255.255.240" }, { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868102, "ip": "10.0.0.69", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868106, "ip": "10.0.0.73", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868107, "ip": "10.0.0.74", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868108, "ip": "10.0.0.75", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868109, "ip": "10.0.0.76", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868110, "ip": "10.0.0.77", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, { "datacenter": { "description": 
"US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868111, "ip": "10.0.0.78", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, { "datacenter": { "description": "US East 1 Datacenter", "id": 2, "name": "US-East-1", "object": "option" }, "id": 2277337, "ip": "10.0.0.244", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.240/255.255.255.240" }, { "datacenter": { "description": "US East 1 Datacenter", "id": 2, "name": "US-East-1", "object": "option" }, "id": 2277338, "ip": "10.0.0.245", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.240/255.255.255.240" }, { "datacenter": { "description": "US East 1 Datacenter", "id": 2, "name": "US-East-1", "object": "option" }, "id": 2277342, "ip": "10.0.0.249", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.240/255.255.255.240" }, { "datacenter": { "description": "US East 1 Datacenter", "id": 2, "name": "US-East-1", "object": "option" }, "id": 2277343, "ip": "10.0.0.250", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.240/255.255.255.240" }, { "datacenter": { "description": "US East 1 Datacenter", "id": 2, "name": "US-East-1", "object": "option" }, "id": 2277344, "ip": "10.0.0.251", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.240/255.255.255.240" }, { "datacenter": { "description": "US East 1 Datacenter", "id": 2, "name": "US-East-1", "object": 
"option" }, "id": 2277345, "ip": "10.0.0.252", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.240/255.255.255.240" }, { "datacenter": { "description": "US East 1 Datacenter", "id": 2, "name": "US-East-1", "object": "option" }, "id": 2277346, "ip": "10.0.0.253", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.240/255.255.255.240" }, { "datacenter": { "description": "US East 1 Datacenter", "id": 2, "name": "US-East-1", "object": "option" }, "id": 2277347, "ip": "10.0.0.254", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.240/255.255.255.240" } ], "method": "/grid/ip/list", "status": "success", "summary": { "numpages": 0, "returned": 16, "start": 0, "total": 16 } } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json0000664000175000017500000001151512701023453031037 0ustar kamikami00000000000000{ "list": [ { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "name": "test2", "id": 123, "object": "loadbalancer", "os": { "description": "The F5 Load Balancer.", "id": 1, "name": "F5", "object": "option" }, "persistence": { "description": "", "id": 1, "name": "None", "object": "option" }, "realiplist": [ { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868109, "ip": "10.1.0.10", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.1.0.10/255.255.255.240" }, "object": "ipportpair", "port": 80 }, { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": 
"option" }, "id": 1868110, "ip": "10.1.0.11", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.1.0.11/255.255.255.240" }, "object": "ipportpair", "port": 80 }, { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868111, "ip": "10.1.0.12", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.1.0.12/255.255.255.240" }, "object": "ipportpair", "port": 80 } ], "state": { "description": "Loadbalancer is enabled and on.", "id": 1, "name": "On", "object": "option" }, "type": { "description": "", "id": 1, "name": "Round Robin", "object": "option" }, "virtualip": { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868101, "ip": "1.1.1.1", "object": "ip", "public": true, "state": { "description": "IP is reserved or in use", "id": 2, "name": "Assigned", "object": "option" }, "subnet": "1.1.1.1/255.255.255.240" }, "object": "ipportpair", "port": 80 } } ], "method": "/grid/loadbalancer/add", "status": "success", "summary": { "numpages": 0, "returned": 1, "start": 0, "total": 1 } } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json0000664000175000017500000001721712701023453031267 0ustar kamikami00000000000000{ "list": [ { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 23517, "name": "foo", "object": "loadbalancer", "os": { "description": "The F5 Load Balancer.", "id": 1, "name": "F5", "object": "option" }, "persistence": { "description": "", "id": 1, "name": "None", "object": "option" }, "realiplist": [ { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868111, "ip": 
"10.0.0.78", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 } ], "state": { "description": "Loadbalancer is enabled and on.", "id": 1, "name": "On", "object": "option" }, "type": { "description": "", "id": 1, "name": "Round Robin", "object": "option" }, "virtualip": { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868099, "ip": "10.0.0.66", "object": "ip", "public": true, "state": { "description": "IP is reserved or in use", "id": 2, "name": "Assigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 } }, { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 23526, "name": "bar", "object": "loadbalancer", "os": { "description": "The F5 Load Balancer.", "id": 1, "name": "F5", "object": "option" }, "persistence": { "description": "", "id": 1, "name": "None", "object": "option" }, "realiplist": [ { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868109, "ip": "10.0.0.76", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 }, { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868110, "ip": "10.0.0.77", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 }, { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": 
"option" }, "id": 1868111, "ip": "10.0.0.78", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 } ], "state": { "description": "Loadbalancer is enabled and on.", "id": 1, "name": "On", "object": "option" }, "type": { "description": "", "id": 1, "name": "Round Robin", "object": "option" }, "virtualip": { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868100, "ip": "10.0.0.67", "object": "ip", "public": true, "state": { "description": "IP is reserved or in use", "id": 2, "name": "Assigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 } } ], "method": "/grid/loadbalancer/list", "status": "success", "summary": { "numpages": 0, "returned": 2, "start": 0, "total": 2 } } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json0000664000175000017500000001152412701023453031066 0ustar kamikami00000000000000{ "list": [ { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 23530, "name": "test2", "object": "loadbalancer", "os": { "description": "The F5 Load Balancer.", "id": 1, "name": "F5", "object": "option" }, "persistence": { "description": "", "id": 1, "name": "None", "object": "option" }, "realiplist": [ { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868109, "ip": "10.0.0.76", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 }, { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868110, 
"ip": "10.0.0.77", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 }, { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868111, "ip": "10.0.0.78", "object": "ip", "public": true, "state": { "description": "IP is available to use", "id": 1, "name": "Unassigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 } ], "state": { "description": "Loadbalancer is enabled and on.", "id": 1, "name": "On", "object": "option" }, "type": { "description": "", "id": 1, "name": "Round Robin", "object": "option" }, "virtualip": { "ip": { "datacenter": { "description": "US West 1 Datacenter", "id": 1, "name": "US-West-1", "object": "option" }, "id": 1868101, "ip": "10.0.0.68", "object": "ip", "public": true, "state": { "description": "IP is reserved or in use", "id": 2, "name": "Assigned", "object": "option" }, "subnet": "10.0.0.64/255.255.255.240" }, "object": "ipportpair", "port": 80 } } ], "method": "/grid/loadbalancer/get", "status": "success", "summary": { "numpages": 0, "returned": 1, "start": 0, "total": 1 } } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/0000775000175000017500000000000013160535110025401 5ustar kamikami00000000000000././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.0000664000175000017500000000140712701023453033744 0ustar kamikami00000000000000{"protocols": [ { "name": "HTTP", "port": "80" }, { "name": "FTP", "port": "21" }, { "name": "IMAPv4", "port": "143" }, { "name": "POP3", "port": "110" }, { "name": "SMTP", "port": 
"25" }, { "name": "LDAP", "port": "389" }, { "name": "HTTPS", "port": "443" }, { "name": "IMAPS", "port": "993" }, { "name": "POP3S", "port": "995" }, { "name": "LDAPS", "port": "636" } ] } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/auth_2_0.json0000664000175000017500000000521712701023453027704 0ustar kamikami00000000000000{ "access": { "token": { "id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "expires": "2031-03-14T08:10:14.000-05:00" }, "serviceCatalog": [ { "endpoints": [ { "region": "DFW", "tenantId": "MossoCloudFS_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "publicURL": "https://storage101.dfw1.clouddrive.com/v1/MossoCloudFS_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "internalURL": "https://snet-storage101.dfw1.clouddrive.com/v1/MossoCloudFS_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" } ], "name": "cloudFiles", "type": "object-store" }, { "endpoints": [ { "region": "DFW", "tenantId": "11111", "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/11111", "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", "versionList": "https://dfw.servers.api.rackspacecloud.com/", "versionId": "2" } ], "name": "cloudServersOpenStack", "type": "compute" }, { "endpoints": [ { "tenantId": "11111", "publicURL": "https://servers.api.rackspacecloud.com/v1.0/11111", "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", "versionList": "https://servers.api.rackspacecloud.com/", "versionId": "1.0" } ], "name": "cloudServers", "type": "compute" }, { "endpoints": [ { "region": "DFW", "tenantId": "MossoCloudFS_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "publicURL": "https://cdn1.clouddrive.com/v1/MossoCloudFS_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" } ], "name": "cloudFilesCDN", "type": "rax:object-cdn" } ], "user": { "id": "9586", "roles": [ { "id": "identity:default", "description": "Default Role.", "name": "identity:default" } ], "name": "libclouduser" } } } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94697_https_health_monitor.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94697_http0000664000175000017500000000200712701023453033400 0ustar kamikami00000000000000{"loadBalancer":{"name":"new ord balancer","id":94697,"protocol":"HTTPS","port":443,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"PENDING_UPDATE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"OFFLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"healthMonitor":{"type":"HTTPS","path":"/test","delay":15,"timeout":12,"attemptsBeforeDeactivation":5,"statusRegex":"^[234][0-9][0-9]$","bodyRegex":"abcdef"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T19:34:34Z"},"accessList":[{"address":"0.0.0.0/0","id":2883,"type":"DENY"},{"address":"2001:4801:7901::6/64","id":2884,"type":"ALLOW"}],"connectionThrottle":{"maxConnections":100,"minConnections":25,"maxConnectionRate":25,"rateInterval":5},"connectionLogging":{"enabled":true}}} apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8291.json0000664000175000017500000000261412701023453033216 0ustar kamikami00000000000000{ "loadBalancer": { "algorithm": "RANDOM", "cluster": { "name": "ztm-n05.lbaas.ord1.rackspace.net" }, "connectionLogging": { "enabled": false }, "created": { "time": "2011-04-07T16:27:50Z" }, "id": 8291, "name": "test8291", "nodes": [ { "address": "10.1.0.11", "condition": "ENABLED", "id": 30944, "port": 80, "status": "ONLINE", "weight": 12 }, { "address": "10.1.0.10", "condition": "DISABLED", "id": 30945, "port": 80, "status": "OFFLINE", "weight": 8 }, { "address": 
"10.1.0.9", "condition": "DRAINING", "id": 30946, "port": 8080, "status": "DRAINING", "weight": 20 } ], "port": 80, "protocol": "HTTP", "status": "ACTIVE", "updated": { "time": "2011-04-07T16:28:12Z" }, "virtualIps": [ { "address": "1.1.1.1", "id": 1151, "ipVersion": "IPV4", "type": "PUBLIC" } ] } } ././@LongLink0000000000000000000000000000020000000000000011205 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94693_weighted_least_connections.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94693_weig0000664000175000017500000000120412701023453033346 0ustar kamikami00000000000000{"loadBalancer":{"name":"new ord balancer","id":18940,"protocol":"HTTP","port":80,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"PENDING_UPDATE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T15:18:38Z"},"connectionLogging":{"enabled":false}}} ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94696_http_health_monitor.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94696_http0000664000175000017500000000166712701023453033412 0ustar kamikami00000000000000{"loadBalancer":{"name":"new ord 
balancer","id":94696,"protocol":"HTTP","port":80,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"healthMonitor":{"type":"HTTP","path":"/","delay":10,"timeout":5,"attemptsBeforeDeactivation":2,"statusRegex":"^[234][0-9][0-9]$","bodyRegex":"Hello World!"},"sessionPersistence":{"persistenceType":"HTTP_COOKIE"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T16:51:32Z"},"connectionThrottle":{"maxConnections":100,"minConnections":25,"maxConnectionRate":25,"rateInterval":5},"connectionLogging":{"enabled":true}}}apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json0000664000175000017500000000264412701023453033220 0ustar kamikami00000000000000{ "loadBalancer": { "algorithm": "RANDOM", "cluster": { "name": "ztm-n05.lbaas.ord1.rackspace.net" }, "connectionLogging": { "enabled": false }, "created": { "time": "2011-04-07T16:27:50Z" }, "id": 8290, "name": "test2", "nodes": [ { "address": "10.1.0.11", "condition": "ENABLED", "id": 30944, "port": 80, "status": "ONLINE", "weight": 12 }, { "address": "10.1.0.10", "condition": "DISABLED", "id": 30945, "port": 80, "status": "OFFLINE", "weight": 8 }, { "address": "10.1.0.9", "condition": "DRAINING", "id": 30946, "port": 8080, "status": "DRAINING", "weight": 20 } ], "port": 80, "protocol": "HTTP", "status": "ACTIVE", "updated": { "time": "2011-04-07T16:28:12Z" }, "virtualIps": [ { "address": "1.1.1.1", "id": 1151, "ipVersion": "IPV4", "type": "PUBLIC" } ] } } 
apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292.json0000664000175000017500000000261412701023453033217 0ustar kamikami00000000000000{ "loadBalancer": { "algorithm": "RANDOM", "cluster": { "name": "ztm-n05.lbaas.ord1.rackspace.net" }, "connectionLogging": { "enabled": false }, "created": { "time": "2011-04-07T16:27:50Z" }, "id": 8292, "name": "test8292", "nodes": [ { "address": "10.1.0.11", "condition": "ENABLED", "id": 30944, "port": 80, "status": "ONLINE", "weight": 12 }, { "address": "10.1.0.10", "condition": "DISABLED", "id": 30945, "port": 80, "status": "OFFLINE", "weight": 8 }, { "address": "10.1.0.9", "condition": "DRAINING", "id": 30946, "port": 8080, "status": "DRAINING", "weight": 20 } ], "port": 80, "protocol": "HTTP", "status": "ACTIVE", "updated": { "time": "2011-04-07T16:28:12Z" }, "virtualIps": [ { "address": "1.1.1.2", "id": 1151, "ipVersion": "IPV4", "type": "PUBLIC" } ] } } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_with_access_list.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_with0000664000175000017500000000212212701023453033373 0ustar kamikami00000000000000{"loadBalancer":{"name":"new ord 
balancer","id":94698,"protocol":"HTTPS","port":443,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"OFFLINE","condition":"DRAINING","weight":25},{"address":"10.181.238.11","id":97683,"port":443,"status":"OFFLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"healthMonitor":{"type":"CONNECT","delay":5,"timeout":10,"attemptsBeforeDeactivation":4},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2012-01-05T19:31:38Z"},"accessList":[{"address":"0.0.0.0/0","id":2883,"type":"DENY"},{"address":"2001:4801:7901::6/64","id":2884,"type":"ALLOW"},{"address":"8.8.8.8/0","id":3006,"type":"DENY"}],"connectionThrottle":{"maxConnections":200,"minConnections":50,"maxConnectionRate":50,"rateInterval":10},"connectionLogging":{"enabled":true}}}apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_3xxx.json0000664000175000017500000000220312701023453033517 0ustar kamikami00000000000000{ "loadBalancer": { "algorithm": "UUUUUUUUUU", "cluster": { "name": "ztm-n05.lbaas.ord1.rackspace.net" }, "connectionLogging": { "enabled": false }, "created": { "time": "2011-04-07T16:27:50Z" }, "id": 9999999, "name": "test2", "nodes": [ { "address": "10.1.0.11", "condition": "ENABLED", "id": 30944, "port": 80, "status": "ONLINE" }, { "address": "10.1.0.10", "condition": "ENABLED", "id": 30945, "port": 80, "status": "ONLINE" } ], "port": 88888, "protocol": "XXXXX", "status": "ACTIVE", "updated": { "time": "2011-04-07T16:28:12Z" }, "virtualIps": [ { "address": "1.1.1.1", "id": 1151, "ipVersion": "IPV4", "type": "PUBLIC" } ] } } 
apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/_v2_0__auth.json0000664000175000017500000001423412701023453030367 0ustar kamikami00000000000000{ "access": { "token": { "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", "expires": "2031-11-23T21:00:14.000-06:00" }, "serviceCatalog": [ { "endpoints": [ { "region": "ORD", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://cdn.clouddrive.com/v1/MossoCloudFS", "version": { "versionInfo": "https://cdn2.clouddrive.com/v1/", "versionList": "https://cdn2.clouddrive.com/", "versionId": "1" } }, { "region": "LON", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://cdn.clouddrive.com/v1/MossoCloudFS", "version": { "versionInfo": "https://cdn2.clouddrive.com/v1/", "versionList": "https://cdn2.clouddrive.com/", "versionId": "1" } } ], "name": "cloudFilesCDN", "type": "rax:object-cdn" }, { "endpoints": [ { "region": "ORD", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://storage4.ord1.clouddrive.com/v1/MossoCloudFS", "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" }, { "region": "LON", "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://storage4.lon1.clouddrive.com/v1/MossoCloudFS", "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" } ], "name": "cloudFiles", "type": "object-store" }, { "endpoints": [ { "tenantId": "1337", "publicURL": "https://servers.api.rackspacecloud.com/v1.0/1337", "version": { "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", "versionList": "https://servers.api.rackspacecloud.com/", "versionId": "1.0" } } ], "name": "cloudServers", "type": "compute" }, { "endpoints": [ { "region": "RegionOne", "tenantId": "1337", "publicURL": "https://127.0.0.1/v2/1337", "versionInfo": "https://127.0.0.1/v2/", "versionList": 
"https://127.0.0.1/", "versionId": "2" } ], "name": "nova", "type": "compute" }, { "endpoints": [ { "region": "DFW", "tenantId": "613469", "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/1337", "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", "versionList": "https://dfw.servers.api.rackspacecloud.com/", "versionId": "2" }, { "region": "ORD", "tenantId": "613469", "publicURL": "https://ord.servers.api.rackspacecloud.com/v2/1337", "versionInfo": "https://ord.servers.api.rackspacecloud.com/v2/", "versionList": "https://ord.servers.api.rackspacecloud.com/", "versionId": "2" } ], "name": "cloudServersOpenStack", "type": "compute" }, { "endpoints": [ { "region": "DFW", "tenantId": "1337", "publicURL": "https://preprod.dfw.servers.api.rackspacecloud.com/v2/1337" } ], "name": "cloudServersPreprod", "type": "compute" }, { "name": "cloudLoadBalancers", "endpoints": [ { "region": "SYD", "tenantId": "11111", "publicURL": "https://syd.loadbalancers.api.rackspacecloud.com/v1.0/11111" }, { "region": "DFW", "tenantId": "11111", "publicURL": "https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/11111" }, { "region": "ORD", "tenantId": "11111", "publicURL": "https://ord.loadbalancers.api.rackspacecloud.com/v1.0/11111" }, { "region": "LON", "tenantId": "11111", "publicURL": "https://lon.loadbalancers.api.rackspacecloud.com/v1.0/11111" } ], "type": "rax:load-balancer" } ], "user": { "id": "7", "roles": [ { "id": "identity:default", "description": "Default Role.", "name": "identity:default" } ], "name": "testuser" } } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_nodeaddress.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_nodeaddres0000664000175000017500000000057412701023453033756 0ustar kamikami00000000000000{"loadBalancers": [ { "name": "First Loadbalancer", "id": 1, "status": 
"ACTIVE" }, { "name": "Second Loadbalancer", "id": 2, "status": "PENDING_UPDATE" }, { "name": "Third Loadbalancer", "id": 8, "status": "ERROR" } ] } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json0000664000175000017500000000237212701023453032534 0ustar kamikami00000000000000{ "loadBalancers": [ { "algorithm": "RANDOM", "created": { "time": "2011-04-06T21:25:19+0000" }, "id": 8155, "name": "test0", "port": 80, "protocol": "HTTP", "status": "ACTIVE", "updated": { "time": "2011-04-06T21:25:31+0000" }, "virtualIps": [ { "address": "1.1.1.25", "id": 965, "ipVersion": "IPV4", "type": "PUBLIC" } ] }, { "algorithm": "RANDOM", "created": { "time": "2011-04-06T21:26:22+0000" }, "id": 8156, "name": "test1", "port": 80, "protocol": "HTTP", "status": "ACTIVE", "updated": { "time": "2011-04-06T21:26:33+0000" }, "virtualIps": [ { "address": "1.1.1.83", "id": 1279, "ipVersion": "IPV4", "type": "PUBLIC" } ] } ] } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94695_full_details.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94695_full0000664000175000017500000000156412701023453033370 0ustar kamikami00000000000000{"loadBalancer":{"name":"new ord 
balancer","id":94695,"protocol":"HTTP","port":80,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"OFFLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"healthMonitor":{"type":"CONNECT","delay":10,"timeout":5,"attemptsBeforeDeactivation":2},"sessionPersistence":{"persistenceType":"HTTP_COOKIE"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T16:01:36Z"},"connectionThrottle":{"maxConnections":200,"minConnections":50,"maxConnectionRate":50,"rateInterval":10},"connectionLogging":{"enabled":true}}}././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18945_ex_public_ips.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18945_ex_p0000664000175000017500000000113512701023453033345 0ustar kamikami00000000000000{"loadBalancer":{"name":"new ord balancer","id":18940,"protocol":"HTTP","port":80,"algorithm":"RANDOM","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED"}],"created":{"time":"2011-12-09T13:30:40Z"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T13:32:05Z"},"connectionLogging":{"enabled":false}}} ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_ex_public_ips.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_ex_p0000664000175000017500000000113512701023453033340 0ustar kamikami00000000000000{"loadBalancer":{"name":"new ord balancer","id":18940,"protocol":"HTTP","port":80,"algorithm":"RANDOM","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED"}],"created":{"time":"2011-12-09T13:30:40Z"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T13:32:05Z"},"connectionLogging":{"enabled":false}}} apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json0000664000175000017500000000227312701023453033601 0ustar kamikami00000000000000{ "loadBalancer": { "algorithm": "RANDOM", "cluster": { "name": "ztm-n05.lbaas.ord1.rackspace.net" }, "connectionLogging": { "enabled": false }, "created": { "time": "2011-04-07T16:27:50+0000" }, "id": 8290, "name": "test2", "nodes": [ { "address": "10.1.0.11", "condition": "ENABLED", "id": 30944, "port": 80, "status": "ONLINE", "weight": 1 }, { "address": "10.1.0.10", "condition": "ENABLED", "id": 30945, "port": 80, "status": "ONLINE", "weight": 1 } ], "port": 80, "protocol": "HTTP", "status": "BUILD", "updated": { "time": "2011-04-07T16:27:50+0000" }, "virtualIps": [ { "address": "1.1.1.1", "id": 1151, "ipVersion": "IPV4", "type": "PUBLIC" } ] } } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94692_weighted_round_robin.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94692_weig0000664000175000017500000000116512701023453033353 0ustar kamikami00000000000000{"loadBalancer":{"name":"new ord balancer","id":18940,"protocol":"HTTP","port":80,"algorithm":"WEIGHTED_ROUND_ROBIN","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T14:39:40Z"},"connectionLogging":{"enabled":false}}}././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_errorpage.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_error0000664000175000017500000000007312701023453033453 0ustar kamikami00000000000000{"errorpage":{"content":"Generic Error Page"}}././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94694_unknown_algorithm.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94694_unkn0000664000175000017500000000120612701023453033371 0ustar kamikami00000000000000{"loadBalancer":{"name":"new ord 
balancer","id":18940,"protocol":"HTTP","port":80,"algorithm":"UNSPECIFIED_FUTURE_ALGORITHM","status":"PENDING_UPDATE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T15:18:38Z"},"connectionLogging":{"enabled":false}}} ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_0_slug_loadbalancers_8290_usage_current.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_0_slug_loadbalancers_8290_usa0000664000175000017500000000207612701023453033336 0ustar kamikami00000000000000{ "links": [], "loadBalancerUsageRecords": [ { "id": 1234, "startTime": "2013-04-22T22:00:00Z", "endTime": "2013-04-22T23:00:00Z", "numVips": 1, "incomingTransfer": 0, "outgoingTransfer": 0, "incomingTransferSsl": 6182163, "outgoingTransferSsl": 9702071, "vipType": "PUBLIC", "averageNumConnections": 0, "averageNumConnectionsSsl": 14.9166666666666, "numPolls": 12, "sslMode": "ON" }, { "id": 12345, "startTime": "2013-04-22T23:00:00Z", "endTime": "2013-04-23T00:00:00Z", "numVips": 1, "incomingTransfer": 0, "outgoingTransfer": 0, "incomingTransferSsl": 6815503, "outgoingTransferSsl": 10474092, "vipType": "PUBLIC", "averageNumConnections": 0, "averageNumConnectionsSsl": 19.9166666666667, "numPolls": 12, "sslMode": "ON" } ] } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes0000664000175000017500000000117712701023453033440 0ustar kamikami00000000000000{ "nodes": [ { "address": "10.1.0.11", "condition": "ENABLED", "id": 30944, "port": 80, "status": "ONLINE", "weight": 12 }, { "address": "10.1.0.10", "condition": "DISABLED", "id": 30945, "port": 80, "status": "OFFLINE", "weight": 8 }, { "address": "10.1.0.9", "condition": "DRAINING", "id": 30946, "port": 8080, "status": "DRAINING", "weight": 20 } ] } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_errorpage.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_erro0000664000175000017500000000065512701023453033362 0ustar kamikami00000000000000{"errorpage":{"content":"Service Unavailable

Service Unavailable

The service is temporarily unavailable. Please try again later.

"}}././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292_nodes_post.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292_nodes0000664000175000017500000000064412701023453033440 0ustar kamikami00000000000000{ "nodes": [ { "address": "10.1.0.12", "condition": "ENABLED", "id": 30972, "port": 80, "status": "ONLINE", "weight": 1 }, { "address": "10.1.0.13", "condition": "ENABLED", "id": 30973, "port": 80, "status": "ONLINE", "weight": 1 } ] } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes0000664000175000017500000000034312701023453033432 0ustar kamikami00000000000000{ "nodes": [ { "address": "10.1.0.12", "condition": "ENABLED", "id": 30972, "port": 80, "status": "ONLINE", "weight": 1 } ] } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/error_page_default.json0000664000175000017500000000065512701023453032135 0ustar kamikami00000000000000{"errorpage":{"content":"Service Unavailable

Service Unavailable

The service is temporarily unavailable. Please try again later.

"}}././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18941_ex_private_ips.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18941_ex_p0000664000175000017500000000113112701023453033335 0ustar kamikami00000000000000{"loadBalancer":{"name":"servicenet","id":18941,"protocol":"HTTP","port":80,"algorithm":"RANDOM","status":"BUILD","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.183.32.247","id":94693,"port":80,"status":"ONLINE","condition":"ENABLED"}],"created":{"time":"2011-12-09T13:33:28Z"},"virtualIps":[{"address":"10.183.252.175","id":572,"type":"SERVICENET","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T13:33:28Z"},"connectionLogging":{"enabled":false}}}././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_accesslist.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_acce0000664000175000017500000000017412701023453033302 0ustar kamikami00000000000000{"accessList":[{"address":"0.0.0.0/0","id":2883,"type":"DENY"},{"address":"2001:4801:7901::6/64","id":2884,"type":"ALLOW"}]}././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_access_list.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_acce0000664000175000017500000000025412701023453033317 0ustar 
kamikami00000000000000{"accessList":[{"address":"0.0.0.0/0","id":2883,"type":"DENY"},{"address":"2001:4801:7901::6/64","id":2884,"type":"ALLOW"},{"address":"8.8.8.8/0","id":3006,"type":"DENY"}]}././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_algorithms.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_algorithms0000664000175000017500000000023512701023453034011 0ustar kamikami00000000000000{"algorithms":[{"name":"LEAST_CONNECTIONS"},{"name":"RANDOM"},{"name":"ROUND_ROBIN"},{"name":"WEIGHTED_LEAST_CONNECTIONS"},{"name":"WEIGHTED_ROUND_ROBIN"}]} ././@LongLink0000000000000000000000000000020700000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94700_http_health_monitor_no_body_regex.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94700_http0000664000175000017500000000163412701023453033366 0ustar kamikami00000000000000{"loadBalancer":{"name":"new ord 
balancer","id":94700,"protocol":"HTTP","port":80,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"healthMonitor":{"type":"HTTP","path":"/","delay":10,"timeout":5,"attemptsBeforeDeactivation":2,"statusRegex":"^[234][0-9][0-9]$"},"sessionPersistence":{"persistenceType":"HTTP_COOKIE"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T16:51:32Z"},"connectionThrottle":{"maxConnections":100,"minConnections":25,"maxConnectionRate":25,"rateInterval":5},"connectionLogging":{"enabled":true}}}apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/0000775000175000017500000000000013160535110024225 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/create_load_balancer_http_listener.xml0000664000175000017500000000027212701023453034007 0ustar kamikami00000000000000 CEF72CEB-54B6-4AE8-B225-F876FF7BA984 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/delete_load_balancer.xml0000664000175000017500000000023712701023453031043 0ustar kamikami00000000000000 CEF72CEB-54B6-4AE8-B225-F876FF7BA984 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/upload_server_certificate.xml0000664000175000017500000000051312701023453032164 0ustar kamikami00000000000000 365F4154-92F6-4AE4-92F8-7FF34B540710 idkp-123-cn-test-01 cert1 01:DF:AB:CD apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/remove_backend_servers.xml0000664000175000017500000000061712701023453031472 0ustar kamikami00000000000000 365F4154-92F6-4AE4-92F8-7FF34B540710 139a00604ad-cn-east-hangzhou-01 node1 100 
apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/start_load_balancer_listener.xml0000664000175000017500000000027012701023453032640 0ustar kamikami00000000000000 CEF72CEB-54B6-4AE8-B225-F876FF7BA984 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/set_server_certificate_name.xml0000664000175000017500000000026012701023453032472 0ustar kamikami00000000000000 CEF72CEB-54B6-4AE8-B225-F876FF7BA984 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/add_backend_servers.xml0000664000175000017500000000061112701023453030717 0ustar kamikami00000000000000 365F4154-92F6-4AE4-92F8-7FF34B540710 139a00604ad-cn-east-hangzhou-01 node1 100 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/describe_load_balancers.xml0000664000175000017500000000142412701023453031543 0ustar kamikami00000000000000 1452403099000 cn-hangzhou-b classic cn-hangzhou-d cn-hangzhou 2016-01-10T13:18Z cn-hangzhou-dg-a01
120.27.186.149
internet 15229f88562-cn-hangzhou-dg-a01 abc active
A0DAF856-B181-4098-B507-6CE9E40420E8
apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/describe_load_balancer_attribute.xml0000664000175000017500000000247612701023453033453 0ustar kamikami00000000000000 1452403099000 cn-hangzhou i-23tshnsdq 100 15229f88562-cn-hangzhou-dg-a01 80 paybytraffic cn-hangzhou-b classic cn-hangzhou-d http 80 2016-01-10 13:18:19
120.27.186.149
cn-hangzhou-dg-a01 D67B80CD-C359-4352-AE3C-4F286CC3782D internet http 80 active 1
apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/describe_server_certificates.xml0000664000175000017500000000130612701023453032644 0ustar kamikami00000000000000 365F4154-92F6-4AE4-92F8-7FF34B540710 139a00604ad-cn-east-hangzhou-01 abe A:B:E 139a00604ad-cn-east-hangzhou-02 abf A:B:F apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/create_load_balancer.xml0000664000175000017500000000070012701023453031037 0ustar kamikami00000000000000 365F4154-92F6-4AE4-92F8-7FF34B540710 139a00604ad-cn-east-hangzhou-01
42.250.6.36
classic cn-hangzhou-d cn-hangzhou-b balancer1
apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/slb/delete_server_certificate.xml0000664000175000017500000000025612701023453032146 0ustar kamikami00000000000000 CEF72CEB-54B6-4AE8-B225-F876FF7BA984 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/0000775000175000017500000000000013160535110025601 5ustar kamikami00000000000000././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.0000664000175000017500000000014312701023453033631 0ustar kamikami00000000000000{ "createloadbalancerruleresponse" : {"id":"2253","jobid":"e3f6740b-c9e3-40c5-83e2-04d929482ef4"} }apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json0000664000175000017500000000023412701023453033230 0ustar kamikami00000000000000{ "queryasyncjobresultresponse" : {"jobid":17340,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/listZones_default.json0000664000175000017500000000017412701023453032176 0ustar kamikami00000000000000{ "listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json0000664000175000017500000000007612701023453033757 0ustar kamikami00000000000000{ "associateipaddressresponse" : {"jobid":17346,"id":34000} } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_defa0000664000175000017500000000007312701023453033706 0ustar kamikami00000000000000{ "removefromloadbalancerruleresponse" : {"jobid":17340} } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_d0000664000175000017500000000336112701023453033737 0ustar kamikami00000000000000{ "listloadbalancerruleinstancesresponse" : { "loadbalancerruleinstance" : [ {"id":2614,"name":"test_1308874974","displayname":"test_1308874974","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-24T00:22:56+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.14%","networkkbsread":2185,"networkkbswrite":109,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3914,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.3.122","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}, {"id":2615,"name":"test_1308875456","displayname":"test_1308875456","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-24T00:30:57+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV 
r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.14%","networkkbsread":1118,"networkkbswrite":75,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3915,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.2.62","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"} ] } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_defaul0000664000175000017500000000007112701023453033713 0ustar kamikami00000000000000{ "assigntoloadbalancerruleresponse" : {"jobid":17341} } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.j0000664000175000017500000000006612701023453033736 0ustar kamikami00000000000000{ "disassociateipaddressresponse" : {"jobid":17344} } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.j0000664000175000017500000000077512701023453033711 0ustar kamikami00000000000000{ "listloadbalancerrulesresponse" : { "loadbalancerrule" : [ 
{"id":"2253","name":"test","publicipid":34000,"publicip":"1.1.1.49","publicport":"80","privateport":"80","algorithm":"roundrobin","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","state":"Active"},{"id":2249,"name":"testmore","publicipid":34001,"publicip":"1.1.2.49","publicport":"80","privateport":"80","algorithm":"leastconn","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","state":"Active"} ] } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.jsonapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.0000664000175000017500000000006712701023453033635 0ustar kamikami00000000000000{ "deleteloadbalancerruleresponse" : {"jobid":17342} } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json0000664000175000017500000000023412701023453033231 0ustar kamikami00000000000000{ "queryasyncjobresultresponse" : {"jobid":17341,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json0000664000175000017500000000023412701023453033234 0ustar kamikami00000000000000{ "queryasyncjobresultresponse" : {"jobid":17344,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json0000664000175000017500000000023412701023453033232 0ustar kamikami00000000000000{ "queryasyncjobresultresponse" : {"jobid":17342,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } 
apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json0000664000175000017500000000071312701023453033240 0ustar kamikami00000000000000{ "queryasyncjobresultresponse" : {"jobid":17346,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"ipaddress":{"id":34000,"ipaddress":"1.1.1.49","allocated":"2011-06-24T05:52:55+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocating"}}} } apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/brightbox/0000775000175000017500000000000013160535110025435 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers.json0000664000175000017500000000135012701023453031262 0ustar kamikami00000000000000[{"id": "lba-1235f", "resource_type": "load_balancer", "url": "https://api.gb1.brightbox.com/1.0/load_balancers/lba-1235f", "name": "lb1", "created_at": "2011-10-06T14:50:28Z", "deleted_at": null, "status": "active", "listeners": [{"out": 80, "protocol": "http", "in": 80}], "cloud_ips": [{"id": "cip-c2v98", "public_ip": "109.107.37.179", "resource_type": "cloud_ip", "reverse_dns": "cip-109-107-37-179.gb1.brightbox.com", "status": "mapped", "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-c2v98"}], "account": {"id": "acc-43ks4", "resource_type": "account", "url": "https://api.gb1.brightbox.com/1.0/account", "name": "Brightbox", "status": "active"}, "nodes": []}] apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/brightbox/token.json0000664000175000017500000000011012701023453027442 0ustar kamikami00000000000000{"access_token": "k1bjflpsaj8wnrbrwzad0eqo36nxiha", "expires_in": 3600} apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_post.json0000664000175000017500000000172612701023453032336 
0ustar kamikami00000000000000{"id": "lba-o466u", "resource_type": "load_balancer", "url": "https://api.gb1.brightbox.com/1.0/load_balancers/lba-o466u", "policy": "least-connections", "name": "lb2", "created_at": "2011-10-01T01:00:00Z", "deleted_at": null, "healthcheck": {"threshold_down": 3, "timeout": 5000, "port": 80, "request": "/", "type": "http", "interval": 5000, "threshold_up": 3}, "listeners": [{"out": 80, "protocol": "http", "in": 80}], "status": "creating", "cloud_ips": [], "account": {"id": "acc-43ks4", "resource_type": "account", "url": "https://api.gb1.brightbox.com/1.0/account", "name": "Brightbox", "status": "active"}, "nodes": [{"id": "srv-lv426", "resource_type": "server", "url": "https://api.gb1.brightbox.com/1.0/servers/srv-lv426", "name": "web1", "created_at": "2011-10-01T01:00:00Z", "deleted_at": null, "hostname": "srv-lv426", "started_at": "2011-10-01T01:01:00Z", "status": "active"}]}apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_lba_1235f.json0000664000175000017500000000172412701023453032725 0ustar kamikami00000000000000{"id": "lba-1235f", "resource_type": "load_balancer", "url": "https://api.gb1.brightbox.com/1.0/load_balancers/lba-1235f", "policy": "least-connections", "name": "lb1", "created_at": "2011-10-01T01:00:00Z", "deleted_at": null, "healthcheck": {"threshold_down": 3, "timeout": 5000, "port": 80, "request": "/", "type": "http", "interval": 5000, "threshold_up": 3}, "listeners": [{"out": 80, "protocol": "http", "in": 80}], "status": "active", "cloud_ips": [], "account": {"id": "acc-43ks4", "resource_type": "account", "url": "https://api.gb1.brightbox.com/1.0/account", "name": "Brightbox", "status": "active"}, "nodes": [{"id": "srv-lv426", "resource_type": "server", "url": "https://api.gb1.brightbox.com/1.0/servers/srv-lv426", "name": "web1", "created_at": "2011-10-01T01:00:00Z", "deleted_at": null, "hostname": "srv-lv426", "started_at": "2011-10-01T01:01:00Z", "status": 
"active"}]}apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/0000775000175000017500000000000013160535110024207 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancer_policies.xml0000664000175000017500000000217012701023453033230 0ustar kamikami00000000000000 MyDurationStickyPolicy LBCookieStickinessPolicyType CookieExpirationPeriod 60 MyAppStickyPolicy AppCookieStickinessPolicyType CookieName MyAppCookie 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/deregister_instances_from_load_balancer.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/deregister_instances_from_load_balance0000664000175000017500000000025112701023453034025 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/describe_tags.xml0000664000175000017500000000116213153541406027536 0ustar kamikami00000000000000 lima project tests 07b1ecbc-1100-11e3-acaf-dd7edEXAMPLE apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/create_load_balancer_policy.xml0000664000175000017500000000025212701023453032402 0ustar kamikami00000000000000 tests.example.com ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers_policy_types.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers_policy_types.x0000664000175000017500000000171312701023453034000 0ustar kamikami00000000000000 ProxyProtocol Boolean ONE ProxyProtocolPolicyType Policy that controls whether to include the IP address and port of the originating request for TCP messages. 
This policy operates on TCP/SSL listeners only 1549581b-12b7-11e3-895e-1334aEXAMPLE ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_for_backend_server.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_for_backend0000664000175000017500000000050412701023453033760 0ustar kamikami00000000000000 0eb9b381-dde0-11e2-8d78-6ddbaEXAMPLE apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers.xml0000664000175000017500000000425212701023453031527 0ustar kamikami00000000000000 tests 2013-01-01T00:00:00.19000Z 30 TCP:22 10 5 2 vpc-56e10e3d AWSConsolePolicy-1 HTTP 80 HTTP 80 i-64bd081c AWSConsolePolicy-1 30 us-east-1e tests.us-east-1.elb.amazonaws.com Z3ZONEID internet-facing tests.us-east-1.elb.amazonaws.com f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_of_listener.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_of_listener0000664000175000017500000000026312701023453034036 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/elb/create_load_balancer.xml0000664000175000017500000000023712701023453031026 0ustar kamikami00000000000000 tests.example.com apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/0000775000175000017500000000000013160535110025455 5ustar kamikami00000000000000././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Product_Order_placeOrder.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Product_Order_plac0000664000175000017500000003675112701023453033727 0ustar 
kamikami00000000000000 orderDate 2015-04-24T15:19:20+02:00 orderId 4320271 orderDetails bigDataOrderFlag 0 billingOrderItemId containerSplHash 00000000 currencyShortName USD extendedHardwareTesting imageTemplateId isManagedOrder 0 location 265592 message packageId 0 paymentType ADD_TO_BALANCE postTaxRecurring 29 postTaxRecurringHourly 0 postTaxRecurringMonthly 29 postTaxSetup 0 preTaxRecurring 29 preTaxRecurringHourly 0 preTaxRecurringMonthly 29 preTaxSetup 0 presetId primaryDiskPartitionId privateCloudOrderFlag 0 proratedInitialCharge 6.77 proratedOrderTotal 6.77 quantity 1 resourceGroupId resourceGroupTemplateId sendQuoteEmailFlag serverCoreCount sourceVirtualGuestId stepId taxCacheHash b94c43e22aeaedb3dc4 taxCompletedFlag 1 totalRecurringTax 0 totalSetupTax 0 useHourlyPricing 0 billingInformation billingAddressLine1 University billingCity Haifa billingCountryCode IL billingEmail someone@com billingNameCompany IBM billingNameFirst Foo billingNameLast Bar billingPhoneVoice 972-4-44444 billingPostalCode 0000 billingState OT cardExpirationMonth cardExpirationYear taxExempt 0 itemCategoryQuestionAnswers locationObject id 265592 longName Amsterdam 1 name ams01 prices id 33021 itemId 565 laborFee 0 oneTimeFee 0 recurringFee 29 setupFee 0 categories categoryCode proxy_load_balancer id 55 name Proxy Load Balancer item capacity 50 description Load Balancer 50 connections id 565 keyName LOAD_BALANCER_50_CONNECTIONS units Connections/Second bundle properties sshKeys storageGroups placedOrder accountId 411826 id 4320271 orderQuoteId orderTypeId 4 presaleEventId status PENDING_AUTO_APPROVAL userRecordId 315282 account brandId 30130 companyName IBM id 411826 items categoryCode proxy_load_balancer description Load Balancer 50 connections id 65021573 itemId 565 itemPriceId 33021 laborFee 0 oneTimeFee 0 parentId promoCodeId recurringFee 29 setupFee 0 children userRecord accountId 00000 firstName Foo id 00000 lastName Foo username foo 
././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Location_Datacenter_getDatacenters.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Location_Datacente0000664000175000017500000003053112701023453033663 0ustar kamikami00000000000000 id 265592 longName Amsterdam 1 name ams01 statusId 2 id 814994 longName Amsterdam 3 name ams03 statusId 2 id 3 longName Dallas 1 name dal01 statusId 2 id 154770 longName Dallas 2 name dal02 statusId 2 id 167092 longName Dallas 4 name dal04 statusId 2 id 138124 longName Dallas 5 name dal05 statusId 2 id 154820 longName Dallas 6 name dal06 statusId 2 id 142776 longName Dallas 7 name dal07 statusId 2 id 449494 longName Dallas 9 name dal09 statusId 2 id 449506 longName Frankfurt 2 name fra02 statusId 2 id 352494 longName Hong Kong 2 name hkg02 statusId 2 id 142775 longName Houston 2 name hou02 statusId 2 id 358694 longName London 2 name lon02 statusId 2 id 449596 longName Melbourne 1 name mel01 statusId 2 id 449600 longName Mexico 1 name mex01 statusId 2 id 449610 longName Montreal 1 name mon01 statusId 2 id 449500 longName Paris 1 name par01 statusId 2 id 168642 longName San Jose 1 name sjc01 statusId 2 id 18171 longName Seattle name sea01 statusId 2 id 224092 longName Singapore 1 name sng01 statusId 2 id 449612 longName Sydney 1 name syd01 statusId 2 id 449604 longName Tokyo 2 name tok02 statusId 2 id 448994 longName Toronto 1 name tor01 statusId 2 id 37473 longName Washington 1 name wdc01 statusId 2 ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Product_Package_getItems.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Product_Package_ge0000664000175000017500000036173712701023453033670 0ustar kamikami00000000000000 capacity 0 description 
Citrix VDC id 1148 itemTaxCategoryId 166 keyName CITRIX_VDC softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 24912 itemId 1148 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 0 sort 0 capacity 65000 description Load Balancer (Dedicated, with SSL offload) - 15,000 Connections id 4136 itemTaxCategoryId 166 keyName LOAD_BALANCER_DEDICATED_WITH_SSL_OFFLOAD_15000_CONNECTIONS softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 25314 itemId 4136 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 849 setupFee 0 sort 0 capacity 200 description Citrix NetScaler VPX 10.5 200Mbps Platinum id 5916 itemTaxCategoryId 166 keyName CITRIX_NETSCALER_VPX_10_5_200MBPS_PLATINUM softwareDescriptionId 1378 units Mbps upgradeItemId prices currentPriceFlag id 44970 itemId 5916 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 899 setupFee 0 sort 0 capacity 64 description /64 Block Static Public IPv6 Addresses id 784 itemTaxCategoryId 166 keyName 64_BLOCK_STATIC_PUBLIC_IPV6_ADDRESSES softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag hourlyRecurringFee .007 id 26340 itemId 784 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 0 sort 10 capacity 4 description .info - 4 year id 4007 itemTaxCategoryId 166 keyName DOMAIN_INFO_4_YEAR softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 26584 itemId 4007 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 38.48 sort 3 capacity 50 description Load Balancer 50 connections id 565 itemTaxCategoryId 166 keyName LOAD_BALANCER_50_CONNECTIONS softwareDescriptionId units Connections/Second upgradeItemId 567 prices currentPriceFlag id 33021 itemId 565 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 29 setupFee 0 sort 0 capacity 0 description Reseller Domain Credit id 4065 itemTaxCategoryId 162 keyName RESELLER_DOMAIN_CREDIT softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 
27728 itemId 4065 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 0 sort 10 capacity 250 description CDN 250 GB Bandwidth id 892 itemTaxCategoryId 166 keyName CDN_250_GB_BANDWIDTH softwareDescriptionId units GB upgradeItemId prices currentPriceFlag id 27770 itemId 892 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 20 setupFee 0 sort 10 capacity 3 description .biz - 3 year id 3995 itemTaxCategoryId 166 keyName DOMAIN_BIZ_3_YEAR softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 29940 itemId 3995 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 33 sort 2 capacity 0 description Dedicated Rack (Half) id 1302 itemTaxCategoryId 166 keyName DEDICATED_RACK_HALF softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 31299 itemId 1302 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 1100 setupFee 0 sort 0 capacity 1000 description Load Balancer 1,000 connections id 566 itemTaxCategoryId 166 keyName LOAD_BALANCER_1000_CONNECTIONS softwareDescriptionId units Connections/Second upgradeItemId 571 prices currentPriceFlag id 31322 itemId 566 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 499 setupFee 0 sort 0 capacity 5 description .info - 5 year id 4023 itemTaxCategoryId 166 keyName DOMAIN_INFO_5_YEAR softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 31340 itemId 4023 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 48.1 sort 4 capacity 7 description .us - 7 year id 3999 itemTaxCategoryId 166 keyName DOMAIN_US_7_YEAR softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 32381 itemId 3999 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 57.33 sort 6 capacity 0 description Windows 2008 Datacenter w/ Unlimited Virtuals (2 Proc License) id 791 itemTaxCategoryId 166 keyName VIRTUAL_OS_WINDOWS_2008_DATACENTER_W_UNLIMITED_VIRTUALS_2_PROC_LICENSE softwareDescriptionId 723 units N/A upgradeItemId prices 
currentPriceFlag id 32695 itemId 791 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 127.5 setupFee 0 sort 0 capacity 250 description Load Balancer 250 VIP Connections id 1106 itemTaxCategoryId 166 keyName LOAD_BALANCER_250_VIP_CONNECTIONS softwareDescriptionId units Connections/Second upgradeItemId 1105 prices currentPriceFlag id 33908 itemId 1106 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 29 setupFee 0 sort 0 capacity 1500 description 1500GB EVault Disk to Disk Enterprise Backup id 4185 itemTaxCategoryId 166 keyName EVAULT_1500_GB softwareDescriptionId 159 units GIGABYTE upgradeItemId 955 prices currentPriceFlag id 34594 itemId 4185 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 543.75 setupFee 0 sort 0 capacity 65000 description Dedicated Load Balancer with High Availability and SSL - 15,000 Connections id 4512 itemTaxCategoryId 166 keyName DEDICATED_LOAD_BALANCER_WITH_HIGH_AVAILABILITY_AND_SSL_15000_CONNECTIONS softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 36019 itemId 4512 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 1698 setupFee 0 sort 0 capacity 10 description Citrix NetScaler VPX 10.5 10Mbps Platinum id 5912 itemTaxCategoryId 166 keyName CITRIX_NETSCALER_VPX_10_5_10MBPS_PLATINUM softwareDescriptionId 1374 units Mbps upgradeItemId prices currentPriceFlag id 44950 itemId 5912 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 459 setupFee 0 sort 0 capacity 0 description EVault Plugin - VMWare ESX id 1087 itemTaxCategoryId 166 keyName EVAULT_PLUGIN_VMWARE_ESX softwareDescriptionId 635 upgradeItemId prices currentPriceFlag id 23040 itemId 1087 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 8.5 setupFee 0 sort 0 capacity 8 description 8 Static Public IP Addresses id 583 itemTaxCategoryId 166 keyName 8_STATIC_PUBLIC_IP_ADDRESSES softwareDescriptionId upgradeItemId prices currentPriceFlag hourlyRecurringFee 0 id 36692 itemId 583 laborFee 0 onSaleFlag oneTimeFee 0 quantity 
recurringFee 0 setupFee 0 sort 0 currentPriceFlag id 29583 itemId 583 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 8 setupFee 0 sort 0 currentPriceFlag id 31207 itemId 583 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 8 setupFee 0 sort 0 currentPriceFlag hourlyRecurringFee .007 id 23392 itemId 583 laborFee 0 onSaleFlag oneTimeFee 0 quantity setupFee 0 sort 0 capacity 2000 description Load Balancer 2,000 connections id 617 itemTaxCategoryId 166 keyName LOAD_BALANCER_2000_CONNECTIONS softwareDescriptionId units Connections/Second upgradeItemId 618 prices currentPriceFlag id 23794 itemId 617 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 999 setupFee 0 sort 0 capacity 3 description .us - 3 year id 4011 itemTaxCategoryId 166 keyName DOMAIN_US_3_YEAR softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 24083 itemId 4011 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 24.57 sort 2 capacity 2 description FortiGate Security Appliance (High Availability) id 4338 itemTaxCategoryId 166 keyName FORTIGATE_SECURITY_APPLIANCE_HIGH_AVAILABILITY softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 24741 itemId 4338 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 1699 setupFee 0 sort 1 capacity 100000 description Advanced Package id 3859 itemTaxCategoryId 166 keyName NETWORK_MESSAGE_DELIVERY_ADVANCED_PACKAGE longDescription <ul> <li>Overage - $0.00085</li> <li>Bounce Tracking</li> <li>Unsub Tracking</li> <li>Click Tracking</li> <li>Open Tracking</li> <li>Dedicated IP</li> </ul> softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 24995 itemId 3859 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 36 setupFee 0 sort 0 capacity 0 description Global IPv6 id 4187 itemTaxCategoryId 166 keyName GLOBAL_IPV6 softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 25282 itemId 4187 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 20 
setupFee 0 sort 10 capacity 0 description CDN Pay as You Go Bandwidth id 891 itemTaxCategoryId 166 keyName CDN_PAY_AS_YOU_GO_BANDWIDTH softwareDescriptionId units GB upgradeItemId prices currentPriceFlag id 25482 itemId 891 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 0 sort 5 capacity 2 description .net - 2 year id 4001 itemTaxCategoryId 166 keyName DOMAIN_NET_2_YEAR softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 25495 itemId 4001 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 22 sort 1 capacity 1 description QuickSSL Premium - 1 year id 1293 itemTaxCategoryId 166 keyName SSL_CERTIFICATE_QUICKSSL_PREMIUM_1_YEAR longDescription <h3>GeoTrust QuickSSL&reg; Premium SSL</h3> <p> GeoTrust QuickSSL Premium Certificates are convenient and economical entry-level SSL Certificates. These certificates enable up to 256-bit encryption (depending on your client's browser capability and the cipher suite installed on your Web server) and are issued through an automated process that usually results in certificates being issued within minutes of being ordered. This process verifies that a certificate purchaser has appropriate administrative rights to a Web server's domain. 
</p> <h3>Features and Benefits</h3> <ul> <li>Domain-level authentication</li> <li>Up to 256-bit encryption</li> <li>True Site Seal embedded with a date/time stamp</li> <li>Issuance, often within minutes</li> <li>Unlimited, free self-service reissues</li> </ul> softwareDescriptionId units DV upgradeItemId prices currentPriceFlag id 25847 itemId 1293 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 79 sort 3 capacity 2 description Hardware Firewall (High Availability) id 3896 itemTaxCategoryId 166 keyName HARDWARE_FIREWALL_HIGH_AVAILABILITY softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 26005 itemId 3896 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 1398 setupFee 0 sort 0 capacity 3 description .org - 3 year id 4002 itemTaxCategoryId 166 keyName DOMAIN_ORG_3_YEAR softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 26323 itemId 4002 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 30 sort 2 capacity 64 description /64 Block Portable Public IPv6 Addresses id 785 itemTaxCategoryId 166 keyName 64_BLOCK_PORTABLE_PUBLIC_IPV6_ADDRESSES softwareDescriptionId upgradeItemId prices currentPriceFlag id 28502 itemId 785 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 10 setupFee 0 sort 0 capacity 2500 description Load Balancer 2,500 VIP Connections id 1103 itemTaxCategoryId 166 keyName LOAD_BALANCER_2500_VIP_CONNECTIONS softwareDescriptionId units Connections/Second upgradeItemId 1102 prices currentPriceFlag id 30681 itemId 1103 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 299 setupFee 0 sort 0 capacity 500 description Load Balancer 500 VIP Connections id 1105 itemTaxCategoryId 166 keyName LOAD_BALANCER_500_VIP_CONNECTIONS softwareDescriptionId units Connections/Second upgradeItemId 1104 prices currentPriceFlag id 33416 itemId 1105 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 49 setupFee 0 sort 0 capacity 0 description Windows 2008 Datacenter w/ 
Unlimited Virtuals (4 Proc License) id 792 itemTaxCategoryId 166 keyName VIRTUAL_OS_WINDOWS_2008_DATACENTER_W_UNLIMITED_VIRTUALS_4_PROC_LICENSE softwareDescriptionId 723 units N/A upgradeItemId prices currentPriceFlag id 34006 itemId 792 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 255 setupFee 0 sort 0 capacity 1500 description Load Balancer 1,500 connections id 616 itemTaxCategoryId 166 keyName LOAD_BALANCER_1500_CONNECTIONS softwareDescriptionId upgradeItemId prices currentPriceFlag id 30815 itemId 616 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 749 setupFee 0 sort 0 capacity 2 description .com - 2 year id 4013 itemTaxCategoryId 166 keyName DOMAIN_COM_2_YEAR softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 31127 itemId 4013 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 19.88 sort 1 capacity 10 description .net - 10 year id 4032 itemTaxCategoryId 166 keyName DOMAIN_NET_10_YEAR softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 31647 itemId 4032 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 110 sort 9 description Hardware Firewall (Dedicated) id 1329 itemTaxCategoryId 166 keyName HARDWARE_FIREWALL_DEDICATED softwareDescriptionId units dedicated upgradeItemId prices currentPriceFlag id 33373 itemId 1329 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 699 setupFee 0 sort 0 capacity 2 description .org - 2 year id 3993 itemTaxCategoryId 166 keyName DOMAIN_ORG_2_YEAR softwareDescriptionId units N/A upgradeItemId prices currentPriceFlag id 22693 itemId 3993 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 20 sort 1 capacity 8 description 8 Portable Private IP Addresses id 3903 itemTaxCategoryId 166 keyName 8_PORTABLE_PRIVATE_IP_ADDRESSES softwareDescriptionId units IPs upgradeItemId prices currentPriceFlag id 23104 itemId 3903 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 0 setupFee 0 sort 0 capacity 1000 description 
1000GB EVault Disk to Disk Enterprise Backup id 563 itemTaxCategoryId 166 keyName EVAULT_1000_GB softwareDescriptionId 159 units GIGABYTE upgradeItemId 4185 prices currentPriceFlag id 28124 itemId 563 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 362.5 setupFee 0 sort 0 capacity 1000 description Citrix NetScaler VPX 10.5 1000Mbps Standard id 5910 itemTaxCategoryId 166 keyName CITRIX_NETSCALER_VPX_10_5_1000MBPS_STANDARD softwareDescriptionId 1380 units Mbps upgradeItemId prices currentPriceFlag id 44938 itemId 5910 laborFee 0 onSaleFlag oneTimeFee 0 quantity recurringFee 999 setupFee 0 sort 0 ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Account_getAdcLoadBalancers.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Account_getAdcLoad0000664000175000017500000005451612701023453033617 0ustar kamikami00000000000000 accountId 411826 connectionLimit 50 connectionLimitUnits connections/second dedicatedFlag 0 id 76185 ipAddressId 7303289 securityCertificateId sslActiveFlag 0 sslEnabledFlag 0 highAvailabilityFlag 0 ipAddress id 7303289 ipAddress 108.168.157.130 isBroadcast 0 isGateway 0 isNetwork 0 isReserved 1 note Load balancer VIP subnetId 406030 loadBalancerHardware accountId 1 bareMetalInstanceFlag 0 domain softlayer.com fullyQualifiedDomainName lb04.dal05.softlayer.com hardwareStatusId 5 hostname lb04.dal05 id 178860 notes provisionDate serviceProviderId 1 serviceProviderResourceId 178860 datacenter id 138124 longName Dallas 5 name dal05 statusId 2 virtualServers allocation 100 id 132193 port 80 routingMethodId virtualIpAddressId 76185 serviceGroups id 131113 routingMethodId 10 routingTypeId 2 timeout routingMethod id 10 keyname ROUND_ROBIN name Round Robin routingType id 2 keyname HTTP name HTTP services enabled 1 id 221145 ipAddressId 33499201 notes inst-test-276a.example.com port 8080 status DOWN 
ipAddress id 33499201 ipAddress 75.126.161.106 isBroadcast 0 isGateway 0 isNetwork 0 isReserved 0 subnetId 22958 subnet broadcastAddress 75.126.161.111 cidr 29 gateway 75.126.161.105 id 22958 isCustomerOwned 0 isCustomerRoutable 0 modifyDate 2015-04-19T03:04:09+02:00 netmask 255.255.255.248 networkIdentifier 75.126.161.104 networkVlanId 815621 sortOrder 4 subnetType PRIMARY totalIpAddresses 8 usableIpAddressCount 5 version 4 accountId 411826 connectionLimit 50 connectionLimitUnits connections/second dedicatedFlag 0 id 76265 ipAddressId 25808370 securityCertificateId sslActiveFlag 0 sslEnabledFlag 0 highAvailabilityFlag 0 ipAddress id 25808370 ipAddress 159.8.117.72 isBroadcast 0 isGateway 0 isNetwork 0 isReserved 1 note Load balancer VIP subnetId 717366 loadBalancerHardware accountId 1 bareMetalInstanceFlag 0 domain softlayer.com fullyQualifiedDomainName lb01.par01.softlayer.com hardwareStatusId 5 hostname lb01.par01 id 390016 notes provisionDate serviceProviderId 1 serviceProviderResourceId 390016 datacenter id 449500 longName Paris 1 name par01 statusId 2 virtualServers ././@LongLink0000000000000000000000000000025300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_getBillingItem.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Applicatio0000664000175000017500000000513212701023453033740 0ustar kamikami00000000000000 allowCancellationFlag 1 cancellationDate categoryCode proxy_load_balancer createDate 2015-04-24T15:19:21+02:00 cycleStartDate 2015-04-24T15:19:21+02:00 description Load Balancer 50 connections id 50844215 laborFee 0 laborFeeTaxRate 0 lastBillDate 2015-04-24T15:19:21+02:00 modifyDate 2015-04-24T15:19:23+02:00 nextBillDate 2015-05-01T07:00:00+02:00 notes 1.2.3.4 oneTimeFee 0 oneTimeFeeTaxRate 0 orderItemId 65021573 parentId recurringFee 29 recurringFeeTaxRate 0 
recurringMonths 1 serviceProviderId 1 setupFee 0 setupFeeTaxRate 0 ././@LongLink0000000000000000000000000000024700000000000011220 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_editObject.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Applicatio0000664000175000017500000000016512701023453033741 0ustar kamikami00000000000000 1 ././@LongLink0000000000000000000000000000024000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_deleteObject.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Applicatio0000664000175000017500000000016512701023453033741 0ustar kamikami00000000000000 1 ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Billing_Item_cancelService.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Billing_Item_cance0000664000175000017500000000016412701023453033631 0ustar kamikami00000000000000 1 ././@LongLink0000000000000000000000000000024600000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_getObject.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Applicatio0000664000175000017500000006660112701023453033750 0ustar kamikami00000000000000 accountId 411826 connectionLimit 50 connectionLimitUnits connections/second dedicatedFlag 0 id 76185 ipAddressId 7303289 securityCertificateId sslActiveFlag 0 sslEnabledFlag 0 highAvailabilityFlag 0 managedResourceFlag 0 virtualServers allocation 100 id 132193 port 80 
routingMethodId virtualIpAddressId 76185 serviceGroups id 131113 routingMethodId 10 routingTypeId 2 timeout services enabled 1 id 226227 ipAddressId 29341260 port 8000 status UP groupReferences serviceGroupId 131113 serviceId 226227 weight 1 ipAddress id 29341260 ipAddress 10.126.5.34 isBroadcast 0 isGateway 0 isNetwork 0 isReserved 0 subnetId 730826 subnet broadcastAddress 10.126.5.63 cidr 26 gateway 10.126.5.1 id 730826 isCustomerOwned 0 isCustomerRoutable 0 modifyDate 2015-04-19T03:14:26+02:00 netmask 255.255.255.192 networkIdentifier 10.126.5.0 networkVlanId 728518 sortOrder 4 subnetType PRIMARY totalIpAddresses 64 usableIpAddressCount 61 version 4 enabled 1 id 226229 ipAddressId 29341262 port 8080 status UP groupReferences serviceGroupId 131113 serviceId 226229 weight 1 ipAddress id 29341262 ipAddress 10.126.5.35 isBroadcast 0 isGateway 0 isNetwork 0 isReserved 0 subnetId 730826 subnet broadcastAddress 10.126.5.63 cidr 26 gateway 10.126.5.1 id 730826 isCustomerOwned 0 isCustomerRoutable 0 modifyDate 2015-04-19T03:14:26+02:00 netmask 255.255.255.192 networkIdentifier 10.126.5.0 networkVlanId 728518 sortOrder 4 subnetType PRIMARY totalIpAddresses 64 usableIpAddressCount 61 version 4 enabled 1 id 221145 ipAddressId 33499201 notes inst-test-276a.example.com port 8080 status UP groupReferences serviceGroupId 131113 serviceId 221145 weight 1 ipAddress id 33499201 ipAddress 75.75.75.75 isBroadcast 0 isGateway 0 isNetwork 0 isReserved 0 subnetId 22958 subnet broadcastAddress 75.75.75.75 cidr 29 gateway 75.75.75.75 id 22958 isCustomerOwned 0 isCustomerRoutable 0 modifyDate 2015-04-19T03:04:09+02:00 netmask 255.255.255.248 networkIdentifier 75.75.75.75 networkVlanId 815621 sortOrder 4 subnetType PRIMARY totalIpAddresses 8 usableIpAddressCount 5 version 4 ././@LongLink0000000000000000000000000000017600000000000011221 Lustar 
00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Subnet_IpAddress_getByIpAddress.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Subnet_IpA0000664000175000017500000000537612701023453033656 0ustar kamikami00000000000000 id 29341260 ipAddress 10.126.5.34 isBroadcast 0 isGateway 0 isNetwork 0 isReserved 0 subnetId 730826 subnet broadcastAddress 10.126.5.63 cidr 26 gateway 10.126.5.1 id 730826 isCustomerOwned 0 isCustomerRoutable 0 modifyDate 2015-04-19T03:14:26+02:00 netmask 255.255.255.192 networkIdentifier 10.126.5.0 networkVlanId 728518 sortOrder 4 subnetType PRIMARY totalIpAddresses 64 usableIpAddressCount 61 version 4 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/alb/0000775000175000017500000000000013160535110024203 5ustar kamikami00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/alb/describe_load_balancer_target_groups.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/alb/describe_load_balancer_target_groups.x0000664000175000017500000000275513153541406033767 0ustar kamikami00000000000000 arn:aws:elasticloadbalancing:us-east-1:111111111111:targetgroup/TEST-TARGET-GROUP1/1111111111111111 5 traffic-port 200 TEST-TARGET-GROUP1 HTTPS /apps/health HTTPS 1443 vpc-11111111 5 30 arn:aws:elasticloadbalancing:us-east-1:111111111111:loadbalancer/app/Test-ALB/1111111111111111 2 20eca3ac-a03c-11e6-a91a-7f816344a331 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/alb/describe_load_balancer_listeters.xml0000664000175000017500000000255613153541406033450 0ustar kamikami00000000000000 arn:aws:elasticloadbalancing:us-east-1:111111111111:loadbalancer/app/Test-ALB/1111111111111111 HTTPS arn:aws:iam::111111111111:server-certificate/test.certificate 1443 
arn:aws:elasticloadbalancing:us-east-1:111111111111:listener/app/Test-ALB/1111111111111111/1111111111111111 ELBSecurityPolicy-2015-05 forward arn:aws:elasticloadbalancing:us-east-1:111111111111:targetgroup/TEST-TARGET-GROUP1/1111111111111111 77834e15-a03a-11e6-a23e-41d3227189e3 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/alb/describe_load_balancer_rules.xml0000664000175000017500000000171013153541406032553 0ustar kamikami00000000000000 true default forward arn:aws:elasticloadbalancing:us-east-1:111111111111:targetgroup/TEST-TARGET-GROUP1/1111111111111111 arn:aws:elasticloadbalancing:us-east-1:111111111111:listener-rule/app/Test-Develop-App-LB/1111111111111111/1111111111111111/1111111111111111 a6519d8d-a03b-11e6-90ef-9138cf7efdbc apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/alb/describe_tags.xml0000664000175000017500000000127513153541406027537 0ustar kamikami00000000000000 arn:aws:elasticloadbalancing:us-east-1:111111111111:loadbalancer/app/Test-ALB/111111111111 lima project 34cba10d-a040-11e6-9d2a-7fea45a4c556 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/alb/describe_target_health.xml0000664000175000017500000000132413153541406031407 0ustar kamikami00000000000000 3443 healthy 3443 i-01111111111111111 3e63f250-a03f-11e6-b07a-97abc256eba0 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/alb/describe_load_balancers.xml0000664000175000017500000000231113153541406031522 0ustar kamikami00000000000000 arn:aws:elasticloadbalancing:us-east-1:111111111111:loadbalancer/app/Test-ALB/1111111111111111 internal subnet-11111111 us-east-1b internal-Test-ALB-1111111111.us-east-1.elb.amazonaws.com application Test-ALB vpc-11111111 11111111111111 2016-10-02T20:11:22.980Z sg-11111111 active 00904965-a037-11e6-b59a-ff82c1d23c14 apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/0000775000175000017500000000000013160535110026264 5ustar 
kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_pool.xml0000664000175000017500000000260312704221640033343 0ustar kamikami00000000000000 553f26b6-2a73-42c3-a78b- 6116f11291d0 myDevelopmentPool.1 Pool for load balancing development application servers. ROUND_ROBIN RESELECT 10 NORMAL 2015-06-04T09:15:07.000Z 553f26b6-2a73-42c3-a78b- 6116f11291d0 myProductionPool.1 Pool for load balancing production application servers. ROUND_ROBIN NONE 10 NORMAL 2015-06-03T14:11:17.000Z ././@LongLink0000000000000000000000000000021500000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_poolMember_3dd806a2_c2c8_4c0c_9a4f_5219ea9266c0.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_poolMember_0000664000175000017500000000102712704221640034032 0ustar kamikami00000000000000 553f26b6-2a73-42c3-a78b-6116f11291d0 9889 ENABLED NORMAL 2015-06-09T11:02:50.000Z apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/oec_0_9_myaccount.xml0000664000175000017500000000303412701023453032307 0ustar kamikami00000000000000 testuser Test User Test User test@example.com 8a8f6abc-2745-4d8a-9cbc-8dabe5a7d0e4 create image reports server primary administrator network ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_deletePool.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_deletePool.0000664000175000017500000000050512704221640033744 0ustar kamikami00000000000000 DELETE_POOL OK Pool (id:4d360b1f-bc2c-4ab7-9884-1f03ba2768f7) has been deleted. 
././@LongLink0000000000000000000000000000020700000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_pool_4d360b1f_bc2c_4ab7_9884_1f03ba2768f7.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_pool_4d360b0000664000175000017500000000130412704221640033523 0ustar kamikami00000000000000 553f26b6-2a73-42c3-a78b-6116f11291d0 myDevelopmentPool.1 Pool for load balancing development application servers. ROUND_ROBIN RESELECT 10 NORMAL 2015-06-04T09:15:07.000Z ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_defaultIrule.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_defaultIrul0000664000175000017500000000334012704221640034052 0ustar kamikami00000000000000 ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_deleteVirtualListener.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_deleteVirtu0000664000175000017500000000053512704221640034071 0ustar kamikami00000000000000 DELETE_VIRTUAL_LISTENER OK Virtual Listener (id:6115469d-a8bb-445b-bb23-d23b5283f2b9) has been deleted. ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_createNode.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_createNode.0000664000175000017500000000066112704221640033724 0ustar kamikami00000000000000 CREATE_NODE OK Node 'myProductionNode.1' has been created. 
././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_defaultHealthMonitor.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_defaultHeal0000664000175000017500000000255212704221640034014 0ustar kamikami00000000000000 CCDEFAULT.Http false true CCDEFAULT.Https false true CCDEFAULT.Icmp true false CCDEFAULT.Tcp false true CCDEFAULT.TcpHalfOpen false true CCDEFAULT.Udp false true ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_removePoolMember.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_removePoolM0000664000175000017500000000052212704221640034035 0ustar kamikami00000000000000 REMOVE_POOL_MEMBER OK Pool Member (id:34de6ed6-46a4-4dae-a753-2f8d3840c6f9) has been removed. apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_node.xml0000664000175000017500000000216612704221640033323 0ustar kamikami00000000000000 553f26b6-2a73-42c3-a78b- 6116f11291d0 ProductionNode.1 Production Server 1 10.10.10.101 NORMAL ENABLED 10000 2000 2015-05-29T13:56:13.000Z 553f26b6-2a73-42c3-a78b- 6116f11291d0 ProductionNode.2 Production Server 2 10.5.2.15 NORMAL ENABLED 10000 2000 2015-05-29T11:46:55.000Z ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_editNode.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_editNode.xm0000664000175000017500000000060412704221640033750 0ustar kamikami00000000000000 EDIT_NODE OK Node 'ProductionNode.2' has been edited successfully. 
././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_defaultPersistenceProfile.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_defaultPers0000664000175000017500000000376412704221640034062 0ustar kamikami00000000000000 CCDEFAULT.Cookie CCDEFAULT.DestinationAddress CCDEFAULT.Sip CCDEFAULT.SourceAddress ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_addPoolMember.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_addPoolMemb0000664000175000017500000000066112704221640033760 0ustar kamikami00000000000000 ADD_POOL_MEMBER OK Pool Member '10.0.3.13:9888' has been added. ././@LongLink0000000000000000000000000000020700000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_node_34de6ed6_46a4_4dae_a753_2f8d3840c6f9.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_node_34de6e0000664000175000017500000000112712704221640033572 0ustar kamikami00000000000000 553f26b6-2a73-42c3-a78b-6116f11291d0 ProductionNode.2 Production Server 2 10.10.10.101 NORMAL ENABLED 10000 2000 2015-05-27T13:56:13.000Z ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_deleteNode.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_deleteNode.0000664000175000017500000000050512704221640033720 0ustar kamikami00000000000000 DELETE_NODE OK Node (id:34de6ed6-46a4-4dae-a753-2f8d3840c6f9) has been deleted. 
././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_createPool.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_createPool.0000664000175000017500000000066212704221640033751 0ustar kamikami00000000000000 CREATE_POOL OK Pool 'myProductionPool.1' has been created. ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_editPool.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_editPool.xm0000664000175000017500000000064512704221640034001 0ustar kamikami00000000000000 EDIT_POOL OK Pool 'ProductionPool.2' has been updated. ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_virtualListener.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_virtualList0000664000175000017500000000366712704221640034130 0ustar kamikami00000000000000 553f26b6-2a73-42c3-a78b- 6116f11291d0 myProduction.Virtual.Listener NORMAL Virtual Listener for load balancing our test systems. 2015-05-28T15:59:49.000Z PERFORMANCE_LAYER_4 HTTP 165.180.12.22 8899 true 10000 400 PRESERVE ROUND_ROBIN NONE 10 ROUND_ROBIN RESELECT 10 ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_createVirtualListener.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_createVirtu0000664000175000017500000000104512704221640034067 0ustar kamikami00000000000000 CREATE_VIRTUAL_LISTENER OK Virtual Listener 'Production.Load.Balancer' has been created on IP 165.180.12.22. 
././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_poolMember.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_poolMember.0000664000175000017500000000207312704221640033753 0ustar kamikami00000000000000 553f26b6-2a73-42c3-a78b- 6116f11291d0 9889 ENABLED NORMAL 2015-06-09T11:02:50.000Z 553f26b6-2a73-42c3-a78b- 6116f11291d0 9888 ENABLED NORMAL 2015-06-09T10:43:29.000Z ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_editPoolMember.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_editPoolMem0000664000175000017500000000061112704221640034006 0ustar kamikami00000000000000 EDIT_POOL_MEMBER OK Pool Member (10.0.3.13:9889) has been edited. ././@LongLink0000000000000000000000000000022200000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_virtualListener_6115469d_a8bb_445b_bb23_d23b5283f2b9.xmlapache-libcloud-2.2.1/libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_virtualList0000664000175000017500000000352412704221640034120 0ustar kamikami00000000000000 553f26b6-2a73-42c3-a78b-6116f11291d0 myProduction.Virtual.Listener NORMAL Virtual Listener for load balancing our test systems. 2015-05-28T15:59:49.000Z PERFORMANCE_LAYER_4 HTTP 165.180.12.22 8899 true 10000 400 PRESERVE ROUND_ROBIN NONE 10 ROUND_ROBIN RESELECT 10 apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_gce.py0000664000175000017500000002133513153541406023756 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tests for Google Compute Engine Load Balancer Driver """ import sys import unittest from libcloud.common.google import GoogleBaseAuthConnection from libcloud.compute.drivers.gce import (GCENodeDriver) from libcloud.loadbalancer.drivers.gce import (GCELBDriver) from libcloud.test.common.test_google import GoogleAuthMockHttp, GoogleTestCase from libcloud.test.compute.test_gce import GCEMockHttp from libcloud.test.secrets import GCE_PARAMS, GCE_KEYWORD_PARAMS class GCELoadBalancerTest(GoogleTestCase): datacenter = 'us-central1-a' def setUp(self): GCEMockHttp.test = self GCELBDriver.connectionCls.conn_class = GCEMockHttp GCENodeDriver.connectionCls.conn_class = GCEMockHttp GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp GCEMockHttp.type = None kwargs = GCE_KEYWORD_PARAMS.copy() kwargs['auth_type'] = 'IA' kwargs['datacenter'] = self.datacenter self.driver = GCELBDriver(*GCE_PARAMS, **kwargs) def test_get_node_from_ip(self): ip = '23.236.58.15' expected_name = 'node-name' node = self.driver._get_node_from_ip(ip) self.assertEqual(node.name, expected_name) dummy_ip = '8.8.8.8' node = self.driver._get_node_from_ip(dummy_ip) self.assertTrue(node is None) def test_list_protocols(self): expected_protocols = ['TCP', 'UDP'] protocols = self.driver.list_protocols() self.assertEqual(protocols, expected_protocols) def test_list_balancers(self): balancers = self.driver.list_balancers() balancers_all = 
self.driver.list_balancers(ex_region='all') balancer_name = 'lcforwardingrule' self.assertEqual(len(balancers), 2) self.assertEqual(len(balancers_all), 2) self.assertEqual(balancers[0].name, balancer_name) def test_create_balancer(self): balancer_name = 'libcloud-lb-demo-lb' tp_name = '%s-tp' % (balancer_name) port = '80' protocol = 'tcp' algorithm = None node0 = self.driver.gce.ex_get_node('libcloud-lb-demo-www-000', 'us-central1-b') node1 = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001', 'us-central1-b') members = [node0, node1] balancer = self.driver.create_balancer(balancer_name, port, protocol, algorithm, members) self.assertEqual(balancer.name, balancer_name) self.assertEqual(balancer.extra['targetpool'].name, tp_name) self.assertEqual(len(balancer.list_members()), 3) def test_destory_balancer(self): balancer_name = 'lcforwardingrule' balancer = self.driver.get_balancer(balancer_name) destroyed = balancer.destroy() self.assertTrue(destroyed) def test_get_balancer(self): balancer_name = 'lcforwardingrule' tp_name = 'lctargetpool' balancer_ip = '173.255.119.224' balancer = self.driver.get_balancer(balancer_name) self.assertEqual(balancer.name, balancer_name) self.assertEqual(balancer.extra['forwarding_rule'].name, balancer_name) self.assertEqual(balancer.ip, balancer_ip) self.assertEqual(balancer.extra['targetpool'].name, tp_name) def test_attach_compute_node(self): node = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001', 'us-central1-b') balancer = self.driver.get_balancer('lcforwardingrule') member = self.driver._node_to_member(node, balancer) # Detach member first balancer.detach_member(member) self.assertEqual(len(balancer.list_members()), 1) # Attach Node balancer.attach_compute_node(node) self.assertEqual(len(balancer.list_members()), 2) def test_detach_attach_member(self): node = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001', 'us-central1-b') balancer = self.driver.get_balancer('lcforwardingrule') member = 
self.driver._node_to_member(node, balancer) # Check that balancer has 2 members self.assertEqual(len(balancer.list_members()), 2) # Remove a member and check that it now has 1 member balancer.detach_member(member) self.assertEqual(len(balancer.list_members()), 1) # Reattach member and check that it has 2 members again balancer.attach_member(member) self.assertEqual(len(balancer.list_members()), 2) def test_balancer_list_members(self): balancer = self.driver.get_balancer('lcforwardingrule') members = balancer.list_members() self.assertEqual(len(members), 2) member_ips = [m.ip for m in members] self.assertTrue('23.236.58.15' in member_ips) def test_ex_create_healthcheck(self): healthcheck_name = 'lchealthcheck' kwargs = {'host': 'lchost', 'path': '/lc', 'port': 8000, 'interval': 10, 'timeout': 10, 'unhealthy_threshold': 4, 'healthy_threshold': 3} hc = self.driver.ex_create_healthcheck(healthcheck_name, **kwargs) self.assertEqual(hc.name, healthcheck_name) self.assertEqual(hc.path, '/lc') self.assertEqual(hc.port, 8000) self.assertEqual(hc.interval, 10) def test_ex_list_healthchecks(self): healthchecks = self.driver.ex_list_healthchecks() self.assertEqual(len(healthchecks), 3) self.assertEqual(healthchecks[0].name, 'basic-check') def test_ex_balancer_detach_attach_healthcheck(self): healthcheck = self.driver.gce.ex_get_healthcheck( 'libcloud-lb-demo-healthcheck') balancer = self.driver.get_balancer('lcforwardingrule') healthchecks = self.driver.ex_balancer_list_healthchecks(balancer) self.assertEqual(len(healthchecks), 1) # Detach Healthcheck detach_healthcheck = self.driver.ex_balancer_detach_healthcheck( balancer, healthcheck) self.assertTrue(detach_healthcheck) healthchecks = self.driver.ex_balancer_list_healthchecks(balancer) self.assertEqual(len(healthchecks), 0) # Reattach Healthcheck attach_healthcheck = self.driver.ex_balancer_attach_healthcheck( balancer, healthcheck) self.assertTrue(attach_healthcheck) healthchecks = 
self.driver.ex_balancer_list_healthchecks(balancer) self.assertEqual(len(healthchecks), 1) def test_ex_balancer_list_healthchecks(self): balancer = self.driver.get_balancer('lcforwardingrule') healthchecks = self.driver.ex_balancer_list_healthchecks(balancer) self.assertEqual(healthchecks[0].name, 'libcloud-lb-demo-healthcheck') def test_node_to_member(self): node = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001', 'us-central1-b') balancer = self.driver.get_balancer('lcforwardingrule') member = self.driver._node_to_member(node, balancer) self.assertEqual(member.ip, node.public_ips[0]) self.assertEqual(member.id, node.name) self.assertEqual(member.port, balancer.port) def test_node_to_member_no_pub_ip(self): node = self.driver.gce.ex_get_node('libcloud-lb-nopubip-001', 'us-central1-b') balancer = self.driver.get_balancer('lcforwardingrule') member = self.driver._node_to_member(node, balancer) self.assertIsNone(member.ip) def test_forwarding_rule_to_loadbalancer(self): fwr = self.driver.gce.ex_get_forwarding_rule('lcforwardingrule') balancer = self.driver._forwarding_rule_to_loadbalancer(fwr) self.assertEqual(fwr.name, balancer.name) self.assertEqual(fwr.address, balancer.ip) self.assertEqual(fwr.extra['portRange'], balancer.port) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_rackspace.py0000664000175000017500000020320713153541406025154 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import datetime try: import simplejson as json except ImportError: import json from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlencode from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm from libcloud.loadbalancer.types import MemberCondition from libcloud.loadbalancer.drivers.rackspace import RackspaceLBDriver, \ RackspaceHealthMonitor, RackspaceHTTPHealthMonitor, \ RackspaceConnectionThrottle, RackspaceAccessRule from libcloud.loadbalancer.drivers.rackspace import RackspaceAccessRuleType from libcloud.common.types import LibcloudError from libcloud.test import unittest from libcloud.test import MockHttp from libcloud.test.file_fixtures import LoadBalancerFileFixtures from libcloud.test.file_fixtures import OpenStackFixtures class RackspaceLBTests(unittest.TestCase): def setUp(self): RackspaceLBDriver.connectionCls.conn_class = RackspaceLBMockHttp RackspaceLBMockHttp.type = None self.driver = RackspaceLBDriver('user', 'key') self.driver.connection.poll_interval = 0.0 # normally authentication happens lazily, but we force it here self.driver.connection._populate_hosts_and_request_paths() def test_force_auth_token_kwargs(self): base_url = 'https://ord.loadbalancer.api.rackspacecloud.com/v1.0/11111' kwargs = { 'ex_force_auth_token': 'some-auth-token', 'ex_force_base_url': base_url } driver = RackspaceLBDriver('user', 'key', **kwargs) driver.list_balancers() self.assertEqual(kwargs['ex_force_auth_token'], driver.connection.auth_token) self.assertEqual('/v1.0/11111', driver.connection.request_path) def 
test_force_auth_url_kwargs(self): kwargs = { 'ex_force_auth_version': '2.0', 'ex_force_auth_url': 'https://identity.api.rackspace.com' } driver = RackspaceLBDriver('user', 'key', **kwargs) self.assertEqual(kwargs['ex_force_auth_url'], driver.connection._ex_force_auth_url) self.assertEqual(kwargs['ex_force_auth_version'], driver.connection._auth_version) def test_gets_auth_2_0_endpoint_defaults_to_ord_region(self): driver = RackspaceLBDriver('user', 'key', ex_force_auth_version='2.0_password' ) driver.connection._populate_hosts_and_request_paths() self.assertEqual( 'https://ord.loadbalancers.api.rackspacecloud.com/v1.0/11111', driver.connection.get_endpoint()) def test_gets_auth_2_0_endpoint_for_dfw(self): driver = RackspaceLBDriver('user', 'key', ex_force_auth_version='2.0_password', ex_force_region='dfw' ) driver.connection._populate_hosts_and_request_paths() self.assertEqual( 'https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/11111', driver.connection.get_endpoint()) def test_list_protocols(self): protocols = self.driver.list_protocols() self.assertEqual(len(protocols), 10) self.assertTrue('http' in protocols) def test_ex_list_protocols_with_default_ports(self): protocols = self.driver.ex_list_protocols_with_default_ports() self.assertEqual(len(protocols), 10) self.assertTrue(('http', 80) in protocols) def test_list_supported_algorithms(self): algorithms = self.driver.list_supported_algorithms() self.assertTrue(Algorithm.RANDOM in algorithms) self.assertTrue(Algorithm.ROUND_ROBIN in algorithms) self.assertTrue(Algorithm.LEAST_CONNECTIONS in algorithms) self.assertTrue(Algorithm.WEIGHTED_ROUND_ROBIN in algorithms) self.assertTrue(Algorithm.WEIGHTED_LEAST_CONNECTIONS in algorithms) def test_ex_list_algorithms(self): algorithms = self.driver.ex_list_algorithm_names() self.assertTrue("RANDOM" in algorithms) self.assertTrue("ROUND_ROBIN" in algorithms) self.assertTrue("LEAST_CONNECTIONS" in algorithms) self.assertTrue("WEIGHTED_ROUND_ROBIN" in algorithms) 
self.assertTrue("WEIGHTED_LEAST_CONNECTIONS" in algorithms) def test_list_balancers(self): balancers = self.driver.list_balancers() self.assertEqual(len(balancers), 2) self.assertEqual(balancers[0].name, "test0") self.assertEqual(balancers[0].id, "8155") self.assertEqual(balancers[0].port, 80) self.assertEqual(balancers[0].ip, "1.1.1.25") self.assertTrue(balancers[0].extra.get('service_name') is not None) self.assertTrue(balancers[0].extra.get('uri') is not None) self.assertEqual(balancers[1].name, "test1") self.assertEqual(balancers[1].id, "8156") def test_list_balancers_ex_member_address(self): RackspaceLBMockHttp.type = 'EX_MEMBER_ADDRESS' balancers = self.driver.list_balancers(ex_member_address='127.0.0.1') self.assertEqual(len(balancers), 3) self.assertEqual(balancers[0].name, "First Loadbalancer") self.assertEqual(balancers[0].id, "1") self.assertEqual(balancers[1].name, "Second Loadbalancer") self.assertEqual(balancers[1].id, "2") self.assertEqual(balancers[2].name, "Third Loadbalancer") self.assertEqual(balancers[2].id, "8") def test_create_balancer(self): balancer = self.driver.create_balancer(name='test2', port=80, algorithm=Algorithm.ROUND_ROBIN, members=( Member( None, '10.1.0.10', 80, extra={'condition': MemberCondition.DISABLED, 'weight': 10}), Member(None, '10.1.0.11', 80)) ) self.assertEqual(balancer.name, 'test2') self.assertEqual(balancer.id, '8290') def test_ex_create_balancer(self): RackspaceLBDriver.connectionCls.conn_class = RackspaceLBWithVIPMockHttp RackspaceLBMockHttp.type = None driver = RackspaceLBDriver('user', 'key') balancer = driver.ex_create_balancer(name='test2', port=80, algorithm=Algorithm.ROUND_ROBIN, members=( Member( None, '10.1.0.11', 80),), vip='12af' ) self.assertEqual(balancer.name, 'test2') self.assertEqual(balancer.id, '8290') def test_destroy_balancer(self): balancer = self.driver.list_balancers()[0] ret = self.driver.destroy_balancer(balancer) self.assertTrue(ret) def test_ex_destroy_balancers(self): balancers = 
self.driver.list_balancers() ret = self.driver.ex_destroy_balancers(balancers) self.assertTrue(ret) def test_get_balancer(self): balancer = self.driver.get_balancer(balancer_id='8290') self.assertEqual(balancer.name, 'test2') self.assertEqual(balancer.id, '8290') def test_get_balancer_extra_vips(self): balancer = self.driver.get_balancer(balancer_id='18940') self.assertEqual(balancer.extra["virtualIps"], [{"address": "50.56.49.149", "id": 2359, "type": "PUBLIC", "ipVersion": "IPV4"}]) def test_get_balancer_extra_public_source_ipv4(self): balancer = self.driver.get_balancer(balancer_id='18940') self.assertEqual(balancer.extra["ipv4PublicSource"], '184.106.100.25') def test_get_balancer_extra_public_source_ipv6(self): balancer = self.driver.get_balancer(balancer_id='18940') self.assertEqual(balancer.extra["ipv6PublicSource"], '2001:4801:7901::6/64') def test_get_balancer_extra_private_source_ipv4(self): balancer = self.driver.get_balancer(balancer_id='18940') self.assertEqual(balancer.extra["ipv4PrivateSource"], '10.183.252.25') def test_get_balancer_extra_members(self): balancer = self.driver.get_balancer(balancer_id='8290') members = balancer.extra['members'] self.assertEqual(3, len(members)) self.assertEqual('10.1.0.11', members[0].ip) self.assertEqual('10.1.0.10', members[1].ip) self.assertEqual('10.1.0.9', members[2].ip) def test_get_balancer_extra_created(self): balancer = self.driver.get_balancer(balancer_id='8290') created_8290 = datetime.datetime(2011, 4, 7, 16, 27, 50) self.assertEqual(created_8290, balancer.extra['created']) def test_get_balancer_extra_updated(self): balancer = self.driver.get_balancer(balancer_id='8290') updated_8290 = datetime.datetime(2011, 4, 7, 16, 28, 12) self.assertEqual(updated_8290, balancer.extra['updated']) def test_get_balancer_extra_access_list(self): balancer = self.driver.get_balancer(balancer_id='94698') access_list = balancer.extra['accessList'] self.assertEqual(3, len(access_list)) self.assertEqual(2883, 
access_list[0].id) self.assertEqual("0.0.0.0/0", access_list[0].address) self.assertEqual(RackspaceAccessRuleType.DENY, access_list[0].rule_type) self.assertEqual(2884, access_list[1].id) self.assertEqual("2001:4801:7901::6/64", access_list[1].address) self.assertEqual(RackspaceAccessRuleType.ALLOW, access_list[1].rule_type) self.assertEqual(3006, access_list[2].id) self.assertEqual("8.8.8.8/0", access_list[2].address) self.assertEqual(RackspaceAccessRuleType.DENY, access_list[2].rule_type) def test_get_balancer_algorithm(self): balancer = self.driver.get_balancer(balancer_id='8290') self.assertEqual(balancer.extra["algorithm"], Algorithm.RANDOM) def test_get_balancer_protocol(self): balancer = self.driver.get_balancer(balancer_id='94695') self.assertEqual(balancer.extra['protocol'], 'HTTP') def test_get_balancer_weighted_round_robin_algorithm(self): balancer = self.driver.get_balancer(balancer_id='94692') self.assertEqual(balancer.extra["algorithm"], Algorithm.WEIGHTED_ROUND_ROBIN) def test_get_balancer_weighted_least_connections_algorithm(self): balancer = self.driver.get_balancer(balancer_id='94693') self.assertEqual(balancer.extra["algorithm"], Algorithm.WEIGHTED_LEAST_CONNECTIONS) def test_get_balancer_unknown_algorithm(self): balancer = self.driver.get_balancer(balancer_id='94694') self.assertFalse('algorithm' in balancer.extra) def test_get_balancer_connect_health_monitor(self): balancer = self.driver.get_balancer(balancer_id='94695') balancer_health_monitor = balancer.extra["healthMonitor"] self.assertEqual(balancer_health_monitor.type, "CONNECT") self.assertEqual(balancer_health_monitor.delay, 10) self.assertEqual(balancer_health_monitor.timeout, 5) self.assertEqual(balancer_health_monitor.attempts_before_deactivation, 2) def test_get_balancer_http_health_monitor(self): balancer = self.driver.get_balancer(balancer_id='94696') balancer_health_monitor = balancer.extra["healthMonitor"] self.assertEqual(balancer_health_monitor.type, "HTTP") 
self.assertEqual(balancer_health_monitor.delay, 10) self.assertEqual(balancer_health_monitor.timeout, 5) self.assertEqual(balancer_health_monitor.attempts_before_deactivation, 2) self.assertEqual(balancer_health_monitor.path, "/") self.assertEqual(balancer_health_monitor.status_regex, "^[234][0-9][0-9]$") self.assertEqual(balancer_health_monitor.body_regex, "Hello World!") def test_get_balancer_https_health_monitor(self): balancer = self.driver.get_balancer(balancer_id='94697') balancer_health_monitor = balancer.extra["healthMonitor"] self.assertEqual(balancer_health_monitor.type, "HTTPS") self.assertEqual(balancer_health_monitor.delay, 15) self.assertEqual(balancer_health_monitor.timeout, 12) self.assertEqual(balancer_health_monitor.attempts_before_deactivation, 5) self.assertEqual(balancer_health_monitor.path, "/test") self.assertEqual(balancer_health_monitor.status_regex, "^[234][0-9][0-9]$") self.assertEqual(balancer_health_monitor.body_regex, "abcdef") def test_get_balancer_connection_throttle(self): balancer = self.driver.get_balancer(balancer_id='94695') balancer_connection_throttle = balancer.extra["connectionThrottle"] self.assertEqual(balancer_connection_throttle.min_connections, 50) self.assertEqual(balancer_connection_throttle.max_connections, 200) self.assertEqual(balancer_connection_throttle.max_connection_rate, 50) self.assertEqual(balancer_connection_throttle.rate_interval_seconds, 10) def test_get_session_persistence(self): balancer = self.driver.get_balancer(balancer_id='94695') self.assertEqual(balancer.extra["sessionPersistenceType"], "HTTP_COOKIE") def test_get_connection_logging(self): balancer = self.driver.get_balancer(balancer_id='94695') self.assertEqual(balancer.extra["connectionLoggingEnabled"], True) def test_get_error_page(self): balancer = self.driver.get_balancer(balancer_id='18940') error_page = self.driver.ex_get_balancer_error_page(balancer) self.assertTrue("The service is temporarily unavailable" in error_page) def 
test_get_access_list(self): balancer = self.driver.get_balancer(balancer_id='18940') deny_rule, allow_rule = self.driver.ex_balancer_access_list(balancer) self.assertEqual(deny_rule.id, 2883) self.assertEqual(deny_rule.rule_type, RackspaceAccessRuleType.DENY) self.assertEqual(deny_rule.address, "0.0.0.0/0") self.assertEqual(allow_rule.id, 2884) self.assertEqual(allow_rule.address, "2001:4801:7901::6/64") self.assertEqual(allow_rule.rule_type, RackspaceAccessRuleType.ALLOW) def test_ex_create_balancer_access_rule(self): balancer = self.driver.get_balancer(balancer_id='94698') rule = RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY, address='0.0.0.0/0') rule = self.driver.ex_create_balancer_access_rule(balancer, rule) self.assertEqual(2883, rule.id) def test_ex_create_balancer_access_rule_no_poll(self): balancer = self.driver.get_balancer(balancer_id='94698') rule = RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY, address='0.0.0.0/0') resp = self.driver.ex_create_balancer_access_rule_no_poll(balancer, rule) self.assertTrue(resp) def test_ex_create_balancer_access_rules(self): balancer = self.driver.get_balancer(balancer_id='94699') rules = [RackspaceAccessRule(rule_type=RackspaceAccessRuleType.ALLOW, address='2001:4801:7901::6/64'), RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY, address='8.8.8.8/0')] rules = self.driver.ex_create_balancer_access_rules(balancer, rules) self.assertEqual(2, len(rules)) self.assertEqual(2884, rules[0].id) self.assertEqual(3006, rules[1].id) def test_ex_create_balancer_access_rules_no_poll(self): balancer = self.driver.get_balancer(balancer_id='94699') rules = [RackspaceAccessRule(rule_type=RackspaceAccessRuleType.ALLOW, address='2001:4801:7901::6/64'), RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY, address='8.8.8.8/0')] resp = self.driver.ex_create_balancer_access_rules_no_poll(balancer, rules) self.assertTrue(resp) def test_ex_destroy_balancer_access_rule(self): balancer = 
self.driver.get_balancer(balancer_id='94698') rule = RackspaceAccessRule(id='1007', rule_type=RackspaceAccessRuleType.ALLOW, address="10.45.13.5/12" ) balancer = self.driver.ex_destroy_balancer_access_rule(balancer, rule) rule_ids = [r.id for r in balancer.extra['accessList']] self.assertTrue(1007 not in rule_ids) def test_ex_destroy_balancer_access_rule_no_poll(self): balancer = self.driver.get_balancer(balancer_id='94698') rule = RackspaceAccessRule(id=1007, rule_type=RackspaceAccessRuleType.ALLOW, address="10.45.13.5/12" ) resp = self.driver.ex_destroy_balancer_access_rule_no_poll(balancer, rule) self.assertTrue(resp) def test_ex_destroy_balancer_access_rules(self): balancer = self.driver.get_balancer(balancer_id='94699') balancer = self.driver.ex_destroy_balancer_access_rules(balancer, balancer.extra['accessList']) self.assertEqual('94699', balancer.id) def test_ex_destroy_balancer_access_rules_no_poll(self): balancer = self.driver.get_balancer(balancer_id='94699') resp = self.driver.ex_destroy_balancer_access_rules_no_poll(balancer, balancer.extra['accessList']) self.assertTrue(resp) def test_ex_update_balancer_health_monitor(self): balancer = self.driver.get_balancer(balancer_id='94695') monitor = RackspaceHealthMonitor(type='CONNECT', delay=10, timeout=5, attempts_before_deactivation=2) balancer = self.driver.ex_update_balancer_health_monitor( balancer, monitor) updated_monitor = balancer.extra['healthMonitor'] self.assertEqual('CONNECT', updated_monitor.type) self.assertEqual(10, updated_monitor.delay) self.assertEqual(5, updated_monitor.timeout) self.assertEqual(2, updated_monitor.attempts_before_deactivation) def test_ex_update_balancer_http_health_monitor(self): balancer = self.driver.get_balancer(balancer_id='94696') monitor = RackspaceHTTPHealthMonitor(type='HTTP', delay=10, timeout=5, attempts_before_deactivation=2, path='/', status_regex='^[234][0-9][0-9]$', body_regex='Hello World!') balancer = self.driver.ex_update_balancer_health_monitor( 
balancer, monitor) updated_monitor = balancer.extra['healthMonitor'] self.assertEqual('HTTP', updated_monitor.type) self.assertEqual(10, updated_monitor.delay) self.assertEqual(5, updated_monitor.timeout) self.assertEqual(2, updated_monitor.attempts_before_deactivation) self.assertEqual('/', updated_monitor.path) self.assertEqual('^[234][0-9][0-9]$', updated_monitor.status_regex) self.assertEqual('Hello World!', updated_monitor.body_regex) def test_ex_update_balancer_health_monitor_no_poll(self): balancer = self.driver.get_balancer(balancer_id='94695') monitor = RackspaceHealthMonitor(type='CONNECT', delay=10, timeout=5, attempts_before_deactivation=2) resp = self.driver.ex_update_balancer_health_monitor_no_poll(balancer, monitor) self.assertTrue(resp) def test_ex_update_balancer_http_health_monitor_no_poll(self): balancer = self.driver.get_balancer(balancer_id='94696') monitor = RackspaceHTTPHealthMonitor(type='HTTP', delay=10, timeout=5, attempts_before_deactivation=2, path='/', status_regex='^[234][0-9][0-9]$', body_regex='Hello World!') resp = self.driver.ex_update_balancer_health_monitor_no_poll(balancer, monitor) self.assertTrue(resp) def test_ex_update_balancer_http_health_monitor_with_no_option_body_regex(self): balancer = self.driver.get_balancer(balancer_id='94700') monitor = RackspaceHTTPHealthMonitor(type='HTTP', delay=10, timeout=5, attempts_before_deactivation=2, path='/', status_regex='^[234][0-9][0-9]$', body_regex='') balancer = self.driver.ex_update_balancer_health_monitor( balancer, monitor) updated_monitor = balancer.extra['healthMonitor'] self.assertEqual('HTTP', updated_monitor.type) self.assertEqual(10, updated_monitor.delay) self.assertEqual(5, updated_monitor.timeout) self.assertEqual(2, updated_monitor.attempts_before_deactivation) self.assertEqual('/', updated_monitor.path) self.assertEqual('^[234][0-9][0-9]$', updated_monitor.status_regex) self.assertEqual('', updated_monitor.body_regex) def test_ex_disable_balancer_health_monitor(self): 
balancer = self.driver.get_balancer(balancer_id='8290') balancer = self.driver.ex_disable_balancer_health_monitor(balancer) self.assertTrue('healthMonitor' not in balancer.extra) def test_ex_disable_balancer_health_monitor_no_poll(self): balancer = self.driver.get_balancer(balancer_id='8290') resp = self.driver.ex_disable_balancer_health_monitor_no_poll(balancer) self.assertTrue(resp) def test_ex_update_balancer_connection_throttle(self): balancer = self.driver.get_balancer(balancer_id='94695') connection_throttle = RackspaceConnectionThrottle(max_connections=200, min_connections=50, max_connection_rate=50, rate_interval_seconds=10) balancer = self.driver.ex_update_balancer_connection_throttle(balancer, connection_throttle) updated_throttle = balancer.extra['connectionThrottle'] self.assertEqual(200, updated_throttle.max_connections) self.assertEqual(50, updated_throttle.min_connections) self.assertEqual(50, updated_throttle.max_connection_rate) self.assertEqual(10, updated_throttle.rate_interval_seconds) def test_ex_update_balancer_connection_throttle_no_poll(self): balancer = self.driver.get_balancer(balancer_id='94695') connection_throttle = RackspaceConnectionThrottle(max_connections=200, min_connections=50, max_connection_rate=50, rate_interval_seconds=10) resp = self.driver.ex_update_balancer_connection_throttle_no_poll( balancer, connection_throttle) self.assertTrue(resp) def test_ex_disable_balancer_connection_throttle(self): balancer = self.driver.get_balancer(balancer_id='8290') balancer = self.driver.ex_disable_balancer_connection_throttle( balancer) self.assertTrue('connectionThrottle' not in balancer.extra) def test_ex_disable_balancer_connection_throttle_no_poll(self): balancer = self.driver.get_balancer(balancer_id='8290') resp = self.driver.ex_disable_balancer_connection_throttle_no_poll( balancer) self.assertTrue(resp) def test_ex_enable_balancer_connection_logging(self): balancer = self.driver.get_balancer(balancer_id='94695') balancer = 
self.driver.ex_enable_balancer_connection_logging( balancer) self.assertTrue(balancer.extra["connectionLoggingEnabled"]) def test_ex_enable_balancer_connection_logging_no_poll(self): balancer = self.driver.get_balancer(balancer_id='94695') resp = self.driver.ex_enable_balancer_connection_logging_no_poll( balancer) self.assertTrue(resp) def test_ex_disable_balancer_connection_logging(self): balancer = self.driver.get_balancer(balancer_id='8290') balancer = self.driver.ex_disable_balancer_connection_logging( balancer ) self.assertFalse(balancer.extra["connectionLoggingEnabled"]) def test_ex_disable_balancer_connection_logging_no_poll(self): balancer = self.driver.get_balancer(balancer_id='8290') resp = self.driver.ex_disable_balancer_connection_logging_no_poll( balancer ) self.assertTrue(resp) def test_ex_enable_balancer_session_persistence(self): balancer = self.driver.get_balancer(balancer_id='94695') balancer = self.driver.ex_enable_balancer_session_persistence(balancer) persistence_type = balancer.extra['sessionPersistenceType'] self.assertEqual('HTTP_COOKIE', persistence_type) def test_ex_enable_balancer_session_persistence_no_poll(self): balancer = self.driver.get_balancer(balancer_id='94695') resp = self.driver.ex_enable_balancer_session_persistence_no_poll( balancer) self.assertTrue(resp) def test_disable_balancer_session_persistence(self): balancer = self.driver.get_balancer(balancer_id='8290') balancer = self.driver.ex_disable_balancer_session_persistence( balancer) self.assertTrue('sessionPersistenceType' not in balancer.extra) def test_disable_balancer_session_persistence_no_poll(self): balancer = self.driver.get_balancer(balancer_id='8290') resp = self.driver.ex_disable_balancer_session_persistence_no_poll( balancer) self.assertTrue(resp) def test_ex_update_balancer_error_page(self): balancer = self.driver.get_balancer(balancer_id='8290') content = "Generic Error Page" balancer = self.driver.ex_update_balancer_error_page( balancer, content) 
error_page_content = self.driver.ex_get_balancer_error_page(balancer) self.assertEqual(content, error_page_content) def test_ex_update_balancer_error_page_no_poll(self): balancer = self.driver.get_balancer(balancer_id='8290') content = "Generic Error Page" resp = self.driver.ex_update_balancer_error_page_no_poll( balancer, content) self.assertTrue(resp) def test_ex_disable_balancer_custom_error_page_no_poll(self): balancer = self.driver.get_balancer(balancer_id='94695') resp = self.driver.ex_disable_balancer_custom_error_page_no_poll( balancer) self.assertTrue(resp) def test_ex_disable_balancer_custom_error_page(self): fixtures = LoadBalancerFileFixtures('rackspace') error_page_fixture = json.loads( fixtures.load('error_page_default.json')) default_error_page = error_page_fixture['errorpage']['content'] balancer = self.driver.get_balancer(balancer_id='94695') balancer = self.driver.ex_disable_balancer_custom_error_page(balancer) error_page_content = self.driver.ex_get_balancer_error_page(balancer) self.assertEqual(default_error_page, error_page_content) def test_balancer_list_members(self): expected = set(['10.1.0.10:80', '10.1.0.11:80', '10.1.0.9:8080']) balancer = self.driver.get_balancer(balancer_id='8290') members = balancer.list_members() self.assertEqual(len(members), 3) self.assertEqual(members[0].balancer, balancer) self.assertEqual(expected, set(["%s:%s" % (member.ip, member.port) for member in members])) def test_balancer_members_extra_weight(self): balancer = self.driver.get_balancer(balancer_id='8290') members = balancer.list_members() self.assertEqual(12, members[0].extra['weight']) self.assertEqual(8, members[1].extra['weight']) def test_balancer_members_extra_condition(self): balancer = self.driver.get_balancer(balancer_id='8290') members = balancer.list_members() self.assertEqual(MemberCondition.ENABLED, members[0].extra['condition']) self.assertEqual(MemberCondition.DISABLED, members[1].extra['condition']) self.assertEqual(MemberCondition.DRAINING, 
members[2].extra['condition']) def test_balancer_members_extra_status(self): balancer = self.driver.get_balancer(balancer_id='8290') members = balancer.list_members() self.assertEqual('ONLINE', members[0].extra['status']) self.assertEqual('OFFLINE', members[1].extra['status']) self.assertEqual('DRAINING', members[2].extra['status']) def test_balancer_attach_member(self): balancer = self.driver.get_balancer(balancer_id='8290') extra = {'condition': MemberCondition.DISABLED, 'weight': 10} member = balancer.attach_member(Member(None, ip='10.1.0.12', port='80', extra=extra)) self.assertEqual(member.ip, '10.1.0.12') self.assertEqual(member.port, 80) def test_balancer_attach_member_with_no_condition_specified(self): balancer = self.driver.get_balancer(balancer_id='8291') member = balancer.attach_member(Member(None, ip='10.1.0.12', port='80')) self.assertEqual(member.ip, '10.1.0.12') self.assertEqual(member.port, 80) def test_balancer_attach_members(self): balancer = self.driver.get_balancer(balancer_id='8292') members = [Member(None, ip='10.1.0.12', port='80'), Member(None, ip='10.1.0.13', port='80')] attached_members = self.driver.ex_balancer_attach_members(balancer, members) first_member = attached_members[0] second_member = attached_members[1] self.assertEqual(first_member.ip, '10.1.0.12') self.assertEqual(first_member.port, 80) self.assertEqual(second_member.ip, '10.1.0.13') self.assertEqual(second_member.port, 80) def test_balancer_detach_member(self): balancer = self.driver.get_balancer(balancer_id='8290') member = balancer.list_members()[0] ret = balancer.detach_member(member) self.assertTrue(ret) def test_ex_detach_members(self): balancer = self.driver.get_balancer(balancer_id='8290') members = balancer.list_members() balancer = self.driver.ex_balancer_detach_members(balancer, members) self.assertEqual('8290', balancer.id) def test_ex_detach_members_no_poll(self): balancer = self.driver.get_balancer(balancer_id='8290') members = balancer.list_members() ret = 
self.driver.ex_balancer_detach_members_no_poll(balancer, members) self.assertTrue(ret) def test_update_balancer_protocol(self): balancer = LoadBalancer(id='3130', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) updated_balancer = self.driver.update_balancer( balancer, protocol='HTTPS') self.assertEqual('HTTPS', updated_balancer.extra['protocol']) def test_update_balancer_protocol_to_imapv2(self): balancer = LoadBalancer(id='3135', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) updated_balancer = self.driver.update_balancer( balancer, protocol='imapv2') self.assertEqual('IMAPv2', updated_balancer.extra['protocol']) def test_update_balancer_protocol_to_imapv3(self): balancer = LoadBalancer(id='3136', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) updated_balancer = self.driver.update_balancer( balancer, protocol='IMAPV3') self.assertEqual('IMAPv3', updated_balancer.extra['protocol']) def test_update_balancer_protocol_to_imapv4(self): balancer = LoadBalancer(id='3137', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) updated_balancer = self.driver.update_balancer( balancer, protocol='IMAPv4') self.assertEqual('IMAPv4', updated_balancer.extra['protocol']) def test_update_balancer_port(self): balancer = LoadBalancer(id='3131', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) updated_balancer = self.driver.update_balancer(balancer, port=1337) self.assertEqual(1337, updated_balancer.port) def test_update_balancer_name(self): balancer = LoadBalancer(id='3132', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) updated_balancer = self.driver.update_balancer( balancer, name='new_lb_name') self.assertEqual('new_lb_name', updated_balancer.name) def test_update_balancer_algorithm(self): balancer = LoadBalancer(id='3133', name='LB_update', 
state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) updated_balancer = self.driver.update_balancer(balancer, algorithm=Algorithm.ROUND_ROBIN) self.assertEqual( Algorithm.ROUND_ROBIN, updated_balancer.extra['algorithm']) def test_update_balancer_bad_algorithm_exception(self): balancer = LoadBalancer(id='3134', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) try: self.driver.update_balancer(balancer, algorithm='HAVE_MERCY_ON_OUR_SERVERS') except LibcloudError: pass else: self.fail( 'Should have thrown an exception with bad algorithm value') def test_ex_update_balancer_no_poll_protocol(self): balancer = LoadBalancer(id='3130', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) action_succeeded = self.driver.ex_update_balancer_no_poll( balancer, protocol='HTTPS') self.assertTrue(action_succeeded) def test_ex_update_balancer_no_poll_port(self): balancer = LoadBalancer(id='3131', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) action_succeeded = self.driver.ex_update_balancer_no_poll( balancer, port=1337) self.assertTrue(action_succeeded) def test_ex_update_balancer_no_poll_name(self): balancer = LoadBalancer(id='3132', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) action_succeeded = self.driver.ex_update_balancer_no_poll( balancer, name='new_lb_name') self.assertTrue(action_succeeded) def test_ex_update_balancer_no_poll_algorithm(self): balancer = LoadBalancer(id='3133', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) action_succeeded = self.driver.ex_update_balancer_no_poll(balancer, algorithm=Algorithm.ROUND_ROBIN) self.assertTrue(action_succeeded) def test_ex_update_balancer_no_poll_bad_algorithm_exception(self): balancer = LoadBalancer(id='3134', name='LB_update', state='PENDING_UPDATE', ip='10.34.4.3', port=80, driver=self.driver) try: 
self.driver.update_balancer(balancer, algorithm='HAVE_MERCY_ON_OUR_SERVERS') except LibcloudError: pass else: self.fail('Should have thrown exception with bad algorithm value') def test_ex_update_balancer_member_extra_attributes(self): balancer = self.driver.get_balancer(balancer_id='8290') members = self.driver.balancer_list_members(balancer) first_member = members[0] member = self.driver.ex_balancer_update_member(balancer, first_member, condition=MemberCondition.ENABLED, weight=12) self.assertEqual(MemberCondition.ENABLED, member.extra['condition']) self.assertEqual(12, member.extra['weight']) def test_ex_update_balancer_member_no_poll_extra_attributes(self): balancer = self.driver.get_balancer(balancer_id='8290') members = self.driver.balancer_list_members(balancer) first_member = members[0] resp = self.driver.ex_balancer_update_member_no_poll( balancer, first_member, condition=MemberCondition.ENABLED, weight=12) self.assertTrue(resp) def test_ex_list_current_usage(self): balancer = self.driver.get_balancer(balancer_id='8290') usage = self.driver.ex_list_current_usage(balancer=balancer) self.assertEqual( usage['loadBalancerUsageRecords'][0]['incomingTransferSsl'], 6182163) class RackspaceUKLBTests(RackspaceLBTests): def setUp(self): RackspaceLBDriver.connectionCls.conn_class = RackspaceLBMockHttp RackspaceLBMockHttp.type = None self.driver = RackspaceLBDriver('user', 'key', region='lon') # normally authentication happens lazily, but we force it here self.driver.connection._populate_hosts_and_request_paths() class RackspaceLBMockHttp(MockHttp, unittest.TestCase): fixtures = LoadBalancerFileFixtures('rackspace') auth_fixtures = OpenStackFixtures() def _v2_0_tokens(self, method, url, body, headers): body = self.fixtures.load('_v2_0__auth.json') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _v1_0_11111_loadbalancers_protocols(self, method, url, body, headers): body = self.fixtures.load('v1_slug_loadbalancers_protocols.json') return 
(httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) def _v1_0_11111_loadbalancers_algorithms(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('v1_slug_loadbalancers_algorithms.json') return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('v1_slug_loadbalancers.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) elif method == "POST": json_body = json.loads(body) loadbalancer_json = json_body['loadBalancer'] member_1_json, member_2_json = loadbalancer_json['nodes'] self.assertEqual(loadbalancer_json['protocol'], 'HTTP') self.assertEqual(loadbalancer_json['algorithm'], 'ROUND_ROBIN') self.assertEqual(loadbalancer_json['virtualIps'][0]['type'], 'PUBLIC') self.assertEqual(member_1_json['condition'], 'DISABLED') self.assertEqual(member_1_json['weight'], 10) self.assertEqual(member_2_json['condition'], 'ENABLED') body = self.fixtures.load('v1_slug_loadbalancers_post.json') return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) elif method == 'DELETE': balancers = self.fixtures.load('v1_slug_loadbalancers.json') balancers_json = json.loads(balancers) for balancer in balancers_json['loadBalancers']: id = balancer['id'] self.assertTrue(urlencode([('id', id)]) in url, msg='Did not delete balancer with id %d' % id) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_EX_MEMBER_ADDRESS(self, method, url, body, headers): body = self.fixtures.load('v1_slug_loadbalancers_nodeaddress.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v1_0_11111_loadbalancers_8155(self, method, url, body, headers): if method == "DELETE": return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_8290(self, 
method, url, body, headers): if method == "GET": body = self.fixtures.load('v1_slug_loadbalancers_8290.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_8290_nodes(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('v1_slug_loadbalancers_8290_nodes.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) elif method == "POST": json_body = json.loads(body) json_node = json_body['nodes'][0] self.assertEqual('DISABLED', json_node['condition']) self.assertEqual(10, json_node['weight']) response_body = self.fixtures.load( 'v1_slug_loadbalancers_8290_nodes_post.json') return (httplib.ACCEPTED, response_body, {}, httplib.responses[httplib.ACCEPTED]) elif method == "DELETE": nodes = self.fixtures.load('v1_slug_loadbalancers_8290_nodes.json') json_nodes = json.loads(nodes) for node in json_nodes['nodes']: id = node['id'] self.assertTrue(urlencode([('id', id)]) in url, msg='Did not delete member with id %d' % id) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_8291(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('v1_slug_loadbalancers_8291.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_8291_nodes(self, method, url, body, headers): if method == "POST": json_body = json.loads(body) json_node = json_body['nodes'][0] self.assertEqual('ENABLED', json_node['condition']) response_body = self.fixtures.load( 'v1_slug_loadbalancers_8290_nodes_post.json') return (httplib.ACCEPTED, response_body, {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_8292(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('v1_slug_loadbalancers_8292.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise 
NotImplementedError def _v1_0_11111_loadbalancers_8292_nodes(self, method, url, body, headers): if method == "POST": json_body = json.loads(body) json_node_1 = json_body['nodes'][0] json_node_2 = json_body['nodes'][1] self.assertEqual('10.1.0.12', json_node_1['address']) self.assertEqual('10.1.0.13', json_node_2['address']) response_body = self.fixtures.load( 'v1_slug_loadbalancers_8292_nodes_post.json') return (httplib.ACCEPTED, response_body, {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_8290_nodes_30944(self, method, url, body, headers): if method == "PUT": json_body = json.loads(body) self.assertEqual('ENABLED', json_body['condition']) self.assertEqual(12, json_body['weight']) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) elif method == "DELETE": return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_8290_healthmonitor(self, method, url, body, headers): if method == "DELETE": return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_8290_connectionthrottle(self, method, url, body, headers): if method == 'DELETE': return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_8290_connectionlogging(self, method, url, body, headers): # Connection Logging uses a PUT to disable connection logging if method == 'PUT': json_body = json.loads(body) self.assertFalse(json_body["connectionLogging"]["enabled"]) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_8290_sessionpersistence(self, method, url, body, headers): if method == 'DELETE': return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_8290_errorpage(self, method, url, body, headers): if 
method == 'GET': body = self.fixtures.load( 'v1_slug_loadbalancers_8290_errorpage.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) elif method == 'PUT': json_body = json.loads(body) self.assertEqual('Generic Error Page', json_body['errorpage']['content']) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_18940(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_18940_ex_public_ips.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_18945(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_18945_ex_public_ips.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_18940_errorpage(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_18940_errorpage.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_18940_accesslist(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load( 'v1_slug_loadbalancers_18940_accesslist.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_18941(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_18941_ex_private_ips.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94692(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_94692_weighted_round_robin.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94693(self, method, url, body, headers): if 
method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_94693_weighted_least_connections.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94694(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_94694_unknown_algorithm.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94695(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_94695_full_details.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94695_healthmonitor(self, method, url, body, headers): if method == 'PUT': json_body = json.loads(body) self.assertEqual('CONNECT', json_body['type']) self.assertEqual(10, json_body['delay']) self.assertEqual(5, json_body['timeout']) self.assertEqual(2, json_body['attemptsBeforeDeactivation']) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_94695_connectionthrottle(self, method, url, body, headers): if method == 'PUT': json_body = json.loads(body) self.assertEqual(50, json_body['minConnections']) self.assertEqual(200, json_body['maxConnections']) self.assertEqual(50, json_body['maxConnectionRate']) self.assertEqual(10, json_body['rateInterval']) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_94695_connectionlogging(self, method, url, body, headers): if method == 'PUT': json_body = json.loads(body) self.assertTrue(json_body["connectionLogging"]["enabled"]) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_94695_sessionpersistence(self, method, url, body, headers): if method == 'PUT': json_body = json.loads(body) 
persistence_type = json_body[ 'sessionPersistence']['persistenceType'] self.assertEqual('HTTP_COOKIE', persistence_type) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_94695_errorpage(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load("error_page_default.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) elif method == 'DELETE': return (httplib.OK, '', {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94696(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_94696_http_health_monitor.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94696_healthmonitor(self, method, url, body, headers): if method == 'PUT': json_body = json.loads(body) self.assertEqual('HTTP', json_body['type']) self.assertEqual(10, json_body['delay']) self.assertEqual(5, json_body['timeout']) self.assertEqual(2, json_body['attemptsBeforeDeactivation']) self.assertEqual('/', json_body['path']) self.assertEqual('^[234][0-9][0-9]$', json_body['statusRegex']) self.assertEqual('Hello World!', json_body['bodyRegex']) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_94697(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_94697_https_health_monitor.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94698(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_94698_with_access_list.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94698_accesslist(self, method, url, body, headers): if method == 
'GET': body = self.fixtures.load( 'v1_slug_loadbalancers_94698_accesslist.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) elif method == 'POST': json_body = json.loads(body) self.assertEqual('0.0.0.0/0', json_body['networkItem']['address']) self.assertEqual('DENY', json_body['networkItem']['type']) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_94699(self, method, url, body, headers): if method == 'GET': # Use the same fixture for batch deletes as for single deletes body = self.fixtures.load( 'v1_slug_loadbalancers_94698_with_access_list.json') json_body = json.loads(body) json_body['loadBalancer']['id'] = 94699 updated_body = json.dumps(json_body) return (httplib.OK, updated_body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94699_accesslist(self, method, url, body, headers): if method == 'DELETE': fixture = 'v1_slug_loadbalancers_94698_with_access_list.json' fixture_json = json.loads(self.fixtures.load(fixture)) access_list_json = fixture_json['loadBalancer']['accessList'] for access_rule in access_list_json: id = access_rule['id'] self.assertTrue(urlencode([('id', id)]) in url, msg='Did not delete access rule with id %d' % id) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) elif method == 'POST': json_body = json.loads(body) access_list = json_body['accessList'] self.assertEqual('ALLOW', access_list[0]['type']) self.assertEqual('2001:4801:7901::6/64', access_list[0]['address']) self.assertEqual('DENY', access_list[1]['type']) self.assertEqual('8.8.8.8/0', access_list[1]['address']) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_94698_accesslist_1007(self, method, url, body, headers): if method == 'DELETE': return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def 
_v1_0_11111_loadbalancers_94700(self, method, url, body, headers): if method == "GET": body = self.fixtures.load( "v1_slug_loadbalancers_94700_http_health_monitor_no_body_regex.json") return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_94700_healthmonitor(self, method, url, body, headers): if method == 'PUT': json_body = json.loads(body) self.assertEqual('HTTP', json_body['type']) self.assertEqual(10, json_body['delay']) self.assertEqual(5, json_body['timeout']) self.assertEqual(2, json_body['attemptsBeforeDeactivation']) self.assertEqual('/', json_body['path']) self.assertEqual('^[234][0-9][0-9]$', json_body['statusRegex']) self.assertFalse('bodyRegex' in json_body) return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError def _v1_0_11111_loadbalancers_3130(self, method, url, body, headers): """ update_balancer(b, protocol='HTTPS'), then get_balancer('3130') """ if method == "PUT": json_body = json.loads(body) self.assertDictEqual(json_body, {'protocol': 'HTTPS'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads( self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) response_body['loadBalancer']['id'] = 3130 response_body['loadBalancer']['protocol'] = 'HTTPS' return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_3131(self, method, url, body, headers): """ update_balancer(b, port=443), then get_balancer('3131') """ if method == "PUT": json_body = json.loads(body) self.assertDictEqual(json_body, {'port': 1337}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads( self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) response_body['loadBalancer']['id'] = 3131 response_body['loadBalancer']['port'] = 1337 return (httplib.OK, 
json.dumps(response_body), {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_3132(self, method, url, body, headers): """ update_balancer(b, name='new_lb_name'), then get_balancer('3132') """ if method == "PUT": json_body = json.loads(body) self.assertDictEqual(json_body, {'name': 'new_lb_name'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads( self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) response_body['loadBalancer']['id'] = 3132 response_body['loadBalancer']['name'] = 'new_lb_name' return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_3133(self, method, url, body, headers): """ update_balancer(b, algorithm='ROUND_ROBIN'), then get_balancer('3133') """ if method == "PUT": json_body = json.loads(body) self.assertDictEqual(json_body, {'algorithm': 'ROUND_ROBIN'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads( self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) response_body['loadBalancer']['id'] = 3133 response_body['loadBalancer']['algorithm'] = 'ROUND_ROBIN' return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_3134(self, method, url, body, headers): """ update.balancer(b, algorithm='HAVE_MERCY_ON_OUR_SERVERS') """ if method == "PUT": return (httplib.BAD_REQUEST, "", {}, httplib.responses[httplib.BAD_REQUEST]) raise NotImplementedError def _v1_0_11111_loadbalancers_3135(self, method, url, body, headers): """ update_balancer(b, protocol='IMAPv3'), then get_balancer('3135') """ if method == "PUT": json_body = json.loads(body) self.assertDictEqual(json_body, {'protocol': 'IMAPv2'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads( 
self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) response_body['loadBalancer']['id'] = 3135 response_body['loadBalancer']['protocol'] = 'IMAPv2' return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_3136(self, method, url, body, headers): """ update_balancer(b, protocol='IMAPv3'), then get_balancer('3136') """ if method == "PUT": json_body = json.loads(body) self.assertDictEqual(json_body, {'protocol': 'IMAPv3'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads( self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) response_body['loadBalancer']['id'] = 3136 response_body['loadBalancer']['protocol'] = 'IMAPv3' return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_3137(self, method, url, body, headers): """ update_balancer(b, protocol='IMAPv3'), then get_balancer('3137') """ if method == "PUT": json_body = json.loads(body) self.assertDictEqual(json_body, {'protocol': 'IMAPv4'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads( self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) response_body['loadBalancer']['id'] = 3137 response_body['loadBalancer']['protocol'] = 'IMAPv4' return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) raise NotImplementedError def _v1_0_11111_loadbalancers_8290_usage_current(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load( 'v1_0_slug_loadbalancers_8290_usage_current.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) raise NotImplementedError class RackspaceLBWithVIPMockHttp(MockHttp, unittest.TestCase): fixtures = LoadBalancerFileFixtures('rackspace') auth_fixtures = OpenStackFixtures() def _v2_0_tokens(self, method, url, body, headers): body = 
self.fixtures.load('_v2_0__auth.json') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _v1_0_11111_loadbalancers(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('v1_slug_loadbalancers.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) elif method == "POST": json_body = json.loads(body) loadbalancer_json = json_body['loadBalancer'] self.assertEqual(loadbalancer_json['virtualIps'][0]['id'], '12af') body = self.fixtures.load('v1_slug_loadbalancers_post.json') return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) raise NotImplementedError if __name__ == "__main__": sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_alb.py0000664000175000017500000001761013153541406023757 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys import unittest from libcloud.utils.py3 import httplib from libcloud.loadbalancer.drivers.alb import ApplicationLBDriver from libcloud.loadbalancer.types import State from libcloud.test import MockHttp from libcloud.test.secrets import LB_ALB_PARAMS from libcloud.test.file_fixtures import LoadBalancerFileFixtures class ApplicationLBTests(unittest.TestCase): def setUp(self): ApplicationLBMockHttp.test = self ApplicationLBDriver.connectionCls.conn_class = ApplicationLBMockHttp ApplicationLBMockHttp.type = None ApplicationLBMockHttp.use_param = 'Action' self.driver = ApplicationLBDriver(*LB_ALB_PARAMS) def test_instantiate_driver_with_token(self): token = 'temporary_credentials_token' driver = ApplicationLBDriver(*LB_ALB_PARAMS, **{'token': token}) self.assertTrue(hasattr(driver, 'token'), 'Driver has no attribute token') self.assertEquals(token, driver.token, "Driver token does not match with provided token") def test_driver_with_token_signature_version(self): token = 'temporary_credentials_token' driver = ApplicationLBDriver(*LB_ALB_PARAMS, **{'token': token}) kwargs = driver._ex_connection_class_kwargs() self.assertTrue(('signature_version' in kwargs), 'Driver has no attribute signature_version') self.assertEquals('4', kwargs['signature_version'], 'Signature version is not 4 with temporary credentials') def test_list_protocols(self): protocols = self.driver.list_protocols() self.assertEqual(len(protocols), 2) self.assertTrue('http' in protocols) self.assertTrue('https' in protocols) def test_list_balancers(self): balancers = self.driver.list_balancers() self.assertEqual(len(balancers), 1) self.assertEqual( balancers[0].id, 'arn:aws:elasticloadbalancing:us-east-1:111111111111:loadbalancer/app/Test-ALB/1111111111111111' ) self.assertEqual(balancers[0].name, 'Test-ALB') def test_get_balancer(self): balancer = self.driver.get_balancer(balancer_id='Test-ALB') self.assertEqual( balancer.id, 
'arn:aws:elasticloadbalancing:us-east-1:111111111111:loadbalancer/app/Test-ALB/1111111111111111' ) self.assertEqual(balancer.name, 'Test-ALB') self.assertEqual(balancer.state, State.UNKNOWN) def test_balancer_list_members(self): balancer = self.driver.get_balancer(balancer_id='Test-ALB') members = balancer.list_members() self.assertEqual(len(members), 1) self.assertEqual(members[0].balancer, balancer) self.assertEqual('i-01111111111111111', members[0].id) def test_ex_balancer_list_listeners(self): balancer = self.driver.get_balancer(balancer_id='Test-ALB') self.assertTrue(('listeners' in balancer.extra), 'No listeners dict found in balancer.extra') listeners = self.driver.ex_balancer_list_listeners(balancer) self.assertEqual(len(listeners), 1) def test_ex_get_balancer_tags(self): balancer = self.driver.get_balancer(balancer_id='Test-ALB') self.assertTrue(('tags' in balancer.extra), 'No tags dict found in balancer.extra') tags = self.driver._ex_get_balancer_tags(balancer) self.assertEqual(tags['project'], 'lima') def test_ex_get_target_group_members(self): target_group_members = self.driver._ex_get_target_group_members( 'arn:aws:elasticloadbalancing:us-east-1:111111111111:targetgroup/TEST-TARGET-GROUP1/1111111111111111' ) self.assertEqual(len(target_group_members), 1) self.assertTrue(('id' in target_group_members[0]), 'Target group member is missing "id" field') self.assertTrue(('port' in target_group_members[0]), 'Target group member is missing "port" field') self.assertTrue(('health' in target_group_members[0]), 'Target group member is missing "health" field') def test_ex_get_balancer_target_groups(self): balancer = self.driver.get_balancer(balancer_id='Test-ALB') target_groups = self.driver._ex_get_balancer_target_groups(balancer) self.assertEqual(len(target_groups), 1) self.assertTrue(('id' in target_groups[0]), 'Target group is missing "id" field') self.assertTrue(('name' in target_groups[0]), 'Target group is missing "port" field') self.assertTrue(('members' 
in target_groups[0]), 'Target group is missing "members" field') def test_ex_get_balancer_listeners(self): balancer = self.driver.get_balancer(balancer_id='Test-ALB') listeners = self.driver._ex_get_balancer_listeners(balancer) self.assertEqual(len(listeners), 1) self.assertTrue(('id' in listeners[0]), 'Listener is missing "id" field') self.assertTrue(('port' in listeners[0]), 'Listener is missing "port" field') self.assertTrue(('protocol' in listeners[0]), 'Listener is missing "protocol" field') self.assertTrue(('rules' in listeners[0]), 'Listener is missing "rules" field') def test_ex_get_rules_for_listener(self): listener_rules = self.driver._ex_get_rules_for_listener( 'arn:aws:elasticloadbalancing:us-east-1:111111111111:listener/app/Test-ALB/1111111111111111/1111111111111111' ) self.assertEqual(len(listener_rules), 1) self.assertTrue(('id' in listener_rules[0]), 'Rule is missing "id" field') self.assertTrue(('is_default' in listener_rules[0]), 'Rule is missing "port" field') self.assertTrue(('priority' in listener_rules[0]), 'Rule is missing "priority" field') self.assertTrue(('target_group' in listener_rules[0]), 'Rule is missing "target_group" field') self.assertTrue(('conditions' in listener_rules[0]), 'Rule is missing "conditions" field') class ApplicationLBMockHttp(MockHttp): fixtures = LoadBalancerFileFixtures('alb') def _2015_12_01_DescribeLoadBalancers(self, method, url, body, headers): body = self.fixtures.load('describe_load_balancers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2015_12_01_DescribeListeners(self, method, url, body, headers): body = self.fixtures.load('describe_load_balancer_listeters.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2015_12_01_DescribeRules(self, method, url, body, headers): body = self.fixtures.load('describe_load_balancer_rules.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2015_12_01_DescribeTargetGroups(self, method, url, body, headers): 
body = self.fixtures.load('describe_load_balancer_target_groups.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2015_12_01_DescribeTargetHealth(self, method, url, body, headers): body = self.fixtures.load('describe_target_health.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2015_12_01_DescribeTags(self, method, url, body, headers): body = self.fixtures.load('describe_tags.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == "__main__": sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_brightbox.py0000664000175000017500000001271713153541406025214 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys import unittest from libcloud.utils.py3 import httplib from libcloud.loadbalancer.base import Member, Algorithm from libcloud.loadbalancer.drivers.brightbox import BrightboxLBDriver from libcloud.loadbalancer.types import State from libcloud.test import MockHttp from libcloud.test.secrets import LB_BRIGHTBOX_PARAMS from libcloud.test.file_fixtures import LoadBalancerFileFixtures class BrightboxLBTests(unittest.TestCase): def setUp(self): BrightboxLBDriver.connectionCls.conn_class = BrightboxLBMockHttp BrightboxLBMockHttp.type = None self.driver = BrightboxLBDriver(*LB_BRIGHTBOX_PARAMS) def test_list_protocols(self): protocols = self.driver.list_protocols() self.assertEqual(len(protocols), 2) self.assertTrue('tcp' in protocols) self.assertTrue('http' in protocols) def test_list_balancers(self): balancers = self.driver.list_balancers() self.assertEqual(len(balancers), 1) self.assertEqual(balancers[0].id, 'lba-1235f') self.assertEqual(balancers[0].name, 'lb1') def test_get_balancer(self): balancer = self.driver.get_balancer(balancer_id='lba-1235f') self.assertEqual(balancer.id, 'lba-1235f') self.assertEqual(balancer.name, 'lb1') self.assertEqual(balancer.state, State.RUNNING) def test_destroy_balancer(self): balancer = self.driver.get_balancer(balancer_id='lba-1235f') self.assertTrue(self.driver.destroy_balancer(balancer)) def test_create_balancer(self): members = [Member('srv-lv426', None, None)] balancer = self.driver.create_balancer(name='lb2', port=80, protocol='http', algorithm=Algorithm.ROUND_ROBIN, members=members) self.assertEqual(balancer.name, 'lb2') self.assertEqual(balancer.port, 80) self.assertEqual(balancer.state, State.PENDING) def test_balancer_list_members(self): balancer = self.driver.get_balancer(balancer_id='lba-1235f') members = balancer.list_members() self.assertEqual(len(members), 1) self.assertEqual(members[0].balancer, balancer) self.assertEqual('srv-lv426', members[0].id) def test_balancer_attach_member(self): balancer = 
self.driver.get_balancer(balancer_id='lba-1235f') member = balancer.attach_member(Member('srv-kg983', ip=None, port=None)) self.assertEqual(member.id, 'srv-kg983') def test_balancer_detach_member(self): balancer = self.driver.get_balancer(balancer_id='lba-1235f') member = Member('srv-lv426', None, None) self.assertTrue(balancer.detach_member(member)) class BrightboxLBMockHttp(MockHttp): fixtures = LoadBalancerFileFixtures('brightbox') def _token(self, method, url, body, headers): if method == 'POST': return (httplib.OK, self.fixtures.load('token.json'), {'content-type': 'application/json'}, httplib.responses[httplib.OK]) def _1_0_load_balancers(self, method, url, body, headers): if method == 'GET': return (httplib.OK, self.fixtures.load('load_balancers.json'), {'content-type': 'application/json'}, httplib.responses[httplib.OK]) elif method == 'POST': body = self.fixtures.load('load_balancers_post.json') return (httplib.ACCEPTED, body, {'content-type': 'application/json'}, httplib.responses[httplib.ACCEPTED]) def _1_0_load_balancers_lba_1235f(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('load_balancers_lba_1235f.json') return (httplib.OK, body, {'content-type': 'application/json'}, httplib.responses[httplib.OK]) elif method == 'DELETE': return (httplib.ACCEPTED, '', {'content-type': 'application/json'}, httplib.responses[httplib.ACCEPTED]) def _1_0_load_balancers_lba_1235f_add_nodes(self, method, url, body, headers): if method == 'POST': return (httplib.ACCEPTED, '', {'content-type': 'application/json'}, httplib.responses[httplib.ACCEPTED]) def _1_0_load_balancers_lba_1235f_remove_nodes(self, method, url, body, headers): if method == 'POST': return (httplib.ACCEPTED, '', {'content-type': 'application/json'}, httplib.responses[httplib.ACCEPTED]) if __name__ == "__main__": sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_cloudstack.py0000664000175000017500000001065313153541406025355 0ustar 
kamikami00000000000000import sys try: import simplejson as json except ImportError: import json from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import parse_qsl from libcloud.loadbalancer.types import Provider from libcloud.loadbalancer.providers import get_driver from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm from libcloud.loadbalancer.drivers.cloudstack import CloudStackLBDriver from libcloud.test import unittest from libcloud.test import MockHttp from libcloud.test.file_fixtures import LoadBalancerFileFixtures class CloudStackLBTests(unittest.TestCase): def setUp(self): CloudStackLBDriver.connectionCls.conn_class = CloudStackMockHttp CloudStackLBDriver.path = '/test/path' CloudStackLBDriver.type = -1 CloudStackLBDriver.name = 'CloudStack' self.driver = CloudStackLBDriver('apikey', 'secret') CloudStackMockHttp.fixture_tag = 'default' self.driver.connection.poll_interval = 0.0 def test_user_must_provide_host_and_path(self): CloudStackLBDriver.path = None CloudStackLBDriver.type = Provider.CLOUDSTACK expected_msg = 'When instantiating CloudStack driver directly ' + \ 'you also need to provide host and path argument' cls = get_driver(Provider.CLOUDSTACK) self.assertRaisesRegexp(Exception, expected_msg, cls, 'key', 'secret') try: cls('key', 'secret', True, 'localhost', '/path') except Exception: self.fail('host and path provided but driver raised an exception') def test_list_supported_algorithms(self): algorithms = self.driver.list_supported_algorithms() self.assertTrue(Algorithm.ROUND_ROBIN in algorithms) self.assertTrue(Algorithm.LEAST_CONNECTIONS in algorithms) def test_list_balancers(self): balancers = self.driver.list_balancers() for balancer in balancers: self.assertTrue(isinstance(balancer, LoadBalancer)) def test_create_balancer(self): members = [Member(1, '1.1.1.1', 80), Member(2, '1.1.1.2', 80)] balancer = self.driver.create_balancer( name='test', algorithm=Algorithm.ROUND_ROBIN, 
members=members) self.assertTrue(isinstance(balancer, LoadBalancer)) def test_destroy_balancer(self): balancer = self.driver.list_balancers()[0] self.driver.destroy_balancer(balancer) def test_balancer_attach_member(self): balancer = self.driver.list_balancers()[0] member = Member(id=1234, ip='1.1.1.1', port=80) balancer.attach_member(member) def test_balancer_detach_member(self): balancer = self.driver.list_balancers()[0] member = balancer.list_members()[0] balancer.detach_member(member) def test_balancer_list_members(self): balancer = self.driver.list_balancers()[0] members = balancer.list_members() for member in members: self.assertTrue(isinstance(member, Member)) self.assertEqual(member.balancer, balancer) class CloudStackMockHttp(MockHttp, unittest.TestCase): fixtures = LoadBalancerFileFixtures('cloudstack') fixture_tag = 'default' def _load_fixture(self, fixture): body = self.fixtures.load(fixture) return body, json.loads(body) def _test_path(self, method, url, body, headers): url = urlparse.urlparse(url) query = dict(parse_qsl(url.query)) self.assertTrue('apiKey' in query) self.assertTrue('command' in query) self.assertTrue('response' in query) self.assertTrue('signature' in query) self.assertTrue(query['response'] == 'json') del query['apiKey'] del query['response'] del query['signature'] command = query.pop('command') if hasattr(self, '_cmd_' + command): return getattr(self, '_cmd_' + command)(**query) else: fixture = command + '_' + self.fixture_tag + '.json' body, obj = self._load_fixture(fixture) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _cmd_queryAsyncJobResult(self, jobid): fixture = 'queryAsyncJobResult' + '_' + str(jobid) + '.json' body, obj = self._load_fixture(fixture) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == "__main__": sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_softlayer.py0000664000175000017500000001775613153541406025244 0ustar kamikami00000000000000# 
Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import httplib from libcloud.utils.py3 import xmlrpclib from libcloud.compute.base import NodeLocation from libcloud.loadbalancer.base import Member, Algorithm from libcloud.loadbalancer.drivers.softlayer import SoftlayerLBDriver from libcloud.loadbalancer.types import State from libcloud.test import MockHttp from libcloud.test.secrets import SOFTLAYER_PARAMS from libcloud.test.file_fixtures import LoadBalancerFileFixtures class SoftlayerLBTests(unittest.TestCase): def setUp(self): SoftlayerLBDriver.connectionCls.conn_class = SoftLayerMockHttp SoftLayerMockHttp.type = None self.driver = SoftlayerLBDriver(*SOFTLAYER_PARAMS) def test_list_protocols(self): protocols = self.driver.list_protocols() self.assertEqual(len(protocols), 6) self.assertTrue('tcp' in protocols) self.assertTrue('http' in protocols) def test_list_balancers(self): balancers = self.driver.list_balancers() self.assertEqual(len(balancers), 2) self.assertEqual(balancers[0].id, '76185') self.assertEqual(balancers[0].extra['datacenter'], 'dal05') self.assertEqual(balancers[0].extra['connection_limit'], 50) self.assertEqual(balancers[1].id, '76265') 
self.assertEqual(balancers[1].extra['datacenter'], 'par01') self.assertEqual(balancers[1].extra['connection_limit'], 50) def test_get_balancer(self): balancer = self.driver.get_balancer(balancer_id='76185') self.assertEqual(balancer.id, '76185') self.assertEqual(balancer.state, State.UNKNOWN) self.assertEqual(balancer.extra['datacenter'], 'dal05') self.assertEqual(balancer.extra['protocol'], 'http') self.assertEqual(balancer.extra['algorithm'], Algorithm.ROUND_ROBIN) def test_balancer_list_members(self): balancer = self.driver.get_balancer(balancer_id='76185') members = balancer.list_members() self.assertEqual(len(members), 3) self.assertEqual(members[0].balancer, balancer) self.assertEqual(members[0].id, '226227') self.assertEqual(members[0].ip, '10.126.5.34') self.assertEqual(members[1].balancer, balancer) self.assertEqual(members[1].id, '226229') self.assertEqual(members[1].ip, '10.126.5.35') def test_balancer_attach_member(self): balancer = self.driver.get_balancer(balancer_id='76185') member = balancer.attach_member(Member(None, ip='10.126.5.34', port=8000)) self.assertEqual(member.id, '226227') self.assertEqual(member.ip, '10.126.5.34') self.assertEqual(member.port, 8000) def test_balancer_detach_member(self): balancer = self.driver.get_balancer(balancer_id='76265') member = Member('226227', None, None) self.assertTrue(balancer.detach_member(member)) def test_destroy_balancer(self): balancer = self.driver.get_balancer(balancer_id='76185') self.assertTrue(self.driver.destroy_balancer(balancer)) def test_ex_list_balancer_packages(self): packages = self.driver.ex_list_balancer_packages() self.assertEqual(len(packages), 9) def test_ex_place_balancer_order(self): packages = self.driver.ex_list_balancer_packages() lb_package = [p for p in packages if p.capacity == 50][0] self.assertTrue(self.driver.ex_place_balancer_order( lb_package, NodeLocation('dal05', None, None, None))) class SoftLayerMockHttp(MockHttp): fixtures = LoadBalancerFileFixtures('softlayer') def 
_get_method_name(self, type, use_param, qs, path): return "_xmlrpc" def _xmlrpc(self, method, url, body, headers): params, meth_name = xmlrpclib.loads(body) url = url.replace("/", "_") meth_name = "%s_%s" % (url, meth_name) return getattr(self, meth_name)(method, url, body, headers) def _xmlrpc_v3_SoftLayer_Account_getAdcLoadBalancers( self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Account_getAdcLoadBalancers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc_v3_SoftLayer_Billing_Item_cancelService(self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Billing_Item_cancelService.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc_v3_SoftLayer_Location_Datacenter_getDatacenters( self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Location_Datacenter_getDatacenters.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc_v3_SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_deleteObject( self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Network_Application_Delivery_Controller_' 'LoadBalancer_Service_deleteObject.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc_v3_SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_editObject( self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Network_Application_Delivery_Controller_' 'LoadBalancer_VirtualIpAddress_editObject.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc_v3_SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_getBillingItem( self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Network_Application_Delivery_Controller_' 'LoadBalancer_VirtualIpAddress_getBillingItem.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_xmlrpc_v3_SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_getObject( self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Network_Application_Delivery_Controller_' 'LoadBalancer_VirtualIpAddress_getObject.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc_v3_SoftLayer_Network_Subnet_IpAddress_getByIpAddress( self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Network_Subnet_IpAddress_getByIpAddress.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc_v3_SoftLayer_Product_Order_placeOrder(self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Product_Order_placeOrder.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc_v3_SoftLayer_Product_Package_getItems(self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Product_Package_getItems.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == "__main__": sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_ninefold.py0000664000175000017500000000054212701023453025006 0ustar kamikami00000000000000import sys import unittest from libcloud.loadbalancer.types import Provider from libcloud.loadbalancer.providers import get_driver class NinefoldLbTestCase(unittest.TestCase): def test_driver_instantiation(self): cls = get_driver(Provider.NINEFOLD) cls('username', 'key') if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_gogrid.py0000664000175000017500000002005313153541406024467 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse from libcloud.common.types import LibcloudError from libcloud.compute.base import Node from libcloud.compute.drivers.dummy import DummyNodeDriver from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm from libcloud.loadbalancer.drivers.gogrid import GoGridLBDriver from libcloud.test import MockHttp from libcloud.test.file_fixtures import LoadBalancerFileFixtures class GoGridTests(unittest.TestCase): def setUp(self): GoGridLBDriver.connectionCls.conn_class = GoGridLBMockHttp GoGridLBMockHttp.type = None self.driver = GoGridLBDriver('user', 'key') def test_list_supported_algorithms(self): algorithms = self.driver.list_supported_algorithms() self.assertTrue(Algorithm.ROUND_ROBIN in algorithms) self.assertTrue(Algorithm.LEAST_CONNECTIONS in algorithms) def test_list_protocols(self): protocols = self.driver.list_protocols() self.assertEqual(len(protocols), 1) self.assertEqual(protocols[0], 'http') def test_list_balancers(self): balancers = self.driver.list_balancers() self.assertEqual(len(balancers), 2) self.assertEqual(balancers[0].name, "foo") self.assertEqual(balancers[0].id, "23517") self.assertEqual(balancers[1].name, "bar") self.assertEqual(balancers[1].id, "23526") def test_create_balancer(self): balancer = self.driver.create_balancer(name='test2', port=80, protocol='http', 
algorithm=Algorithm.ROUND_ROBIN, members=( Member( None, '10.1.0.10', 80), Member(None, '10.1.0.11', 80)) ) self.assertEqual(balancer.name, 'test2') self.assertEqual(balancer.id, '123') def test_create_balancer_UNEXPECTED_ERROR(self): # Try to create new balancer and attach members with an IP address which # does not belong to this account GoGridLBMockHttp.type = 'UNEXPECTED_ERROR' try: self.driver.create_balancer(name='test2', port=80, protocol='http', algorithm=Algorithm.ROUND_ROBIN, members=(Member(None, '10.1.0.10', 80), Member(None, '10.1.0.11', 80)) ) except LibcloudError: e = sys.exc_info()[1] self.assertTrue( str(e).find('tried to add a member with an IP address not assigned to your account') != -1) else: self.fail('Exception was not thrown') def test_destroy_balancer(self): balancer = self.driver.list_balancers()[0] ret1 = self.driver.destroy_balancer(balancer) ret2 = balancer.destroy() self.assertTrue(ret1) self.assertTrue(ret2) def test_get_balancer(self): balancer = self.driver.get_balancer(balancer_id='23530') self.assertEqual(balancer.name, 'test2') self.assertEqual(balancer.id, '23530') def test_balancer_list_members(self): balancer = self.driver.get_balancer(balancer_id='23530') members1 = self.driver.balancer_list_members(balancer=balancer) members2 = balancer.list_members() expected_members = set(['10.0.0.78:80', '10.0.0.77:80', '10.0.0.76:80']) self.assertEqual(len(members1), 3) self.assertEqual(len(members2), 3) self.assertEqual(expected_members, set(["%s:%s" % (member.ip, member.port) for member in members1])) self.assertEqual(members1[0].balancer, balancer) def test_balancer_attach_compute_node(self): balancer = LoadBalancer(23530, None, None, None, None, self.driver) node = Node(id='1', name='test', state=None, public_ips=['10.0.0.75'], private_ips=[], driver=DummyNodeDriver) member1 = self.driver.balancer_attach_compute_node(balancer, node) member2 = balancer.attach_compute_node(node) self.assertEqual(member1.ip, '10.0.0.75') 
self.assertEqual(member1.port, 80) self.assertEqual(member2.ip, '10.0.0.75') self.assertEqual(member2.port, 80) def test_balancer_attach_member(self): balancer = LoadBalancer(23530, None, None, None, None, self.driver) member = Member(None, ip='10.0.0.75', port='80') member1 = self.driver.balancer_attach_member(balancer, member=member) member2 = balancer.attach_member(member=member) self.assertEqual(member1.ip, '10.0.0.75') self.assertEqual(member1.port, 80) self.assertEqual(member2.ip, '10.0.0.75') self.assertEqual(member2.port, 80) def test_balancer_detach_member(self): balancer = LoadBalancer(23530, None, None, None, None, self.driver) member = self.driver.balancer_list_members(balancer)[0] ret1 = self.driver.balancer_detach_member(balancer, member) ret2 = balancer.detach_member(member) self.assertTrue(ret1) self.assertTrue(ret2) class GoGridLBMockHttp(MockHttp, unittest.TestCase): fixtures = LoadBalancerFileFixtures('gogrid') def _api_grid_loadbalancer_list(self, method, url, body, headers): body = self.fixtures.load('loadbalancer_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _api_grid_ip_list(self, method, url, body, headers): body = self.fixtures.load('ip_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _api_grid_loadbalancer_add(self, method, url, body, headers): query = urlparse.urlparse(url).query self.assertTrue(query.find('loadbalancer.type=round+robin') != -1) body = self.fixtures.load('loadbalancer_add.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _api_grid_ip_list_UNEXPECTED_ERROR(self, method, url, body, headers): return self._api_grid_ip_list(method, url, body, headers) def _api_grid_loadbalancer_add_UNEXPECTED_ERROR(self, method, url, body, headers): body = self.fixtures.load('unexpected_error.json') return (httplib.INTERNAL_SERVER_ERROR, body, {}, httplib.responses[httplib.OK]) def _api_grid_loadbalancer_delete(self, method, url, body, headers): body = 
self.fixtures.load('loadbalancer_add.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _api_grid_loadbalancer_get(self, method, url, body, headers): body = self.fixtures.load('loadbalancer_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _api_grid_loadbalancer_edit(self, method, url, body, headers): body = self.fixtures.load('loadbalancer_edit.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == "__main__": sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_dimensiondata_v2_4.py0000664000175000017500000006410113153541406026667 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys

from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.common.dimensiondata import DimensionDataVIPNode, DimensionDataPool
from libcloud.common.dimensiondata import DimensionDataPoolMember
from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm
from libcloud.loadbalancer.drivers.dimensiondata \
    import DimensionDataLBDriver as DimensionData
from libcloud.loadbalancer.types import State
from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import LoadBalancerFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS


class DimensionData_v2_4_Tests(unittest.TestCase):
    """Tests for the Dimension Data load-balancer driver against CaaS API v2.4.

    All HTTP traffic is routed through DimensionDataMockHttp, which serves
    canned XML fixtures; the expected IDs/IPs below come from those fixtures.
    """

    def setUp(self):
        # Pin the connection to API v2.4 and install the mock transport
        # before constructing the driver.
        DimensionData.connectionCls.active_api_version = '2.4'
        DimensionData.connectionCls.conn_class = DimensionDataMockHttp
        DimensionDataMockHttp.type = None
        self.driver = DimensionData(*DIMENSIONDATA_PARAMS)

    def test_invalid_region(self):
        # An unknown region name must be rejected at construction time.
        with self.assertRaises(ValueError):
            self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah')

    def test_invalid_creds(self):
        # The UNAUTHORIZED mock variant answers with HTTP 401.
        DimensionDataMockHttp.type = 'UNAUTHORIZED'
        with self.assertRaises(InvalidCredsError):
            self.driver.list_balancers()

    def test_create_balancer(self):
        """Create a balancer with one member and an explicit listener IP."""
        self.driver.ex_set_current_network_domain('1234')
        members = []
        members.append(Member(
            id=None, ip='1.2.3.4', port=80))

        balancer = self.driver.create_balancer(
            name='test',
            port=80,
            protocol='http',
            algorithm=Algorithm.ROUND_ROBIN,
            members=members,
            ex_listener_ip_address='5.6.7.8')

        self.assertEqual(balancer.name, 'test')
        self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea')
        self.assertEqual(balancer.ip, '165.180.12.22')
        self.assertEqual(balancer.port, 80)
        self.assertEqual(balancer.extra['pool_id'],
                         '9e6b496d-5261-4542-91aa-b50c7f569c54')
        self.assertEqual(balancer.extra['network_domain_id'], '1234')
        self.assertEqual(balancer.extra['listener_ip_address'], '5.6.7.8')

    def test_create_balancer_with_defaults(self):
        # Port, protocol, algorithm and members may all be omitted (None).
        self.driver.ex_set_current_network_domain('1234')

        balancer = self.driver.create_balancer(
            name='test',
            port=None,
            protocol=None,
            algorithm=None,
            members=None)

        self.assertEqual(balancer.name, 'test')
        self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea')
        self.assertEqual(balancer.ip, '165.180.12.22')
        self.assertEqual(balancer.port, None)
        self.assertEqual(balancer.extra['pool_id'],
                         '9e6b496d-5261-4542-91aa-b50c7f569c54')
        self.assertEqual(balancer.extra['network_domain_id'], '1234')

    def test_create_balancer_no_members(self):
        # members=None must behave the same as an empty member list.
        self.driver.ex_set_current_network_domain('1234')
        members = None

        balancer = self.driver.create_balancer(
            name='test',
            port=80,
            protocol='http',
            algorithm=Algorithm.ROUND_ROBIN,
            members=members)

        self.assertEqual(balancer.name, 'test')
        self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea')
        self.assertEqual(balancer.ip, '165.180.12.22')
        self.assertEqual(balancer.port, 80)
        self.assertEqual(balancer.extra['pool_id'],
                         '9e6b496d-5261-4542-91aa-b50c7f569c54')
        self.assertEqual(balancer.extra['network_domain_id'], '1234')

    def test_create_balancer_empty_members(self):
        self.driver.ex_set_current_network_domain('1234')
        members = []

        balancer = self.driver.create_balancer(
            name='test',
            port=80,
            protocol='http',
            algorithm=Algorithm.ROUND_ROBIN,
            members=members)

        self.assertEqual(balancer.name, 'test')
        self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea')
        self.assertEqual(balancer.ip, '165.180.12.22')
        self.assertEqual(balancer.port, 80)
        self.assertEqual(balancer.extra['pool_id'],
                         '9e6b496d-5261-4542-91aa-b50c7f569c54')
        self.assertEqual(balancer.extra['network_domain_id'], '1234')

    def test_list_balancers(self):
        bal = self.driver.list_balancers()
        self.assertEqual(bal[0].name, 'myProduction.Virtual.Listener')
        self.assertEqual(bal[0].id, '6115469d-a8bb-445b-bb23-d23b5283f2b9')
        # NOTE(review): the port is asserted as a *string* here — the driver
        # apparently does not coerce it to int; confirm upstream.
        self.assertEqual(bal[0].port, '8899')
        self.assertEqual(bal[0].ip, '165.180.12.22')
        self.assertEqual(bal[0].state, State.RUNNING)

    def test_balancer_list_members(self):
        # The pool_id in extra selects which fixture pool the members
        # are listed from.
        extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7',
                 'network_domain_id': '1234'}
        balancer = LoadBalancer(
            id='234',
            name='test',
            state=State.RUNNING,
            ip='1.2.3.4',
            port=1234,
            driver=self.driver,
            extra=extra
        )
        members = self.driver.balancer_list_members(balancer)
        self.assertEqual(2, len(members))
        self.assertEqual(members[0].ip, '10.0.3.13')
        self.assertEqual(members[0].id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0')
        self.assertEqual(members[0].port, 9889)

    def test_balancer_attach_member(self):
        extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7',
                 'network_domain_id': '1234'}
        balancer = LoadBalancer(
            id='234',
            name='test',
            state=State.RUNNING,
            ip='1.2.3.4',
            port=1234,
            driver=self.driver,
            extra=extra
        )
        member = Member(
            id=None,
            ip='112.12.2.2',
            port=80,
            balancer=balancer,
            extra=None)
        member = self.driver.balancer_attach_member(balancer, member)
        self.assertEqual(member.id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0')

    def test_balancer_attach_member_without_port(self):
        # A member may be attached with port=None (inherit/any port).
        extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7',
                 'network_domain_id': '1234'}
        balancer = LoadBalancer(
            id='234',
            name='test',
            state=State.RUNNING,
            ip='1.2.3.4',
            port=1234,
            driver=self.driver,
            extra=extra
        )
        member = Member(
            id=None,
            ip='112.12.2.2',
            port=None,
            balancer=balancer,
            extra=None)
        member = self.driver.balancer_attach_member(balancer, member)
        self.assertEqual(member.id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0')
        self.assertEqual(member.port, None)

    def test_balancer_detach_member(self):
        extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7',
                 'network_domain_id': '1234'}
        balancer = LoadBalancer(
            id='234',
            name='test',
            state=State.RUNNING,
            ip='1.2.3.4',
            port=1234,
            driver=self.driver,
            extra=extra
        )
        member = Member(
            id='3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0',
            ip='112.12.2.2',
            port=80,
            balancer=balancer,
            extra=None)
        result = self.driver.balancer_detach_member(balancer, member)
        self.assertEqual(result, True)

    def test_destroy_balancer(self):
        extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7',
'network_domain_id': '1234'} balancer = LoadBalancer( id='234', name='test', state=State.RUNNING, ip='1.2.3.4', port=1234, driver=self.driver, extra=extra ) response = self.driver.destroy_balancer(balancer) self.assertEqual(response, True) def test_set_get_network_domain_id(self): self.driver.ex_set_current_network_domain('1234') nwd = self.driver.ex_get_current_network_domain() self.assertEqual(nwd, '1234') def test_ex_create_pool_member(self): pool = DimensionDataPool( id='4d360b1f-bc2c-4ab7-9884-1f03ba2768f7', name='test', description='test', status=State.RUNNING, health_monitor_id=None, load_balance_method=None, service_down_action=None, slow_ramp_time=None ) node = DimensionDataVIPNode( id='2344', name='test', status=State.RUNNING, ip='123.23.3.2' ) member = self.driver.ex_create_pool_member( pool=pool, node=node, port=80 ) self.assertEqual(member.id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0') self.assertEqual(member.name, '10.0.3.13') self.assertEqual(member.ip, '123.23.3.2') def test_ex_create_node(self): node = self.driver.ex_create_node( network_domain_id='12345', name='test', ip='123.12.32.2', ex_description='', connection_limit=25000, connection_rate_limit=2000) self.assertEqual(node.name, 'myProductionNode.1') self.assertEqual(node.id, '9e6b496d-5261-4542-91aa-b50c7f569c54') def test_ex_create_pool(self, ): pool = self.driver.ex_create_pool( network_domain_id='1234', name='test', balancer_method='ROUND_ROBIN', ex_description='test', service_down_action='NONE', slow_ramp_time=30) self.assertEqual(pool.id, '9e6b496d-5261-4542-91aa-b50c7f569c54') self.assertEqual(pool.name, 'test') self.assertEqual(pool.status, State.RUNNING) def test_ex_create_virtual_listener(self): listener = self.driver.ex_create_virtual_listener( network_domain_id='12345', name='test', ex_description='test', port=80, pool=DimensionDataPool( id='1234', name='test', description='test', status=State.RUNNING, health_monitor_id=None, load_balance_method=None, service_down_action=None, 
slow_ramp_time=None )) self.assertEqual(listener.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(listener.name, 'test') def test_ex_create_virtual_listener_unusual_port(self): listener = self.driver.ex_create_virtual_listener( network_domain_id='12345', name='test', ex_description='test', port=8900, pool=DimensionDataPool( id='1234', name='test', description='test', status=State.RUNNING, health_monitor_id=None, load_balance_method=None, service_down_action=None, slow_ramp_time=None )) self.assertEqual(listener.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(listener.name, 'test') def test_ex_create_virtual_listener_without_port(self): listener = self.driver.ex_create_virtual_listener( network_domain_id='12345', name='test', ex_description='test', pool=DimensionDataPool( id='1234', name='test', description='test', status=State.RUNNING, health_monitor_id=None, load_balance_method=None, service_down_action=None, slow_ramp_time=None )) self.assertEqual(listener.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(listener.name, 'test') def test_ex_create_virtual_listener_without_pool(self): listener = self.driver.ex_create_virtual_listener( network_domain_id='12345', name='test', ex_description='test') self.assertEqual(listener.id, '8334f461-0df0-42d5-97eb-f4678eb26bea') self.assertEqual(listener.name, 'test') def test_get_balancer(self): bal = self.driver.get_balancer('6115469d-a8bb-445b-bb23-d23b5283f2b9') self.assertEqual(bal.name, 'myProduction.Virtual.Listener') self.assertEqual(bal.id, '6115469d-a8bb-445b-bb23-d23b5283f2b9') self.assertEqual(bal.port, '8899') self.assertEqual(bal.ip, '165.180.12.22') self.assertEqual(bal.state, State.RUNNING) def test_list_protocols(self): protocols = self.driver.list_protocols() self.assertNotEqual(0, len(protocols)) def test_ex_get_nodes(self): nodes = self.driver.ex_get_nodes() self.assertEqual(2, len(nodes)) self.assertEqual(nodes[0].name, 'ProductionNode.1') self.assertEqual(nodes[0].id, 
                         '34de6ed6-46a4-4dae-a753-2f8d3840c6f9')
        self.assertEqual(nodes[0].ip, '10.10.10.101')

    def test_ex_get_node(self):
        node = self.driver.ex_get_node('34de6ed6-46a4-4dae-a753-2f8d3840c6f9')
        self.assertEqual(node.name, 'ProductionNode.2')
        self.assertEqual(node.id, '34de6ed6-46a4-4dae-a753-2f8d3840c6f9')
        self.assertEqual(node.ip, '10.10.10.101')

    def test_ex_update_node(self):
        # The updated node echoed back by the editNode fixture keeps the
        # connection_limit that was set locally.
        node = self.driver.ex_get_node('34de6ed6-46a4-4dae-a753-2f8d3840c6f9')
        node.connection_limit = '100'
        result = self.driver.ex_update_node(node)
        self.assertEqual(result.connection_limit, '100')

    def test_ex_destroy_node(self):
        result = self.driver.ex_destroy_node('34de6ed6-46a4-4dae-a753-2f8d3840c6f9')
        self.assertTrue(result)

    def test_ex_set_node_state(self):
        node = self.driver.ex_get_node('34de6ed6-46a4-4dae-a753-2f8d3840c6f9')
        result = self.driver.ex_set_node_state(node, False)
        self.assertEqual(result.connection_limit, '10000')

    def test_ex_get_pools(self):
        pools = self.driver.ex_get_pools()
        self.assertNotEqual(0, len(pools))
        self.assertEqual(pools[0].name, 'myDevelopmentPool.1')
        self.assertEqual(pools[0].id, '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7')

    def test_ex_get_pool(self):
        pool = self.driver.ex_get_pool('4d360b1f-bc2c-4ab7-9884-1f03ba2768f7')
        self.assertEqual(pool.name, 'myDevelopmentPool.1')
        self.assertEqual(pool.id, '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7')

    def test_ex_update_pool(self):
        pool = self.driver.ex_get_pool('4d360b1f-bc2c-4ab7-9884-1f03ba2768f7')
        pool.slow_ramp_time = '120'
        result = self.driver.ex_update_pool(pool)
        self.assertTrue(result)

    def test_ex_destroy_pool(self):
        response = self.driver.ex_destroy_pool(
            pool=DimensionDataPool(
                id='4d360b1f-bc2c-4ab7-9884-1f03ba2768f7',
                name='test',
                description='test',
                status=State.RUNNING,
                health_monitor_id=None,
                load_balance_method=None,
                service_down_action=None,
                slow_ramp_time=None))
        self.assertTrue(response)

    def test_get_pool_members(self):
        members = self.driver.ex_get_pool_members('4d360b1f-bc2c-4ab7-9884-1f03ba2768f7')
        self.assertEqual(2, len(members))
        self.assertEqual(members[0].id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0')
        self.assertEqual(members[0].name, '10.0.3.13')
        self.assertEqual(members[0].status, 'NORMAL')
        self.assertEqual(members[0].ip, '10.0.3.13')
        self.assertEqual(members[0].port, 9889)
        self.assertEqual(members[0].node_id, '3c207269-e75e-11e4-811f-005056806999')

    def test_get_pool_member(self):
        member = self.driver.ex_get_pool_member('3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0')
        self.assertEqual(member.id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0')
        self.assertEqual(member.name, '10.0.3.13')
        self.assertEqual(member.status, 'NORMAL')
        self.assertEqual(member.ip, '10.0.3.13')
        self.assertEqual(member.port, 9889)

    def test_set_pool_member_state(self):
        member = self.driver.ex_get_pool_member('3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0')
        result = self.driver.ex_set_pool_member_state(member, True)
        self.assertTrue(result)

    def test_ex_destroy_pool_member(self):
        # destroy_node=False: only the membership is removed.
        response = self.driver.ex_destroy_pool_member(
            member=DimensionDataPoolMember(
                id='',
                name='test',
                status=State.RUNNING,
                ip='1.2.3.4',
                port=80,
                node_id='3c207269-e75e-11e4-811f-005056806999'),
            destroy_node=False)
        self.assertTrue(response)

    def test_ex_destroy_pool_member_with_node(self):
        # destroy_node=True: the underlying node is deleted as well.
        response = self.driver.ex_destroy_pool_member(
            member=DimensionDataPoolMember(
                id='',
                name='test',
                status=State.RUNNING,
                ip='1.2.3.4',
                port=80,
                node_id='34de6ed6-46a4-4dae-a753-2f8d3840c6f9'),
            destroy_node=True)
        self.assertTrue(response)

    def test_ex_get_default_health_monitors(self):
        monitors = self.driver.ex_get_default_health_monitors(
            '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7'
        )
        self.assertEqual(len(monitors), 6)
        self.assertEqual(monitors[0].id, '01683574-d487-11e4-811f-005056806999')
        self.assertEqual(monitors[0].name, 'CCDEFAULT.Http')
        self.assertFalse(monitors[0].node_compatible)
        self.assertTrue(monitors[0].pool_compatible)

    def test_ex_get_default_persistence_profiles(self):
        profiles = self.driver.ex_get_default_persistence_profiles(
            '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7'
        )
        self.assertEqual(len(profiles), 4)
        self.assertEqual(profiles[0].id, 'a34ca024-f3db-11e4-b010-005056806999')
        self.assertEqual(profiles[0].name, 'CCDEFAULT.Cookie')
        self.assertEqual(profiles[0].fallback_compatible, False)
        self.assertEqual(len(profiles[0].compatible_listeners), 1)
        self.assertEqual(profiles[0].compatible_listeners[0].type,
                         'PERFORMANCE_LAYER_4')

    def test_ex_get_default_irules(self):
        irules = self.driver.ex_get_default_irules(
            '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7'
        )
        self.assertEqual(len(irules), 4)
        self.assertEqual(irules[0].id, '2b20cb2c-ffdc-11e4-b010-005056806999')
        self.assertEqual(irules[0].name, 'CCDEFAULT.HttpsRedirect')
        self.assertEqual(len(irules[0].compatible_listeners), 1)
        self.assertEqual(irules[0].compatible_listeners[0].type,
                         'PERFORMANCE_LAYER_4')


class DimensionDataMockHttp(MockHttp):
    """Serves canned XML fixtures for the CaaS 2.4 VIP endpoints.

    Method names are derived by MockHttp from the request path, so each
    handler below maps one URL to one fixture file.
    """

    fixtures = LoadBalancerFileFixtures('dimensiondata')

    def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
        # Simulates bad credentials for test_invalid_creds.
        return (httplib.UNAUTHORIZED, "", {},
                httplib.responses[httplib.UNAUTHORIZED])

    def _oec_0_9_myaccount(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_virtualListener(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_virtualListener.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_virtualListener_6115469d_a8bb_445b_bb23_d23b5283f2b9(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_virtualListener_6115469d_a8bb_445b_bb23_d23b5283f2b9.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_pool(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_pool.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_pool_4d360b1f_bc2c_4ab7_9884_1f03ba2768f7(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_pool_4d360b1f_bc2c_4ab7_9884_1f03ba2768f7.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_poolMember(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_poolMember.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_poolMember_3dd806a2_c2c8_4c0c_9a4f_5219ea9266c0(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_poolMember_3dd806a2_c2c8_4c0c_9a4f_5219ea9266c0.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_createPool(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_createPool.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_createNode(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_createNode.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_addPoolMember(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_addPoolMember.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_createVirtualListener(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_createVirtualListener.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_removePoolMember(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_removePoolMember.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_deleteVirtualListener(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_deleteVirtualListener.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_deletePool(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_deletePool.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_deleteNode(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_deleteNode.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_node(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_node.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_node_34de6ed6_46a4_4dae_a753_2f8d3840c6f9(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_node_34de6ed6_46a4_4dae_a753_2f8d3840c6f9.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_editNode(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_editNode.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_editPool(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_editPool.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_editPoolMember(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_editPoolMember.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_defaultHealthMonitor(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_defaultHealthMonitor.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_defaultPersistenceProfile(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_defaultPersistenceProfile.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkDomainVip_defaultIrule(self, method, url, body, headers):
        body = self.fixtures.load(
            'networkDomainVip_defaultIrule.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_elb.py0000664000175000017500000002346213153541406023765 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import httplib from libcloud.loadbalancer.base import Member, Algorithm from libcloud.loadbalancer.drivers.elb import ElasticLBDriver from libcloud.loadbalancer.types import State from libcloud.test import MockHttp from libcloud.test.secrets import LB_ELB_PARAMS from libcloud.test.file_fixtures import LoadBalancerFileFixtures class ElasticLBTests(unittest.TestCase): def setUp(self): ElasticLBMockHttp.test = self ElasticLBDriver.connectionCls.conn_class = ElasticLBMockHttp ElasticLBMockHttp.type = None ElasticLBMockHttp.use_param = 'Action' self.driver = ElasticLBDriver(*LB_ELB_PARAMS) def test_instantiate_driver_with_token(self): token = 'temporary_credentials_token' driver = ElasticLBDriver(*LB_ELB_PARAMS, **{'token': token}) self.assertTrue(hasattr(driver, 'token'), 'Driver has no attribute token') self.assertEquals(token, driver.token, "Driver token does not match with provided token") def test_driver_with_token_signature_version(self): token = 'temporary_credentials_token' driver = ElasticLBDriver(*LB_ELB_PARAMS, **{'token': token}) kwargs = driver._ex_connection_class_kwargs() self.assertTrue(('signature_version' in kwargs), 'Driver has no attribute signature_version') self.assertEquals('4', kwargs['signature_version'], 'Signature version is not 4 with temporary credentials') def test_list_protocols(self): protocols = self.driver.list_protocols() self.assertEqual(len(protocols), 4) self.assertTrue('tcp' in protocols) self.assertTrue('http' in protocols) def test_list_balancers(self): balancers = self.driver.list_balancers() self.assertEqual(len(balancers), 1) self.assertEqual(balancers[0].id, 'tests') self.assertEqual(balancers[0].name, 'tests') def test_list_balancers_with_tags(self): balancers = self.driver.list_balancers(ex_fetch_tags=True) self.assertEqual(len(balancers), 1) 
self.assertEqual(balancers[0].id, 'tests') self.assertEqual(balancers[0].name, 'tests') self.assertTrue(('tags' in balancers[0].extra), 'No tags dict found in balancer.extra') self.assertEqual(balancers[0].extra['tags']['project'], 'lima') def test_list_balancer_tags(self): tags = self.driver._ex_list_balancer_tags('tests') self.assertEqual(len(tags), 1) self.assertEqual(tags['project'], 'lima') def test_get_balancer(self): balancer = self.driver.get_balancer(balancer_id='tests') self.assertEqual(balancer.id, 'tests') self.assertEqual(balancer.name, 'tests') self.assertEqual(balancer.state, State.UNKNOWN) def test_get_balancer_with_tags(self): balancer = self.driver.get_balancer(balancer_id='tests', ex_fetch_tags=True) self.assertEqual(balancer.id, 'tests') self.assertEqual(balancer.name, 'tests') self.assertTrue(('tags' in balancer.extra), 'No tags dict found in balancer.extra') self.assertEqual(balancer.extra['tags']['project'], 'lima') def test_populate_balancer_tags(self): balancer = self.driver.get_balancer(balancer_id='tests') balancer = self.driver._ex_populate_balancer_tags(balancer) self.assertEqual(balancer.id, 'tests') self.assertEqual(balancer.name, 'tests') self.assertTrue(('tags' in balancer.extra), 'No tags dict found in balancer.extra') self.assertEqual(balancer.extra['tags']['project'], 'lima') def test_destroy_balancer(self): balancer = self.driver.get_balancer(balancer_id='tests') self.assertTrue(self.driver.destroy_balancer(balancer)) def test_create_balancer(self): members = [Member('srv-lv426', None, None)] balancer = self.driver.create_balancer(name='lb2', port=80, protocol='http', algorithm=Algorithm.ROUND_ROBIN, members=members) self.assertEqual(balancer.name, 'lb2') self.assertEqual(balancer.port, 80) self.assertEqual(balancer.state, State.PENDING) def test_balancer_list_members(self): balancer = self.driver.get_balancer(balancer_id='tests') members = balancer.list_members() self.assertEqual(len(members), 1) 
self.assertEqual(members[0].balancer, balancer) self.assertEqual('i-64bd081c', members[0].id) def test_balancer_detach_member(self): balancer = self.driver.get_balancer(balancer_id='lba-1235f') member = Member('i-64bd081c', None, None) self.assertTrue(balancer.detach_member(member)) def test_ex_list_balancer_policies(self): balancer = self.driver.get_balancer(balancer_id='tests') policies = self.driver.ex_list_balancer_policies(balancer) self.assertTrue('MyDurationStickyPolicy' in policies) def test_ex_list_balancer_policy_types(self): policy_types = self.driver.ex_list_balancer_policy_types() self.assertTrue('ProxyProtocolPolicyType' in policy_types) def test_ex_create_balancer_policy(self): self.assertTrue( self.driver.ex_create_balancer_policy( name='tests', policy_name='MyDurationProxyPolicy', policy_type='ProxyProtocolPolicyType')) def test_ex_delete_balancer_policy(self): self.assertTrue( self.driver.ex_delete_balancer_policy( name='tests', policy_name='MyDurationProxyPolicy')) def test_ex_set_balancer_policies_listener(self): self.assertTrue( self.driver.ex_set_balancer_policies_listener( name='tests', port=80, policies=['MyDurationStickyPolicy'])) def test_ex_set_balancer_policies_backend_server(self): self.assertTrue( self.driver.ex_set_balancer_policies_backend_server( name='tests', instance_port=80, policies=['MyDurationProxyPolicy'])) def text_ex_create_balancer_listeners(self): self.assertTrue( self.driver.ex_create_balancer_listeners( name='tests', listeners=[[1024, 65533, 'HTTP']])) class ElasticLBMockHttp(MockHttp): fixtures = LoadBalancerFileFixtures('elb') def _2012_06_01_DescribeLoadBalancers(self, method, url, body, headers): body = self.fixtures.load('describe_load_balancers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2012_06_01_DescribeTags(self, method, url, body, headers): body = self.fixtures.load('describe_tags.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_2012_06_01_CreateLoadBalancer(self, method, url, body, headers): body = self.fixtures.load('create_load_balancer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2012_06_01_DeregisterInstancesFromLoadBalancer(self, method, url, body, headers): body = self.fixtures.load( 'deregister_instances_from_load_balancer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2012_06_01_CreateLoadBalancerPolicy(self, method, url, body, headers): body = self.fixtures.load('create_load_balancer_policy.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2012_06_01_DeleteLoadBalancer(self, method, url, body, headers): body = '' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2012_06_01_DescribeLoadBalancerPolicies(self, method, url, body, headers): body = self.fixtures.load('describe_load_balancer_policies.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2012_06_01_DescribeLoadBalancerPolicyTypes(self, method, url, body, headers): body = self.fixtures.load('describe_load_balancers_policy_types.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2012_06_01_DeleteLoadBalancerPolicy(self, method, url, body, headers): body = '' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2012_06_01_SetLoadBalancerPoliciesOfListener(self, method, url, body, headers): body = self.fixtures.load('set_load_balancer_policies_of_listener.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2012_06_01_SetLoadBalancerPoliciesForBackendServer(self, method, url, body, headers): body = self.fixtures.load( 'set_load_balancer_policies_for_backend_server.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == "__main__": sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/loadbalancer/test_slb.py0000664000175000017500000006202013153541406023774 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation 
# (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

from libcloud.compute.base import Node
from libcloud.compute.types import NodeState
from libcloud.loadbalancer.base import Member, Algorithm
from libcloud.loadbalancer.drivers.slb import SLBDriver, \
    SLBLoadBalancerHttpListener, SLBLoadBalancerHttpsListener, \
    SLBLoadBalancerTcpListener, SLBLoadBalancerUdpListener
from libcloud.loadbalancer.types import State
from libcloud.test.file_fixtures import LoadBalancerFileFixtures
from libcloud.test import MockHttp
from libcloud.test.secrets import LB_SLB_PARAMS
from libcloud.utils.py3 import httplib


class SLBDriverTestCases(unittest.TestCase):
    """Tests for the Aliyun Server Load Balancer (SLB) driver, mocked HTTP."""

    region = LB_SLB_PARAMS[2]

    def setUp(self):
        # Route all driver HTTP traffic through the mock transport; SLB
        # dispatches on the 'Action' query parameter.
        SLBMockHttp.test = self
        SLBDriver.connectionCls.conn_class = SLBMockHttp
        SLBMockHttp.type = None
        SLBMockHttp.use_param = 'Action'
        self.driver = SLBDriver(*LB_SLB_PARAMS)

    def test_list_protocols(self):
        protocols = self.driver.list_protocols()
        self.assertEqual(4, len(protocols))
        expected = ['tcp', 'udp', 'http', 'https']
        diff = set(expected) - set(protocols)
        self.assertEqual(0, len(diff))

    def test_list_balancers(self):
        balancers = self.driver.list_balancers()

        self.assertEqual(len(balancers), 1)
        balancer = balancers[0]
        self.assertEqual('15229f88562-cn-hangzhou-dg-a01', balancer.id)
        self.assertEqual('abc', balancer.name)
        self.assertEqual(State.RUNNING, balancer.state)
        self.assertEqual('120.27.186.149', balancer.ip)
        self.assertTrue(balancer.port is None)
        self.assertEqual(self.driver, balancer.driver)
        expected_extra = {
            'create_timestamp': 1452403099000,
            'address_type': 'internet',
            'region_id': 'cn-hangzhou-dg-a01',
            'region_id_alias': 'cn-hangzhou',
            'create_time': '2016-01-10T13:18Z',
            'master_zone_id': 'cn-hangzhou-d',
            'slave_zone_id': 'cn-hangzhou-b',
            'network_type': 'classic'
        }
        self._validate_extras(expected_extra, balancer.extra)

    def _validate_extras(self, expected, actual):
        # Helper: every expected extra key must be present with the same
        # value; failure message names the offending key.
        self.assertTrue(actual is not None)
        for key, value in iter(expected.items()):
            self.assertTrue(key in actual)
            self.assertEqual(value, actual[key],
                             ('extra %(key)s not equal, '
                              'expected: "%(expected)s", '
                              'actual: "%(actual)s"' %
                              {'key': key, 'expected': value,
                               'actual': actual[key]}))

    def test_list_balancers_with_ids(self):
        SLBMockHttp.type = 'list_balancers_ids'
        self.balancer_ids = ['id1', 'id2']
        balancers = self.driver.list_balancers(
            ex_balancer_ids=self.balancer_ids)
        self.assertTrue(balancers is not None)

    def test_list_balancers_with_ex_filters(self):
        SLBMockHttp.type = 'list_balancers_filters'
        self.ex_filters = {'AddressType': 'internet'}
        balancers = self.driver.list_balancers(ex_filters=self.ex_filters)
        self.assertTrue(balancers is not None)

    def test_get_balancer(self):
        SLBMockHttp.type = 'get_balancer'
        balancer = self.driver.get_balancer(balancer_id='tests')

        self.assertEqual(balancer.id, '15229f88562-cn-hangzhou-dg-a01')
        self.assertEqual(balancer.name, 'abc')
        self.assertEqual(balancer.state, State.RUNNING)

    def test_destroy_balancer(self):
        balancer = self.driver.get_balancer(balancer_id='tests')

        self.assertTrue(self.driver.destroy_balancer(balancer))

    def test_create_balancer(self):
        self.name = 'balancer1'
        self.port = 80
        self.protocol = 'http'
        self.algorithm = Algorithm.WEIGHTED_ROUND_ROBIN
        self.extra = {
            'ex_address_type': 'internet',
            'ex_internet_charge_type': 'paybytraffic',
            'ex_bandwidth': 1,
            'ex_master_zone_id': 'cn-hangzhou-d',
            'ex_slave_zone_id': 'cn-hangzhou-b',
            'StickySession': 'on',
            'HealthCheck': 'on'}
        self.members = [Member('node1', None, None)]

        balancer = self.driver.create_balancer(name=self.name, port=self.port,
                                               protocol=self.protocol,
                                               algorithm=self.algorithm,
                                               members=self.members,
                                               **self.extra)

        self.assertEqual(balancer.name, self.name)
        self.assertEqual(balancer.port, self.port)
        self.assertEqual(balancer.state, State.UNKNOWN)

    def test_create_balancer_no_port_exception(self):
        # A port is mandatory for SLB balancers.
        self.assertRaises(AttributeError, self.driver.create_balancer,
                          None, None, 'http',
                          Algorithm.WEIGHTED_ROUND_ROBIN, None)

    def test_create_balancer_unsupport_protocol_exception(self):
        self.assertRaises(AttributeError, self.driver.create_balancer,
                          None, 443, 'ssl',
                          Algorithm.WEIGHTED_ROUND_ROBIN, None)

    def test_create_balancer_multiple_member_ports_exception(self):
        # All members must listen on the same port.
        members = [Member('m1', '1.2.3.4', 80),
                   Member('m2', '1.2.3.5', 81)]
        self.assertRaises(AttributeError, self.driver.create_balancer,
                          None, 80, 'http', Algorithm.WEIGHTED_ROUND_ROBIN,
                          members)

    def test_create_balancer_bandwidth_value_error(self):
        self.assertRaises(AttributeError, self.driver.create_balancer,
                          None, 80, 'http', Algorithm.WEIGHTED_ROUND_ROBIN,
                          None, ex_bandwidth='NAN')

    def test_create_balancer_paybybandwidth_without_bandwidth_exception(self):
        # paybybandwidth charging requires an explicit ex_bandwidth value.
        self.assertRaises(AttributeError, self.driver.create_balancer,
                          None, 80, 'http', Algorithm.WEIGHTED_ROUND_ROBIN,
                          None, ex_internet_charge_type='paybybandwidth')

    def test_balancer_list_members(self):
        balancer = self.driver.get_balancer(balancer_id='tests')
        members = balancer.list_members()

        self.assertEqual(len(members), 1)
        self.assertEqual(members[0].balancer, balancer)
        self.assertEqual('i-23tshnsdq', members[0].id)

    def test_balancer_list_listeners(self):
        balancer = self.driver.get_balancer(balancer_id='tests')
        listeners = self.driver.ex_list_listeners(balancer)

        self.assertEqual(1, len(listeners))
        listener = listeners[0]
        self.assertEqual('80', listener.port)

    def test_balancer_detach_member(self):
        self.balancer = self.driver.get_balancer(balancer_id='tests')
        self.member = Member('i-23tshnsdq', None, None)

        self.assertTrue(self.balancer.detach_member(self.member))

    def test_balancer_attach_compute_node(self):
        SLBMockHttp.type = 'attach_compute_node'
        self.balancer = self.driver.get_balancer(balancer_id='tests')
        self.node = Node(id='node1', name='node-name',
                         state=NodeState.RUNNING,
                         public_ips=['1.2.3.4'], private_ips=['4.3.2.1'],
                         driver=self.driver)
        member = self.driver.balancer_attach_compute_node(
            self.balancer, self.node)
        self.assertEqual(self.node.id, member.id)
        self.assertEqual(self.node.public_ips[0], member.ip)
        self.assertEqual(self.balancer.port, member.port)

    def test_ex_create_listener(self):
        SLBMockHttp.type = 'create_listener'
        self.balancer = self.driver.get_balancer(balancer_id='tests')
        self.backend_port = 80
        self.protocol = 'http'
        self.algorithm = Algorithm.WEIGHTED_ROUND_ROBIN
        self.bandwidth = 1
        self.extra = {'StickySession': 'off', 'HealthCheck': 'off'}
        self.assertTrue(self.driver.ex_create_listener(self.balancer,
                                                       self.backend_port,
                                                       self.protocol,
                                                       self.algorithm,
                                                       self.bandwidth,
                                                       **self.extra))

    def test_ex_create_listener_override_port(self):
        # ListenerPort in extra overrides the balancer's front-end port.
        SLBMockHttp.type = 'create_listener_override_port'
        self.balancer = self.driver.get_balancer(balancer_id='tests')
        self.backend_port = 80
        self.protocol = 'http'
        self.algorithm = Algorithm.WEIGHTED_ROUND_ROBIN
        self.bandwidth = 1
        self.extra = {'StickySession': 'off',
                      'HealthCheck': 'off',
                      'ListenerPort': 8080}
        self.assertTrue(self.driver.ex_create_listener(self.balancer,
                                                       self.backend_port,
                                                       self.protocol,
                                                       self.algorithm,
                                                       self.bandwidth,
                                                       **self.extra))

    def test_ex_start_listener(self):
        SLBMockHttp.type = 'start_listener'
        balancer = self.driver.get_balancer(balancer_id='tests')
        self.port = 80
        self.assertTrue(self.driver.ex_start_listener(balancer, self.port))

    def test_ex_stop_listener(self):
        SLBMockHttp.type = 'stop_listener'
        balancer = \
self.driver.get_balancer(balancer_id='tests') self.port = 80 self.assertTrue(self.driver.ex_stop_listener(balancer, self.port)) def test_ex_upload_certificate(self): self.name = 'cert1' self.cert = 'cert-data' self.key = 'key-data' certificate = self.driver.ex_upload_certificate(self.name, self.cert, self.key) self.assertEqual(self.name, certificate.name) self.assertEqual('01:DF:AB:CD', certificate.fingerprint) def test_ex_list_certificates(self): certs = self.driver.ex_list_certificates() self.assertEqual(2, len(certs)) cert = certs[0] self.assertEqual('139a00604ad-cn-east-hangzhou-01', cert.id) self.assertEqual('abe', cert.name) self.assertEqual('A:B:E', cert.fingerprint) def test_ex_list_certificates_ids(self): SLBMockHttp.type = 'list_certificates_ids' self.cert_ids = ['cert1', 'cert2'] certs = self.driver.ex_list_certificates(certificate_ids=self.cert_ids) self.assertTrue(certs is not None) def test_ex_delete_certificate(self): self.cert_id = 'cert1' self.assertTrue(self.driver.ex_delete_certificate(self.cert_id)) def test_ex_set_certificate_name(self): self.cert_id = 'cert1' self.cert_name = 'cert-name' self.assertTrue(self.driver.ex_set_certificate_name(self.cert_id, self.cert_name)) class SLBMockHttp(MockHttp, unittest.TestCase): fixtures = LoadBalancerFileFixtures('slb') def _DescribeLoadBalancers(self, method, url, body, headers): body = self.fixtures.load('describe_load_balancers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _list_balancers_ids_DescribeLoadBalancers(self, method, url, body, headers): params = {'LoadBalancerId': ','.join(self.test.balancer_ids)} self.assertUrlContainsQueryParams(url, params) body = self.fixtures.load('describe_load_balancers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _list_balancers_filters_DescribeLoadBalancers(self, method, url, body, headers): params = {'AddressType': 'internet'} self.assertUrlContainsQueryParams(url, params) body = 
self.fixtures.load('describe_load_balancers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _get_balancer_DescribeLoadBalancers(self, method, url, body, headers): params = {'LoadBalancerId': 'tests'} self.assertUrlContainsQueryParams(url, params) return self._DescribeLoadBalancers(method, url, body, headers) def _DeleteLoadBalancer(self, method, url, body, headers): params = {'LoadBalancerId': '15229f88562-cn-hangzhou-dg-a01'} self.assertUrlContainsQueryParams(url, params) body = self.fixtures.load('delete_load_balancer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeLoadBalancerAttribute(self, method, url, body, headers): body = self.fixtures.load('describe_load_balancer_attribute.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateLoadBalancer(self, method, url, body, headers): params = {'RegionId': self.test.region, 'LoadBalancerName': self.test.name} balancer_keys = { 'AddressType': 'ex_address_type', 'InternetChargeType': 'ex_internet_charge_type', 'Bandwidth': 'ex_bandwidth', 'MasterZoneId': 'ex_master_zone_id', 'SlaveZoneId': 'ex_slave_zone_id' } for key in balancer_keys: params[key] = str(self.test.extra[balancer_keys[key]]) self.assertUrlContainsQueryParams(url, params) body = self.fixtures.load('create_load_balancer.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _AddBackendServers(self, method, url, body, headers): _id = self.test.members[0].id self.assertTrue("ServerId" in url and _id in url) self.assertTrue("Weight" in url and "100" in url) body = self.fixtures.load('add_backend_servers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _CreateLoadBalancerHTTPListener(self, method, url, body, headers): body = self.fixtures.load('create_load_balancer_http_listener.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _StartLoadBalancerListener(self, method, url, body, headers): body = 
self.fixtures.load('start_load_balancer_listener.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _RemoveBackendServers(self, method, url, body, headers): _id = self.test.member.id servers_json = '["%s"]' % _id params = {'LoadBalancerId': self.test.balancer.id, 'BackendServers': servers_json} self.assertUrlContainsQueryParams(url, params) body = self.fixtures.load('add_backend_servers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _attach_compute_node_DescribeLoadBalancers(self, method, url, body, headers): body = self.fixtures.load('describe_load_balancers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _attach_compute_node_AddBackendServers(self, method, url, body, headers): _id = self.test.node.id self.assertTrue("ServerId" in url and _id in url) self.assertTrue("Weight" in url and "100" in url) body = self.fixtures.load('add_backend_servers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _create_listener_CreateLoadBalancerHTTPListener(self, method, url, body, headers): params = {'LoadBalancerId': self.test.balancer.id, 'RegionId': self.test.region, 'ListenerPort': str(self.test.balancer.port), 'BackendServerPort': str(self.test.backend_port), 'Scheduler': 'wrr', 'Bandwidth': '1', 'StickySession': 'off', 'HealthCheck': 'off'} self.assertUrlContainsQueryParams(url, params) body = self.fixtures.load('create_load_balancer_http_listener.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _create_listener_DescribeLoadBalancers(self, method, url, body, headers): body = self.fixtures.load('describe_load_balancers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _create_listener_override_port_CreateLoadBalancerHTTPListener( self, method, url, body, headers): params = {'LoadBalancerId': self.test.balancer.id, 'RegionId': self.test.region, 'ListenerPort': str(self.test.extra['ListenerPort']), 'BackendServerPort': str(self.test.backend_port), 
'Scheduler': 'wrr', 'Bandwidth': '1', 'StickySession': 'off', 'HealthCheck': 'off'} self.assertUrlContainsQueryParams(url, params) body = self.fixtures.load('create_load_balancer_http_listener.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _create_listener_override_port_DescribeLoadBalancers( self, method, url, body, headers): body = self.fixtures.load('describe_load_balancers.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _start_listener_DescribeLoadBalancers(self, method, url, body, headers): return self._DescribeLoadBalancers(method, url, body, headers) def _start_listener_StartLoadBalancerListener(self, method, url, body, headers): params = {'ListenerPort': str(self.test.port)} self.assertUrlContainsQueryParams(url, params) return self._StartLoadBalancerListener(method, url, body, headers) def _stop_listener_DescribeLoadBalancers(self, method, url, body, headers): return self._DescribeLoadBalancers(method, url, body, headers) def _stop_listener_StopLoadBalancerListener(self, method, url, body, headers): params = {'ListenerPort': str(self.test.port)} self.assertUrlContainsQueryParams(url, params) return self._StartLoadBalancerListener(method, url, body, headers) def _UploadServerCertificate(self, method, url, body, headers): body = self.fixtures.load('upload_server_certificate.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DescribeServerCertificates(self, method, url, body, headers): body = self.fixtures.load('describe_server_certificates.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _list_certificates_ids_DescribeServerCertificates(self, method, url, body, headers): params = {'RegionId': self.test.region, 'ServerCertificateId': ','.join(self.test.cert_ids)} self.assertUrlContainsQueryParams(url, params) body = self.fixtures.load('describe_server_certificates.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _DeleteServerCertificate(self, 
method, url, body, headers): params = {'RegionId': self.test.region, 'ServerCertificateId': self.test.cert_id} self.assertUrlContainsQueryParams(url, params) body = self.fixtures.load('delete_server_certificate.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _SetServerCertificateName(self, method, url, body, headers): params = {'RegionId': self.test.region, 'ServerCertificateId': self.test.cert_id, 'ServerCertificateName': self.test.cert_name} self.assertUrlContainsQueryParams(url, params) body = self.fixtures.load('set_server_certificate_name.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class AssertDictMixin(object): def assert_dict_equals(self, expected, actual): expected_keys = set(expected.keys()) actual_keys = set(actual.keys()) self.assertEqual(len(expected_keys), len(actual_keys)) self.assertEqual(0, len(expected_keys - actual_keys)) for key in expected: self.assertEqual(expected[key], actual[key]) class SLBLoadBalancerHttpListenerTestCase(unittest.TestCase, AssertDictMixin): def setUp(self): self.listener = SLBLoadBalancerHttpListener.create( 80, 8080, Algorithm.WEIGHTED_ROUND_ROBIN, 1, extra={'StickySession': 'on', 'StickySessionType': 'insert', 'HealthCheck': 'on'} ) def test_get_required_params(self): expected = {'Action': 'CreateLoadBalancerHTTPListener', 'ListenerPort': 80, 'BackendServerPort': 8080, 'Scheduler': 'wrr', 'Bandwidth': 1, 'StickySession': 'on', 'HealthCheck': 'on'} self.assert_dict_equals(expected, self.listener.get_required_params()) def test_get_optional_params(self): expected = {'StickySessionType': 'insert'} self.assert_dict_equals(expected, self.listener.get_optional_params()) def test_repr(self): self.assertTrue('SLBLoadBalancerHttpListener' in str(self.listener)) class SLBLoadBalancerHttpsListenerTestCase(unittest.TestCase, AssertDictMixin): def setUp(self): self.listener = SLBLoadBalancerHttpsListener.create( 80, 8080, Algorithm.WEIGHTED_ROUND_ROBIN, 1, extra={'StickySession': 'on', 
'StickySessionType': 'insert', 'HealthCheck': 'on', 'ServerCertificateId': 'fake-cert1'} ) def test_get_required_params(self): expected = {'Action': 'CreateLoadBalancerHTTPSListener', 'ListenerPort': 80, 'BackendServerPort': 8080, 'Scheduler': 'wrr', 'Bandwidth': 1, 'StickySession': 'on', 'HealthCheck': 'on', 'ServerCertificateId': 'fake-cert1'} self.assert_dict_equals(expected, self.listener.get_required_params()) def test_get_optional_params(self): expected = {'StickySessionType': 'insert'} self.assert_dict_equals(expected, self.listener.get_optional_params()) class SLBLoadBalancerTcpListenerTestCase(unittest.TestCase, AssertDictMixin): def setUp(self): self.listener = SLBLoadBalancerTcpListener.create( 80, 8080, Algorithm.WEIGHTED_ROUND_ROBIN, 1, extra={'PersistenceTimeout': 0, 'HealthCheckDomain': ''} ) def test_get_required_params(self): expected = {'Action': 'CreateLoadBalancerTCPListener', 'ListenerPort': 80, 'BackendServerPort': 8080, 'Scheduler': 'wrr', 'Bandwidth': 1} self.assert_dict_equals(expected, self.listener.get_required_params()) def test_get_optional_params(self): expected = {'PersistenceTimeout': 0, 'HealthCheckDomain': ''} self.assert_dict_equals(expected, self.listener.get_optional_params()) class SLBLoadBalancerUdpListenerTestCase(unittest.TestCase, AssertDictMixin): def setUp(self): self.listener = SLBLoadBalancerUdpListener.create( 80, 8080, Algorithm.WEIGHTED_ROUND_ROBIN, 1, extra={'PersistenceTimeout': 0} ) def test_get_required_params(self): expected = {'Action': 'CreateLoadBalancerUDPListener', 'ListenerPort': 80, 'BackendServerPort': 8080, 'Scheduler': 'wrr', 'Bandwidth': 1} self.assert_dict_equals(expected, self.listener.get_required_params()) def test_get_optional_params(self): expected = {'PersistenceTimeout': 0} self.assert_dict_equals(expected, self.listener.get_optional_params()) if __name__ == "__main__": sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/backup/0000775000175000017500000000000013160535107020440 5ustar 
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import ET

from libcloud.common.dimensiondata import DimensionDataAPIException
from libcloud.common.types import InvalidCredsError
from libcloud.backup.base import BackupTargetJob
from libcloud.backup.drivers.dimensiondata import DimensionDataBackupDriver as DimensionData
from libcloud.backup.drivers.dimensiondata import DEFAULT_BACKUP_PLAN

from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import BackupFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS


class DimensionData_v2_3_Tests(unittest.TestCase):
    """
    Tests for the Dimension Data backup driver against API version 2.3.

    All HTTP traffic goes through :class:`DimensionDataMockHttp`;
    ``DimensionDataMockHttp.type`` selects an error-path fixture variant
    (e.g. 'EXISTS', 'FAIL', 'NOCLIENT') for the individual test.
    """

    def setUp(self):
        # Pin the API version, wire up the mock transport and reset the
        # fixture selector before each test.
        DimensionData.connectionCls.active_api_version = '2.3'
        DimensionData.connectionCls.conn_class = DimensionDataMockHttp
        DimensionDataMockHttp.type = None
        self.driver = DimensionData(*DIMENSIONDATA_PARAMS)

    def test_invalid_region(self):
        with self.assertRaises(ValueError):
            self.driver = DimensionData(*DIMENSIONDATA_PARAMS,
                                        region='blah')

    def test_invalid_creds(self):
        DimensionDataMockHttp.type = 'UNAUTHORIZED'
        with self.assertRaises(InvalidCredsError):
            self.driver.list_targets()

    def test_list_targets(self):
        targets = self.driver.list_targets()
        self.assertEqual(len(targets), 2)
        self.assertEqual(targets[0].id,
                         '5579f3a7-4c32-4cf5-8a7e-b45c36a35c10')
        self.assertEqual(targets[0].address,
                         'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(targets[0].extra['servicePlan'], 'Enterprise')

    def test_create_target(self):
        target = self.driver.create_target(
            'name',
            'e75ead52-692f-4314-8725-c8a4f4d13a87',
            extra={'servicePlan': 'Enterprise'})
        self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f')
        self.assertEqual(target.address,
                         'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(target.extra['servicePlan'], 'Enterprise')

    def test_create_target_DEFAULT(self):
        # With no extra, the driver must send DEFAULT_BACKUP_PLAN; the
        # mock's _DEFAULT handler verifies the request body.
        DimensionDataMockHttp.type = 'DEFAULT'
        target = self.driver.create_target(
            'name',
            'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f')
        self.assertEqual(target.address,
                         'e75ead52-692f-4314-8725-c8a4f4d13a87')

    def test_create_target_EXISTS(self):
        DimensionDataMockHttp.type = 'EXISTS'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.create_target(
                'name',
                'e75ead52-692f-4314-8725-c8a4f4d13a87',
                extra={'servicePlan': 'Enterprise'})
        self.assertEqual(context.exception.code, 'ERROR')
        self.assertEqual(
            context.exception.msg,
            'Cloud backup for this server is already enabled or being '
            'enabled (state: NORMAL).')

    def test_update_target(self):
        target = self.driver.list_targets()[0]
        extra = {'servicePlan': 'Essentials'}
        new_target = self.driver.update_target(target, extra=extra)
        self.assertEqual(new_target.extra['servicePlan'], 'Essentials')

    def test_update_target_DEFAULT(self):
        DimensionDataMockHttp.type = 'DEFAULT'
        target = 'e75ead52-692f-4314-8725-c8a4f4d13a87'
        self.driver.update_target(target)

    def test_update_target_STR(self):
        # update_target also accepts a target id string instead of a
        # BackupTarget instance.
        target = 'e75ead52-692f-4314-8725-c8a4f4d13a87'
        extra = {'servicePlan': 'Essentials'}
        new_target = self.driver.update_target(target, extra=extra)
        self.assertEqual(new_target.extra['servicePlan'], 'Essentials')

    def test_delete_target(self):
        target = self.driver.list_targets()[0]
        self.assertTrue(self.driver.delete_target(target))

    def test_ex_add_client_to_target(self):
        target = self.driver.list_targets()[0]
        client = self.driver.ex_list_available_client_types(target)[0]
        storage_policy = self.driver.ex_list_available_storage_policies(
            target)[0]
        schedule_policy = self.driver.ex_list_available_schedule_policies(
            target)[0]
        self.assertTrue(
            self.driver.ex_add_client_to_target(
                target, client, storage_policy,
                schedule_policy, 'ON_FAILURE', 'nobody@example.com')
        )

    def test_ex_add_client_to_target_STR(self):
        # All arguments may also be given as plain strings.
        self.assertTrue(
            self.driver.ex_add_client_to_target(
                'e75ead52-692f-4314-8725-c8a4f4d13a87', 'FA.Linux',
                '14 Day Storage Policy', '12AM - 6AM', 'ON_FAILURE',
                'nobody@example.com')
        )

    def test_ex_get_backup_details_for_target(self):
        target = self.driver.list_targets()[0]
        response = self.driver.ex_get_backup_details_for_target(target)
        self.assertEqual(response.service_plan, 'Enterprise')
        client = response.clients[0]
        self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8')
        self.assertEqual(client.type.type, 'FA.Linux')
        self.assertEqual(client.running_job.progress, 5)
        self.assertTrue(isinstance(client.running_job, BackupTargetJob))
        self.assertEqual(len(client.alert.notify_list), 2)
        self.assertTrue(isinstance(client.alert.notify_list, list))

    def test_ex_get_backup_details_for_target_NOBACKUP(self):
        target = self.driver.list_targets()[0].address
        DimensionDataMockHttp.type = 'NOBACKUP'
        response = self.driver.ex_get_backup_details_for_target(target)
        self.assertTrue(response is None)

    def test_ex_cancel_target_job(self):
        target = self.driver.list_targets()[0]
        response = self.driver.ex_get_backup_details_for_target(target)
        client = response.clients[0]
        self.assertTrue(isinstance(client.running_job, BackupTargetJob))
        success = client.running_job.cancel()
        self.assertTrue(success)

    def test_ex_cancel_target_job_with_extras(self):
        success = self.driver.cancel_target_job(
            None,
            ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8',
            ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87'
        )
        self.assertTrue(success)

    def test_ex_cancel_target_job_FAIL(self):
        DimensionDataMockHttp.type = 'FAIL'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.cancel_target_job(
                None,
                ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8',
                ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87'
            )
        self.assertEqual(context.exception.code, 'ERROR')

    def test_ex_get_backup_details_for_target_NO_CLIENT(self):
        """Backup info for a target that does not have a client."""
        # NOTE: the original file had this description as a stray bare
        # string before the method; it is now a proper docstring.
        DimensionDataMockHttp.type = 'NOCLIENT'
        response = self.driver.ex_get_backup_details_for_target(
            'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(response.service_plan, 'Essentials')
        self.assertEqual(len(response.clients), 0)

    def test_ex_get_backup_details_for_target_NO_JOB_OR_ALERT(self):
        """Backup details with a client but no alerting or running jobs."""
        DimensionDataMockHttp.type = 'NOJOB'
        response = self.driver.ex_get_backup_details_for_target(
            'e75ead52-692f-4314_8725-c8a4f4d13a87')
        self.assertEqual(response.service_plan, 'Enterprise')
        self.assertTrue(isinstance(response.clients, list))
        self.assertEqual(len(response.clients), 1)
        client = response.clients[0]
        self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8')
        self.assertEqual(client.type.type, 'FA.Linux')
        self.assertIsNone(client.running_job)
        self.assertIsNone(client.alert)

    def test_ex_get_backup_details_for_target_DISABLED(self):
        """Backup info for a server that has no backup provisioned."""
        DimensionDataMockHttp.type = 'DISABLED'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.ex_get_backup_details_for_target(
                'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(context.exception.code, 'ERROR')
        self.assertEqual(
            context.exception.msg,
            'Server e75ead52-692f-4314-8725-c8a4f4d13a87 has not been '
            'provisioned for backup')

    def test_ex_list_available_client_types(self):
        target = self.driver.list_targets()[0]
        answer = self.driver.ex_list_available_client_types(target)
        self.assertEqual(len(answer), 1)
        self.assertEqual(answer[0].type, 'FA.Linux')
        self.assertEqual(answer[0].is_file_system, True)
        self.assertEqual(answer[0].description, 'Linux File system')

    def test_ex_list_available_storage_policies(self):
        target = self.driver.list_targets()[0]
        answer = self.driver.ex_list_available_storage_policies(target)
        self.assertEqual(len(answer), 1)
        self.assertEqual(answer[0].name,
                         '30 Day Storage Policy + Secondary Copy')
        self.assertEqual(answer[0].retention_period, 30)
        self.assertEqual(answer[0].secondary_location, 'Primary')

    def test_ex_list_available_schedule_policies(self):
        target = self.driver.list_targets()[0]
        answer = self.driver.ex_list_available_schedule_policies(target)
        self.assertEqual(len(answer), 1)
        self.assertEqual(answer[0].name, '12AM - 6AM')
        self.assertEqual(answer[0].description,
                         'Daily backup will start between 12AM - 6AM')

    def test_ex_remove_client_from_target(self):
        target = self.driver.list_targets()[0]
        client = self.driver.ex_get_backup_details_for_target(
            'e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0]
        self.assertTrue(
            self.driver.ex_remove_client_from_target(target, client))

    def test_ex_remove_client_from_target_STR(self):
        self.assertTrue(
            self.driver.ex_remove_client_from_target(
                'e75ead52-692f-4314-8725-c8a4f4d13a87',
                '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
            )
        )

    def test_ex_remove_client_from_target_FAIL(self):
        DimensionDataMockHttp.type = 'FAIL'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.ex_remove_client_from_target(
                'e75ead52-692f-4314-8725-c8a4f4d13a87',
                '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
            )
        self.assertEqual(context.exception.code, 'ERROR')
        self.assertTrue('Backup Client is currently performing another '
                        'operation' in context.exception.msg)

    def test_priv_target_to_target_address(self):
        target = self.driver.list_targets()[0]
        self.assertEqual(
            self.driver._target_to_target_address(target),
            'e75ead52-692f-4314-8725-c8a4f4d13a87'
        )

    def test_priv_target_to_target_address_STR(self):
        self.assertEqual(
            self.driver._target_to_target_address(
                'e75ead52-692f-4314-8725-c8a4f4d13a87'),
            'e75ead52-692f-4314-8725-c8a4f4d13a87'
        )

    def test_priv_target_to_target_address_TYPEERROR(self):
        with self.assertRaises(TypeError):
            self.driver._target_to_target_address([1, 2, 3])

    def test_priv_client_to_client_id(self):
        client = self.driver.ex_get_backup_details_for_target(
            'e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0]
        self.assertEqual(
            self.driver._client_to_client_id(client),
            '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
        )

    def test_priv_client_to_client_id_STR(self):
        self.assertEqual(
            self.driver._client_to_client_id(
                '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'),
            '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
        )

    def test_priv_client_to_client_id_TYPEERROR(self):
        with self.assertRaises(TypeError):
            self.driver._client_to_client_id([1, 2, 3])


class InvalidRequestError(Exception):
    """Raised by the mock when a test sends an unexpected request."""

    def __init__(self, tag):
        super(InvalidRequestError, self).__init__(
            "Invalid Request - %s" % tag)


class DimensionDataMockHttp(MockHttp):
    """
    Mock HTTP backend serving canned Dimension Data backup API fixtures.

    Handler names are derived from the request path with an optional
    ``_<type>`` suffix that matches ``DimensionDataMockHttp.type``.
    """

    fixtures = BackupFileFixtures('dimensiondata')

    def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
        return (httplib.UNAUTHORIZED, "", {},
                httplib.responses[httplib.UNAUTHORIZED])

    def _oec_0_9_myaccount(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_EXISTS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_DEFAULT(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_FAIL(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_NOCLIENT(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_DISABLED(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_NOJOB(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOCLIENT(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOJOB(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DISABLED(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_type(self, method, url, body, headers):
        body = self.fixtures.load(
            '_backup_client_type.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_storagePolicy(
            self, method, url, body, headers):
        body = self.fixtures.load(
            '_backup_client_storagePolicy.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_schedulePolicy(
            self, method, url, body, headers):
        body = self.fixtures.load(
            '_backup_client_schedulePolicy.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client(
            self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load(
                '_backup_client_SUCCESS_PUT.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            raise ValueError("Unknown Method {0}".format(method))

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOCLIENT(
            self, method, url, body, headers):
        # only GETs are implemented for this variant; anything else means
        # the driver issued an unexpected request.
        assert(method == 'GET')
        body = self.fixtures.load(
            '_backup_INFO_NOCLIENT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DISABLED(
            self, method, url, body, headers):
        # only GETs are implemented; the API reports "not provisioned"
        # as an HTTP 400 with an error payload.
        assert(method == 'GET')
        body = self.fixtures.load(
            '_backup_INFO_DISABLED.xml')
        return (httplib.BAD_REQUEST, body, {},
                httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOJOB(
            self, method, url, body, headers):
        # only GETs are implemented for this variant.
        assert(method == 'GET')
        body = self.fixtures.load(
            '_backup_INFO_NOJOB.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DEFAULT(
            self, method, url, body, headers):
        if method != 'POST':
            raise InvalidRequestError('Only POST is accepted for this test')
        # Verify the driver sent the default service plan in the body.
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != DEFAULT_BACKUP_PLAN:
            raise InvalidRequestError(
                'The default plan %s should have been passed in.  Not %s'
                % (DEFAULT_BACKUP_PLAN, service_plan))
        body = self.fixtures.load(
            '_backup_ENABLE.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup(
            self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load(
                '_backup_ENABLE.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        elif method == 'GET':
            if url.endswith('disable'):
                body = self.fixtures.load(
                    '_backup_DISABLE.xml')
                return (httplib.OK, body, {},
                        httplib.responses[httplib.OK])
            body = self.fixtures.load(
                '_backup_INFO.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            raise ValueError("Unknown Method {0}".format(method))

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOBACKUP(
            self, method, url, body, headers):
        assert(method == 'GET')
        body = self.fixtures.load('server_server_NOBACKUP.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_EXISTS(
            self, method, url, body, headers):
        # only POSTs are implemented; "already enabled" comes back as 400.
        assert(method == 'POST')
        body = self.fixtures.load(
            '_backup_EXISTS.xml')
        return (httplib.BAD_REQUEST, body, {},
                httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify(
            self, method, url, body, headers):
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != 'Essentials':
            raise InvalidRequestError(
                "Expected Essentials backup plan in request")
        body = self.fixtures.load('_backup_modify.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify_DEFAULT(
            self, method, url, body, headers):
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != DEFAULT_BACKUP_PLAN:
            # BUGFIX: was "Expected % backup plan in test" — the bare '%'
            # made this raise ValueError('unsupported format character')
            # instead of the intended InvalidRequestError message.
            raise InvalidRequestError(
                "Expected %s backup plan in test" % DEFAULT_BACKUP_PLAN)
        body = self.fixtures.load('_backup_modify.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8(
            self, method, url, body, headers):
        if url.endswith('disable'):
            body = self.fixtures.load(
                '_remove_backup_client.xml')
        elif url.endswith('cancelJob'):
            body = self.fixtures.load(
                '_backup_client_30b1ff76_c76d_4d7c_b39d_'
                '3b72be0384c8_cancelJob.xml')
        else:
            raise ValueError("Unknown URL: %s" % url)
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_FAIL(
            self, method, url, body, headers):
        if url.endswith('disable'):
            body = self.fixtures.load(
                '_remove_backup_client_FAIL.xml')
        elif url.endswith('cancelJob'):
            body = self.fixtures.load(
                '_backup_client_30b1ff76_c76d_4d7c_b39d_'
                '3b72be0384c8_cancelJob_FAIL.xml')
        else:
            raise ValueError("Unknown URL: %s" % url)
        return (httplib.BAD_REQUEST, body, {},
                httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.backup.base import BackupTarget, BackupTargetType class TestCaseMixin(object): def get_supported_target_types(self): targets = self.driver.get_supported_target_types() self.assertTrue(isinstance(targets, list)) for target in targets: self.assertTrue(isinstance(target, BackupTargetType)) def test_list_targets_response(self): targets = self.driver.list_targets() self.assertTrue(isinstance(targets, list)) for target in targets: self.assertTrue(isinstance(target, BackupTarget)) if __name__ == "__main__": import doctest doctest.testmod() apache-libcloud-2.2.1/libcloud/test/backup/fixtures/0000775000175000017500000000000013160535107022311 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/0000775000175000017500000000000013160535107025130 5ustar kamikami00000000000000apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/oec_0_9_myaccount.xml0000664000175000017500000000303412701023453031145 0ustar kamikami00000000000000 testuser Test User Test User test@example.com 8a8f6abc-2745-4d8a-9cbc-8dabe5a7d0e4 create image reports server primary administrator network apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_DISABLE.xml0000664000175000017500000000231413153541406030402 0ustar kamikami00000000000000 Disable Backup for Server SUCCESS Backup disabled for Server REASON_0 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_INFO_NOJOB.xml0000664000175000017500000000333613153541406031026 0ustar kamikami00000000000000 Linux File Agent 12AM - 6AM 14 Day Storage Policy 0 
https://backups-na.cloud-vpn.net/PCS/BackupClientInstallerDownload/cbb8a8c607ca4144e8828814edfc1634c8dd8782 ././@LongLink0000000000000000000000000000021100000000000011207 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob_FAIL.xmlapache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_client_30b1ff76_c76d_4d7c_0000664000175000017500000000231113153541406033034 0ustar kamikami00000000000000 Cancel Backup Job ERROR No backup job currently running on client REASON_547 ././@LongLink0000000000000000000000000000020400000000000011211 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob.xmlapache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_client_30b1ff76_c76d_4d7c_0000664000175000017500000000226313153541406033042 0ustar kamikami00000000000000 Cancel Backup Job SUCCESS Backup Job Canceled REASON_0 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_client_type.xml0000664000175000017500000000045513153541406031662 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_INFO_DISABLED.xml0000664000175000017500000000236013153541406031322 0ustar kamikami00000000000000 Get Backup Details ERROR Server e75ead52-692f-4314-8725-c8a4f4d13a87 has not been provisioned for backup REASON_543 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_modify.xml0000664000175000017500000000230413153541406030625 0ustar kamikami00000000000000 Change Backup Service Plan SUCCESS Backup Service Plan changed REASON_0 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_client_storagePolicy.xml0000664000175000017500000000057013153541406033523 0ustar kamikami00000000000000 
apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_remove_backup_client.xml0000664000175000017500000000226312701023453032170 0ustar kamikami00000000000000 Disable Backup Client SUCCESS Backup Client disabled REASON_0 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_ENABLE.xml0000664000175000017500000000274713153541406030277 0ustar kamikami00000000000000 Enable Backup for Server SUCCESS Backup enabled for Server - see additional Information for Asset ID REASON_0 ee7c4b64-f7af-4a4f-8384-be362273530f apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_client_schedulePolicy.xml0000664000175000017500000000047213153541406033654 0ustar kamikami00000000000000 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_remove_backup_client_FAIL.xml0000664000175000017500000000274412703467102032774 0ustar kamikami00000000000000 Disable Backup Client ERROR DISABLE_BACKUP_CLIENT 'didata-backup-test6[172-16-1-14]' - failed - Unexpected error occurred with NA9 Backup system at 2016-02-12 00:03:50.952, TransactionId: (9d483a7a-1cc9-441b-920c-e11fb0e94ba6), PCSOperation: DeprovisionBackupClient, Backup Client is currently performing another operation: Backup client is currently busy REASON_547 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_EXISTS.xml0000664000175000017500000000063413153541406030361 0ustar kamikami00000000000000 Enable Backup for Server ERROR Cloud backup for this server is already enabled or being enabled (state: NORMAL). REASON_550 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/server_server_NOBACKUP.xml0000664000175000017500000000344513153541406032037 0ustar kamikami00000000000000 Production Web Server MCP 2 Server to host our main web application. 
4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/server_server.xml0000664000175000017500000000604112704221640030544 0ustar kamikami00000000000000 Production Web Server MCP 1 nopassword0 2 e9ec6eb4-4634-49de-b914-01eb74da5fb9 2015-08-11T16:51:05.000Z true true NORMAL Production Web Server MCP 2 Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE SHUTDOWN_SERVER 2015-12-02T11:07:40.000Z devuser1 ././@LongLink0000000000000000000000000000020100000000000011206 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xmlapache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/server_server_e75ead52_692f_4314_80000664000175000017500000000327012704221640032717 0ustar kamikami00000000000000 Production Web Server Server to host our main web application. 
4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE DEPLOY_SERVER 2015-12-02T11:07:40.000Z devuser1 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_client_SUCCESS_PUT.xml0000664000175000017500000000312313153541406032534 0ustar kamikami00000000000000 Add Backup Client SUCCESS Backup Client added REASON_0 a26cead0-6bab-4446-9a16-c227e6ab201f https://backups-na.cloud-vpn.net/PCS/BackupClientInstallerDownload/4b3f35815b848ce7270186b52e5817fa5de0fe42 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_INFO.xml0000664000175000017500000000402413153541406030072 0ustar kamikami00000000000000 Linux File Agent 12AM - 6AM 14 Day Storage Policy fake_email@example.com fake_email2@example.com 0 https://backups-na.cloud-vpn.net/PCS/BackupClientInstallerDownload/cbb8a8c607ca4144e8828814edfc1634c8dd8782 apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/_backup_INFO_NOCLIENT.xml0000664000175000017500000000207213153541406031366 0ustar kamikami00000000000000 ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000apache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xmlapache-libcloud-2.2.1/libcloud/test/backup/fixtures/dimensiondata/server_server_e75ead52_692f_4314_80000664000175000017500000000327212704221640032721 0ustar kamikami00000000000000 Production Web Server Server to host our main web application. 4 MSSQL2008R2S 3ebf3c0f-90fe-4a8b-8585-6e65b316592c 2015-12-02T10:31:33.000Z true true PENDING_CHANGE DEPLOY_SERVER 2015-12-02T11:07:40.000Z devuser1 apache-libcloud-2.2.1/libcloud/test/backup/test_dimensiondata_v2_4.py0000664000175000017500000005757013153541406025541 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from libcloud.utils.py3 import httplib from libcloud.utils.py3 import ET from libcloud.common.dimensiondata import DimensionDataAPIException from libcloud.common.types import InvalidCredsError from libcloud.backup.base import BackupTargetJob from libcloud.backup.drivers.dimensiondata import DimensionDataBackupDriver as DimensionData from libcloud.backup.drivers.dimensiondata import DEFAULT_BACKUP_PLAN from libcloud.test import MockHttp, unittest from libcloud.test.file_fixtures import BackupFileFixtures from libcloud.test.secrets import DIMENSIONDATA_PARAMS class DimensionData_v2_4_Tests(unittest.TestCase): def setUp(self): DimensionData.connectionCls.active_api_version = '2.4' DimensionData.connectionCls.conn_class = DimensionDataMockHttp DimensionDataMockHttp.type = None self.driver = DimensionData(*DIMENSIONDATA_PARAMS) def test_invalid_region(self): with self.assertRaises(ValueError): self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah') def test_invalid_creds(self): DimensionDataMockHttp.type = 'UNAUTHORIZED' with self.assertRaises(InvalidCredsError): self.driver.list_targets() def test_list_targets(self): targets = self.driver.list_targets() self.assertEqual(len(targets), 2) self.assertEqual(targets[0].id, '5579f3a7-4c32-4cf5-8a7e-b45c36a35c10') 
self.assertEqual(targets[0].address, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(targets[0].extra['servicePlan'], 'Enterprise') def test_create_target(self): target = self.driver.create_target( 'name', 'e75ead52-692f-4314-8725-c8a4f4d13a87', extra={'servicePlan': 'Enterprise'}) self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f') self.assertEqual(target.address, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(target.extra['servicePlan'], 'Enterprise') def test_create_target_DEFAULT(self): DimensionDataMockHttp.type = 'DEFAULT' target = self.driver.create_target( 'name', 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f') self.assertEqual(target.address, 'e75ead52-692f-4314-8725-c8a4f4d13a87') def test_create_target_EXISTS(self): DimensionDataMockHttp.type = 'EXISTS' with self.assertRaises(DimensionDataAPIException) as context: self.driver.create_target( 'name', 'e75ead52-692f-4314-8725-c8a4f4d13a87', extra={'servicePlan': 'Enterprise'}) self.assertEqual(context.exception.code, 'ERROR') self.assertEqual(context.exception.msg, 'Cloud backup for this server is already enabled or being enabled (state: NORMAL).') def test_update_target(self): target = self.driver.list_targets()[0] extra = {'servicePlan': 'Essentials'} new_target = self.driver.update_target(target, extra=extra) self.assertEqual(new_target.extra['servicePlan'], 'Essentials') def test_update_target_DEFAULT(self): DimensionDataMockHttp.type = 'DEFAULT' target = 'e75ead52-692f-4314-8725-c8a4f4d13a87' self.driver.update_target(target) def test_update_target_STR(self): target = 'e75ead52-692f-4314-8725-c8a4f4d13a87' extra = {'servicePlan': 'Essentials'} new_target = self.driver.update_target(target, extra=extra) self.assertEqual(new_target.extra['servicePlan'], 'Essentials') def test_delete_target(self): target = self.driver.list_targets()[0] self.assertTrue(self.driver.delete_target(target)) def 
test_ex_add_client_to_target(self): target = self.driver.list_targets()[0] client = self.driver.ex_list_available_client_types(target)[0] storage_policy = self.driver.ex_list_available_storage_policies(target)[0] schedule_policy = self.driver.ex_list_available_schedule_policies(target)[0] self.assertTrue( self.driver.ex_add_client_to_target(target, client, storage_policy, schedule_policy, 'ON_FAILURE', 'nobody@example.com') ) def test_ex_add_client_to_target_STR(self): self.assertTrue( self.driver.ex_add_client_to_target('e75ead52-692f-4314-8725-c8a4f4d13a87', 'FA.Linux', '14 Day Storage Policy', '12AM - 6AM', 'ON_FAILURE', 'nobody@example.com') ) def test_ex_get_backup_details_for_target(self): target = self.driver.list_targets()[0] response = self.driver.ex_get_backup_details_for_target(target) self.assertEqual(response.service_plan, 'Enterprise') client = response.clients[0] self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8') self.assertEqual(client.type.type, 'FA.Linux') self.assertEqual(client.running_job.progress, 5) self.assertTrue(isinstance(client.running_job, BackupTargetJob)) self.assertEqual(len(client.alert.notify_list), 2) self.assertTrue(isinstance(client.alert.notify_list, list)) def test_ex_get_backup_details_for_target_NOBACKUP(self): target = self.driver.list_targets()[0].address DimensionDataMockHttp.type = 'NOBACKUP' response = self.driver.ex_get_backup_details_for_target(target) self.assertTrue(response is None) def test_ex_cancel_target_job(self): target = self.driver.list_targets()[0] response = self.driver.ex_get_backup_details_for_target(target) client = response.clients[0] self.assertTrue(isinstance(client.running_job, BackupTargetJob)) success = client.running_job.cancel() self.assertTrue(success) def test_ex_cancel_target_job_with_extras(self): success = self.driver.cancel_target_job( None, ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8', ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87' ) self.assertTrue(success) def 
test_ex_cancel_target_job_FAIL(self): DimensionDataMockHttp.type = 'FAIL' with self.assertRaises(DimensionDataAPIException) as context: self.driver.cancel_target_job( None, ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8', ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87' ) self.assertEqual(context.exception.code, 'ERROR') """Test a backup info for a target that does not have a client""" def test_ex_get_backup_details_for_target_NO_CLIENT(self): DimensionDataMockHttp.type = 'NOCLIENT' response = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(response.service_plan, 'Essentials') self.assertEqual(len(response.clients), 0) """Test a backup details that has a client, but no alerting or running jobs""" def test_ex_get_backup_details_for_target_NO_JOB_OR_ALERT(self): DimensionDataMockHttp.type = 'NOJOB' response = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314_8725-c8a4f4d13a87') self.assertEqual(response.service_plan, 'Enterprise') self.assertTrue(isinstance(response.clients, list)) self.assertEqual(len(response.clients), 1) client = response.clients[0] self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8') self.assertEqual(client.type.type, 'FA.Linux') self.assertIsNone(client.running_job) self.assertIsNone(client.alert) """Test getting backup info for a server that doesn't exist""" def test_ex_get_backup_details_for_target_DISABLED(self): DimensionDataMockHttp.type = 'DISABLED' with self.assertRaises(DimensionDataAPIException) as context: self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(context.exception.code, 'ERROR') self.assertEqual(context.exception.msg, 'Server e75ead52-692f-4314-8725-c8a4f4d13a87 has not been provisioned for backup') def test_ex_list_available_client_types(self): target = self.driver.list_targets()[0] answer = self.driver.ex_list_available_client_types(target) self.assertEqual(len(answer), 1) 
self.assertEqual(answer[0].type, 'FA.Linux') self.assertEqual(answer[0].is_file_system, True) self.assertEqual(answer[0].description, 'Linux File system') def test_ex_list_available_storage_policies(self): target = self.driver.list_targets()[0] answer = self.driver.ex_list_available_storage_policies(target) self.assertEqual(len(answer), 1) self.assertEqual(answer[0].name, '30 Day Storage Policy + Secondary Copy') self.assertEqual(answer[0].retention_period, 30) self.assertEqual(answer[0].secondary_location, 'Primary') def test_ex_list_available_schedule_policies(self): target = self.driver.list_targets()[0] answer = self.driver.ex_list_available_schedule_policies(target) self.assertEqual(len(answer), 1) self.assertEqual(answer[0].name, '12AM - 6AM') self.assertEqual(answer[0].description, 'Daily backup will start between 12AM - 6AM') def test_ex_remove_client_from_target(self): target = self.driver.list_targets()[0] client = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0] self.assertTrue(self.driver.ex_remove_client_from_target(target, client)) def test_ex_remove_client_from_target_STR(self): self.assertTrue( self.driver.ex_remove_client_from_target( 'e75ead52-692f-4314-8725-c8a4f4d13a87', '30b1ff76-c76d-4d7c-b39d-3b72be0384c8' ) ) def test_ex_remove_client_from_target_FAIL(self): DimensionDataMockHttp.type = 'FAIL' with self.assertRaises(DimensionDataAPIException) as context: self.driver.ex_remove_client_from_target( 'e75ead52-692f-4314-8725-c8a4f4d13a87', '30b1ff76-c76d-4d7c-b39d-3b72be0384c8' ) self.assertEqual(context.exception.code, 'ERROR') self.assertTrue('Backup Client is currently performing another operation' in context.exception.msg) def test_priv_target_to_target_address(self): target = self.driver.list_targets()[0] self.assertEqual( self.driver._target_to_target_address(target), 'e75ead52-692f-4314-8725-c8a4f4d13a87' ) def test_priv_target_to_target_address_STR(self): self.assertEqual( 
self.driver._target_to_target_address('e75ead52-692f-4314-8725-c8a4f4d13a87'), 'e75ead52-692f-4314-8725-c8a4f4d13a87' ) def test_priv_target_to_target_address_TYPEERROR(self): with self.assertRaises(TypeError): self.driver._target_to_target_address([1, 2, 3]) def test_priv_client_to_client_id(self): client = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0] self.assertEqual( self.driver._client_to_client_id(client), '30b1ff76-c76d-4d7c-b39d-3b72be0384c8' ) def test_priv_client_to_client_id_STR(self): self.assertEqual( self.driver._client_to_client_id('30b1ff76-c76d-4d7c-b39d-3b72be0384c8'), '30b1ff76-c76d-4d7c-b39d-3b72be0384c8' ) def test_priv_client_to_client_id_TYPEERROR(self): with self.assertRaises(TypeError): self.driver._client_to_client_id([1, 2, 3]) class InvalidRequestError(Exception): def __init__(self, tag): super(InvalidRequestError, self).__init__("Invalid Request - %s" % tag) class DimensionDataMockHttp(MockHttp): fixtures = BackupFileFixtures('dimensiondata') def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers): return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED]) def _oec_0_9_myaccount(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_EXISTS(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_DEFAULT(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_FAIL(self, method, url, body, headers): body = 
self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_NOCLIENT(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_DISABLED(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_NOJOB(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers): body = self.fixtures.load( 'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT(self, method, url, body, headers): body = self.fixtures.load( 'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOCLIENT(self, method, url, body, headers): body = self.fixtures.load( 'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOJOB(self, method, url, body, headers): body = self.fixtures.load( 'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DISABLED(self, method, url, body, headers): body = self.fixtures.load( 
'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers): body = self.fixtures.load( 'server_server.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_type(self, method, url, body, headers): body = self.fixtures.load( '_backup_client_type.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_storagePolicy( self, method, url, body, headers): body = self.fixtures.load( '_backup_client_storagePolicy.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_schedulePolicy( self, method, url, body, headers): body = self.fixtures.load( '_backup_client_schedulePolicy.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client( self, method, url, body, headers): if method == 'POST': body = self.fixtures.load( '_backup_client_SUCCESS_PUT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) else: raise ValueError("Unknown Method {0}".format(method)) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOCLIENT( self, method, url, body, headers): # only gets here are implemented # If we get any other method something has gone wrong assert(method == 'GET') body = self.fixtures.load( '_backup_INFO_NOCLIENT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DISABLED( 
self, method, url, body, headers): # only gets here are implemented # If we get any other method something has gone wrong assert(method == 'GET') body = self.fixtures.load( '_backup_INFO_DISABLED.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOJOB( self, method, url, body, headers): # only gets here are implemented # If we get any other method something has gone wrong assert(method == 'GET') body = self.fixtures.load( '_backup_INFO_NOJOB.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DEFAULT( self, method, url, body, headers): if method != 'POST': raise InvalidRequestError('Only POST is accepted for this test') request = ET.fromstring(body) service_plan = request.get('servicePlan') if service_plan != DEFAULT_BACKUP_PLAN: raise InvalidRequestError('The default plan %s should have been passed in. 
Not %s' % (DEFAULT_BACKUP_PLAN, service_plan)) body = self.fixtures.load( '_backup_ENABLE.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup( self, method, url, body, headers): if method == 'POST': body = self.fixtures.load( '_backup_ENABLE.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) elif method == 'GET': if url.endswith('disable'): body = self.fixtures.load( '_backup_DISABLE.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) body = self.fixtures.load( '_backup_INFO.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) else: raise ValueError("Unknown Method {0}".format(method)) def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOBACKUP( self, method, url, body, headers): assert(method == 'GET') body = self.fixtures.load('server_server_NOBACKUP.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_EXISTS( self, method, url, body, headers): # only POSTs are implemented # If we get any other method something has gone wrong assert(method == 'POST') body = self.fixtures.load( '_backup_EXISTS.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify( self, method, url, body, headers): request = ET.fromstring(body) service_plan = request.get('servicePlan') if service_plan != 'Essentials': raise InvalidRequestError("Expected Essentials backup plan in request") body = self.fixtures.load('_backup_modify.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify_DEFAULT( self, method, url, body, headers): 
request = ET.fromstring(body) service_plan = request.get('servicePlan') if service_plan != DEFAULT_BACKUP_PLAN: raise InvalidRequestError("Expected % backup plan in test" % DEFAULT_BACKUP_PLAN) body = self.fixtures.load('_backup_modify.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8( self, method, url, body, headers): if url.endswith('disable'): body = self.fixtures.load( ('_remove_backup_client.xml') ) elif url.endswith('cancelJob'): body = self.fixtures.load( ('' '_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob.xml') ) else: raise ValueError("Unknown URL: %s" % url) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_FAIL( self, method, url, body, headers): if url.endswith('disable'): body = self.fixtures.load( ('_remove_backup_client_FAIL.xml') ) elif url.endswith('cancelJob'): body = self.fixtures.load( ('' '_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob_FAIL.xml') ) else: raise ValueError("Unknown URL: %s" % url) return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/libcloud/test/test_types.py0000664000175000017500000000714112701023453021747 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.common.types import LazyList class TestLazyList(unittest.TestCase): def setUp(self): super(TestLazyList, self).setUp self._get_more_counter = 0 def tearDown(self): super(TestLazyList, self).tearDown def test_init(self): data = [1, 2, 3, 4, 5] ll = LazyList(get_more=self._get_more_exhausted) ll_list = list(ll) self.assertEqual(ll_list, data) def test_iterator(self): data = [1, 2, 3, 4, 5] ll = LazyList(get_more=self._get_more_exhausted) for i, d in enumerate(ll): self.assertEqual(d, data[i]) def test_empty_list(self): ll = LazyList(get_more=self._get_more_empty) self.assertEqual(list(ll), []) self.assertEqual(len(ll), 0) self.assertTrue(10 not in ll) def test_iterator_not_exhausted(self): data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ll = LazyList(get_more=self._get_more_not_exhausted) number_of_iterations = 0 for i, d in enumerate(ll): self.assertEqual(d, data[i]) number_of_iterations += 1 self.assertEqual(number_of_iterations, 10) def test_len(self): ll = LazyList(get_more=self._get_more_not_exhausted) ll = LazyList(get_more=self._get_more_not_exhausted) self.assertEqual(len(ll), 10) def test_contains(self): ll = LazyList(get_more=self._get_more_not_exhausted) self.assertTrue(40 not in ll) self.assertTrue(1 in ll) self.assertTrue(5 in ll) self.assertTrue(10 in ll) def test_indexing(self): ll = LazyList(get_more=self._get_more_not_exhausted) self.assertEqual(ll[0], 1) self.assertEqual(ll[9], 10) self.assertEqual(ll[-1], 10) try: ll[11] except IndexError: pass else: self.fail('Exception was not thrown') def test_repr(self): ll1 = 
LazyList(get_more=self._get_more_empty)
        ll2 = LazyList(get_more=self._get_more_exhausted)
        ll3 = LazyList(get_more=self._get_more_not_exhausted)

        self.assertEqual(repr(ll1), '[]')
        self.assertEqual(repr(ll2), '[1, 2, 3, 4, 5]')
        self.assertEqual(repr(ll3), '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]')

    # The helpers below are ``get_more`` callbacks; each returns a
    # ``(data, last_key, exhausted)`` tuple emulating paginated fetches.

    def _get_more_empty(self, last_key, value_dict):
        # No items at all; exhausted immediately.
        return [], None, True

    def _get_more_exhausted(self, last_key, value_dict):
        # A single page of five items, exhausted after one call.
        data = [1, 2, 3, 4, 5]
        return data, 5, True

    def _get_more_not_exhausted(self, last_key, value_dict):
        # Two pages of five items each; ``last_key`` selects the page.
        # The call counter lets tests observe how many fetches happened.
        self._get_more_counter += 1
        if not last_key:
            data, last_key, exhausted = [1, 2, 3, 4, 5], 5, False
        else:
            data, last_key, exhausted = [6, 7, 8, 9, 10], 10, True

        return data, last_key, exhausted

if __name__ == '__main__':
    sys.exit(unittest.main())
apache-libcloud-2.2.1/libcloud/test/test_logging_connection.py0000664000175000017500000000532113153541406024453 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys from io import StringIO import zlib import requests_mock import libcloud from libcloud.test import unittest from libcloud.common.base import Connection from libcloud.http import LibcloudConnection from libcloud.utils.loggingconnection import LoggingConnection class TestLoggingConnection(unittest.TestCase): def tearDown(self): Connection.conn_class = LibcloudConnection def test_debug_method_uses_log_class(self): with StringIO() as fh: libcloud.enable_debug(fh) conn = Connection(timeout=10) conn.connect() self.assertTrue(isinstance(conn.connection, LoggingConnection)) def test_debug_log_class_handles_request(self): with StringIO() as fh: libcloud.enable_debug(fh) conn = Connection(url='http://test.com/') conn.connect() self.assertEqual(conn.connection.host, 'http://test.com') with requests_mock.mock() as m: m.get('http://test.com/test', text='data') conn.request('/test') log = fh.getvalue() self.assertTrue(isinstance(conn.connection, LoggingConnection)) self.assertIn('-i -X GET', log) self.assertIn('data', log) def test_debug_log_class_handles_request_with_compression(self): request = zlib.compress(b'data') with StringIO() as fh: libcloud.enable_debug(fh) conn = Connection(url='http://test.com/') conn.connect() self.assertEqual(conn.connection.host, 'http://test.com') with requests_mock.mock() as m: m.get('http://test.com/test', content=request, headers={'content-encoding': 'zlib'}) conn.request('/test') log = fh.getvalue() self.assertTrue(isinstance(conn.connection, LoggingConnection)) self.assertIn('-i -X GET', log) if __name__ == '__main__': sys.exit(unittest.main()) apache-libcloud-2.2.1/example_compute.py0000664000175000017500000000257312701023453020163 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver EC2 = get_driver(Provider.EC2) Rackspace = get_driver(Provider.RACKSPACE) drivers = [EC2('access key id', 'secret key', region='us-east-1'), Rackspace('username', 'api key', region='iad')] nodes = [driver.list_nodes() for driver in drivers] print(nodes) # [ , # , ... ] # grab the node named "test" node = [n for n in nodes if n.name == 'test'][0] # reboot "test" node.reboot() apache-libcloud-2.2.1/apache_libcloud.egg-info/0000775000175000017500000000000013160535107021207 5ustar kamikami00000000000000apache-libcloud-2.2.1/apache_libcloud.egg-info/requires.txt0000664000175000017500000000001113160535106023576 0ustar kamikami00000000000000requests apache-libcloud-2.2.1/apache_libcloud.egg-info/SOURCES.txt0000664000175000017500000043766513160535107023120 0ustar kamikami00000000000000.pylintrc CHANGES.rst LICENSE MANIFEST.in NOTICE README.rst example_compute.py example_dns.py example_loadbalancer.py example_storage.py requirements-tests.txt setup.cfg setup.py tox.ini apache_libcloud.egg-info/PKG-INFO apache_libcloud.egg-info/SOURCES.txt apache_libcloud.egg-info/dependency_links.txt apache_libcloud.egg-info/not-zip-safe apache_libcloud.egg-info/requires.txt apache_libcloud.egg-info/top_level.txt contrib/Dockerfile contrib/__init__.py contrib/apply-patch.ps1 contrib/generate_contributor_list.py 
contrib/generate_provider_feature_matrix_table.py contrib/generate_provider_logos_collage_image.py contrib/migrate_paths.sh contrib/pre-commit.sh contrib/run_tests.sh contrib/scrape-ec2-prices.py contrib/trigger_rtd_build.py contrib/update_google_prices.py contrib/utils.py contrib/utils_test.py demos/__init__.py demos/compute_demo.py demos/example_aliyun_ecs.py demos/example_aliyun_oss.py demos/example_aliyun_slb.py demos/example_openstack.py demos/gce_demo.py demos/secrets.py-dist libcloud/__init__.py libcloud/base.py libcloud/http.py libcloud/pricing.py libcloud/security.py libcloud/backup/__init__.py libcloud/backup/base.py libcloud/backup/providers.py libcloud/backup/types.py libcloud/backup/drivers/__init__.py libcloud/backup/drivers/dimensiondata.py libcloud/backup/drivers/dummy.py libcloud/backup/drivers/ebs.py libcloud/backup/drivers/gce.py libcloud/common/__init__.py libcloud/common/abiquo.py libcloud/common/aliyun.py libcloud/common/aws.py libcloud/common/azure.py libcloud/common/azure_arm.py libcloud/common/base.py libcloud/common/brightbox.py libcloud/common/buddyns.py libcloud/common/cloudsigma.py libcloud/common/cloudstack.py libcloud/common/digitalocean.py libcloud/common/dimensiondata.py libcloud/common/dnsimple.py libcloud/common/dnspod.py libcloud/common/durabledns.py libcloud/common/exceptions.py libcloud/common/gandi.py libcloud/common/gogrid.py libcloud/common/google.py libcloud/common/hostvirtual.py libcloud/common/linode.py libcloud/common/liquidweb.py libcloud/common/luadns.py libcloud/common/nfsn.py libcloud/common/nsone.py libcloud/common/onapp.py libcloud/common/openstack.py libcloud/common/openstack_identity.py libcloud/common/ovh.py libcloud/common/pointdns.py libcloud/common/providers.py libcloud/common/rackspace.py libcloud/common/softlayer.py libcloud/common/types.py libcloud/common/vultr.py libcloud/common/worldwidedns.py libcloud/common/xmlrpc.py libcloud/common/zonomi.py libcloud/compute/__init__.py libcloud/compute/base.py 
libcloud/compute/deployment.py libcloud/compute/deprecated.py libcloud/compute/providers.py libcloud/compute/ssh.py libcloud/compute/types.py libcloud/compute/drivers/__init__.py libcloud/compute/drivers/abiquo.py libcloud/compute/drivers/auroracompute.py libcloud/compute/drivers/azure.py libcloud/compute/drivers/azure_arm.py libcloud/compute/drivers/bluebox.py libcloud/compute/drivers/brightbox.py libcloud/compute/drivers/bsnl.py libcloud/compute/drivers/cloudscale.py libcloud/compute/drivers/cloudsigma.py libcloud/compute/drivers/cloudstack.py libcloud/compute/drivers/cloudwatt.py libcloud/compute/drivers/digitalocean.py libcloud/compute/drivers/dimensiondata.py libcloud/compute/drivers/dummy.py libcloud/compute/drivers/ec2.py libcloud/compute/drivers/ecp.py libcloud/compute/drivers/ecs.py libcloud/compute/drivers/elastichosts.py libcloud/compute/drivers/elasticstack.py libcloud/compute/drivers/exoscale.py libcloud/compute/drivers/gandi.py libcloud/compute/drivers/gce.py libcloud/compute/drivers/gogrid.py libcloud/compute/drivers/gridspot.py libcloud/compute/drivers/hostvirtual.py libcloud/compute/drivers/ikoula.py libcloud/compute/drivers/indosat.py libcloud/compute/drivers/internetsolutions.py libcloud/compute/drivers/joyent.py libcloud/compute/drivers/kili.py libcloud/compute/drivers/ktucloud.py libcloud/compute/drivers/libvirt_driver.py libcloud/compute/drivers/linode.py libcloud/compute/drivers/medone.py libcloud/compute/drivers/nephoscale.py libcloud/compute/drivers/ntta.py libcloud/compute/drivers/onapp.py libcloud/compute/drivers/oneandone.py libcloud/compute/drivers/opennebula.py libcloud/compute/drivers/openstack.py libcloud/compute/drivers/ovh.py libcloud/compute/drivers/packet.py libcloud/compute/drivers/profitbricks.py libcloud/compute/drivers/rackspace.py libcloud/compute/drivers/rimuhosting.py libcloud/compute/drivers/serverlove.py libcloud/compute/drivers/skalicloud.py libcloud/compute/drivers/softlayer.py libcloud/compute/drivers/vcl.py 
libcloud/compute/drivers/vcloud.py libcloud/compute/drivers/voxel.py libcloud/compute/drivers/vpsnet.py libcloud/compute/drivers/vsphere.py libcloud/compute/drivers/vultr.py libcloud/container/__init__.py libcloud/container/base.py libcloud/container/providers.py libcloud/container/types.py libcloud/container/drivers/__init__.py libcloud/container/drivers/docker.py libcloud/container/drivers/dummy.py libcloud/container/drivers/ecs.py libcloud/container/drivers/gke.py libcloud/container/drivers/joyent.py libcloud/container/drivers/kubernetes.py libcloud/container/drivers/rancher.py libcloud/container/utils/__init__.py libcloud/container/utils/docker.py libcloud/data/pricing.json libcloud/dns/__init__.py libcloud/dns/base.py libcloud/dns/providers.py libcloud/dns/types.py libcloud/dns/drivers/__init__.py libcloud/dns/drivers/auroradns.py libcloud/dns/drivers/buddyns.py libcloud/dns/drivers/cloudflare.py libcloud/dns/drivers/digitalocean.py libcloud/dns/drivers/dnsimple.py libcloud/dns/drivers/dnspod.py libcloud/dns/drivers/dummy.py libcloud/dns/drivers/durabledns.py libcloud/dns/drivers/gandi.py libcloud/dns/drivers/godaddy.py libcloud/dns/drivers/google.py libcloud/dns/drivers/hostvirtual.py libcloud/dns/drivers/linode.py libcloud/dns/drivers/liquidweb.py libcloud/dns/drivers/luadns.py libcloud/dns/drivers/nfsn.py libcloud/dns/drivers/nsone.py libcloud/dns/drivers/onapp.py libcloud/dns/drivers/pointdns.py libcloud/dns/drivers/powerdns.py libcloud/dns/drivers/rackspace.py libcloud/dns/drivers/route53.py libcloud/dns/drivers/softlayer.py libcloud/dns/drivers/vultr.py libcloud/dns/drivers/worldwidedns.py libcloud/dns/drivers/zerigo.py libcloud/dns/drivers/zonomi.py libcloud/loadbalancer/__init__.py libcloud/loadbalancer/base.py libcloud/loadbalancer/providers.py libcloud/loadbalancer/types.py libcloud/loadbalancer/drivers/__init__.py libcloud/loadbalancer/drivers/alb.py libcloud/loadbalancer/drivers/brightbox.py libcloud/loadbalancer/drivers/cloudstack.py 
libcloud/loadbalancer/drivers/dimensiondata.py libcloud/loadbalancer/drivers/elb.py libcloud/loadbalancer/drivers/gce.py libcloud/loadbalancer/drivers/gogrid.py libcloud/loadbalancer/drivers/ninefold.py libcloud/loadbalancer/drivers/rackspace.py libcloud/loadbalancer/drivers/slb.py libcloud/loadbalancer/drivers/softlayer.py libcloud/storage/__init__.py libcloud/storage/base.py libcloud/storage/providers.py libcloud/storage/types.py libcloud/storage/drivers/__init__.py libcloud/storage/drivers/atmos.py libcloud/storage/drivers/auroraobjects.py libcloud/storage/drivers/azure_blobs.py libcloud/storage/drivers/backblaze_b2.py libcloud/storage/drivers/cloudfiles.py libcloud/storage/drivers/dummy.py libcloud/storage/drivers/google_storage.py libcloud/storage/drivers/ktucloud.py libcloud/storage/drivers/local.py libcloud/storage/drivers/nimbus.py libcloud/storage/drivers/ninefold.py libcloud/storage/drivers/oss.py libcloud/storage/drivers/rgw.py libcloud/storage/drivers/s3.py libcloud/test/__init__.py libcloud/test/conftest.py libcloud/test/file_fixtures.py libcloud/test/pricing_test.json libcloud/test/secrets.py-dist libcloud/test/test_connection.py libcloud/test/test_file_fixtures.py libcloud/test/test_http.py libcloud/test/test_init.py libcloud/test/test_logging_connection.py libcloud/test/test_pricing.py libcloud/test/test_response_classes.py libcloud/test/test_types.py libcloud/test/test_utils.py libcloud/test/backup/__init__.py libcloud/test/backup/test_dimensiondata_v2_3.py libcloud/test/backup/test_dimensiondata_v2_4.py libcloud/test/backup/fixtures/dimensiondata/_backup_DISABLE.xml libcloud/test/backup/fixtures/dimensiondata/_backup_ENABLE.xml libcloud/test/backup/fixtures/dimensiondata/_backup_EXISTS.xml libcloud/test/backup/fixtures/dimensiondata/_backup_INFO.xml libcloud/test/backup/fixtures/dimensiondata/_backup_INFO_DISABLED.xml libcloud/test/backup/fixtures/dimensiondata/_backup_INFO_NOCLIENT.xml 
libcloud/test/backup/fixtures/dimensiondata/_backup_INFO_NOJOB.xml libcloud/test/backup/fixtures/dimensiondata/_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob.xml libcloud/test/backup/fixtures/dimensiondata/_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob_FAIL.xml libcloud/test/backup/fixtures/dimensiondata/_backup_client_SUCCESS_PUT.xml libcloud/test/backup/fixtures/dimensiondata/_backup_client_schedulePolicy.xml libcloud/test/backup/fixtures/dimensiondata/_backup_client_storagePolicy.xml libcloud/test/backup/fixtures/dimensiondata/_backup_client_type.xml libcloud/test/backup/fixtures/dimensiondata/_backup_modify.xml libcloud/test/backup/fixtures/dimensiondata/_remove_backup_client.xml libcloud/test/backup/fixtures/dimensiondata/_remove_backup_client_FAIL.xml libcloud/test/backup/fixtures/dimensiondata/oec_0_9_myaccount.xml libcloud/test/backup/fixtures/dimensiondata/server_server.xml libcloud/test/backup/fixtures/dimensiondata/server_server_NOBACKUP.xml libcloud/test/backup/fixtures/dimensiondata/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml libcloud/test/backup/fixtures/dimensiondata/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml libcloud/test/common/__init__.py libcloud/test/common/test_aliyun.py libcloud/test/common/test_aws.py libcloud/test/common/test_base.py libcloud/test/common/test_base_driver.py libcloud/test/common/test_cloudstack.py libcloud/test/common/test_digitalocean_v2.py libcloud/test/common/test_gandi.py libcloud/test/common/test_google.py libcloud/test/common/test_nfsn.py libcloud/test/common/test_openstack.py libcloud/test/common/test_openstack_identity.py libcloud/test/common/test_ovh.py libcloud/test/common/test_retry_limit.py libcloud/test/common/fixtures/digitalocean/_v1_events_12345670.json libcloud/test/common/fixtures/digitalocean/_v1_events_12345670_UNAUTHORIZED.json libcloud/test/common/fixtures/digitalocean/_v2_account.json 
libcloud/test/common/fixtures/digitalocean/_v2_account_UNAUTHORIZED.json libcloud/test/common/fixtures/digitalocean/_v2_actions.json libcloud/test/common/fixtures/digitalocean/_v2_actions_12345670.json libcloud/test/common/fixtures/digitalocean/_v2_actions_page_1.json libcloud/test/common/fixtures/digitalocean/_v2_actions_page_2.json libcloud/test/common/fixtures/google/pkey.json libcloud/test/common/fixtures/google/pkey.pem libcloud/test/compute/__init__.py libcloud/test/compute/test_abiquo.py libcloud/test/compute/test_auroracompute.py libcloud/test/compute/test_azure.py libcloud/test/compute/test_azure_arm.py libcloud/test/compute/test_base.py libcloud/test/compute/test_bluebox.py libcloud/test/compute/test_brightbox.py libcloud/test/compute/test_bsnl.py libcloud/test/compute/test_cloudscale.py libcloud/test/compute/test_cloudsigma_v1_0.py libcloud/test/compute/test_cloudsigma_v2_0.py libcloud/test/compute/test_cloudstack.py libcloud/test/compute/test_cloudwatt.py libcloud/test/compute/test_deployment.py libcloud/test/compute/test_digitalocean_v2.py libcloud/test/compute/test_dimensiondata_v2_3.py libcloud/test/compute/test_dimensiondata_v2_4.py libcloud/test/compute/test_ec2.py libcloud/test/compute/test_ecp.py libcloud/test/compute/test_ecs.py libcloud/test/compute/test_elasticstack.py libcloud/test/compute/test_exoscale.py libcloud/test/compute/test_gandi.py libcloud/test/compute/test_gce.py libcloud/test/compute/test_gogrid.py libcloud/test/compute/test_gridspot.py libcloud/test/compute/test_hostvirtual.py libcloud/test/compute/test_ikoula.py libcloud/test/compute/test_indosat.py libcloud/test/compute/test_internetsolutions.py libcloud/test/compute/test_joyent.py libcloud/test/compute/test_kili.py libcloud/test/compute/test_ktucloud.py libcloud/test/compute/test_libvirt_driver.py libcloud/test/compute/test_linode.py libcloud/test/compute/test_medone.py libcloud/test/compute/test_nephoscale.py libcloud/test/compute/test_ntta.py 
libcloud/test/compute/test_onapp.py libcloud/test/compute/test_oneandone.py libcloud/test/compute/test_opennebula.py libcloud/test/compute/test_openstack.py libcloud/test/compute/test_ovh.py libcloud/test/compute/test_packet.py libcloud/test/compute/test_profitbricks.py libcloud/test/compute/test_rackspace.py libcloud/test/compute/test_rimuhosting.py libcloud/test/compute/test_softlayer.py libcloud/test/compute/test_ssh_client.py libcloud/test/compute/test_types.py libcloud/test/compute/test_vcl.py libcloud/test/compute/test_vcloud.py libcloud/test/compute/test_voxel.py libcloud/test/compute/test_vpsnet.py libcloud/test/compute/test_vultr.py libcloud/test/compute/fixtures/abiquo/dcs.xml libcloud/test/compute/fixtures/abiquo/ent_1.xml libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2.xml libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_template_11.xml libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_templates.xml libcloud/test/compute/fixtures/abiquo/ent_1_dcreps.xml libcloud/test/compute/fixtures/abiquo/login.xml libcloud/test/compute/fixtures/abiquo/not_found_error.xml libcloud/test/compute/fixtures/abiquo/privilege_errors.html libcloud/test/compute/fixtures/abiquo/unauthorized_user.html libcloud/test/compute/fixtures/abiquo/vdc_4.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_needs_sync.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vm_3_not_allocated.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vms.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task_failed.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_allocated.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy.xml 
libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task_failed.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deployed.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_nics.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset_task.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task_failed.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_creation_ok.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms_allocated.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_creation_ok.xml libcloud/test/compute/fixtures/abiquo/vdc_4_vapps.xml libcloud/test/compute/fixtures/abiquo/vdcs.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_locations.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03.xml 
libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc1234.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_images.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_storageservices_dss123.xml libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_vmimages.xml libcloud/test/compute/fixtures/azure/libcloud.pem libcloud/test/compute/fixtures/azure_arm/_77777777_7777_7777_7777_777777777777_oauth2_token.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_providers_Microsoft_Commerce_RateCard.json 
libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_providers_Microsoft_Compute.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_providers_Microsoft_Compute_disks.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_providers_Microsoft_Compute_locations_eastus_vmSizes.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_providers_Microsoft_Compute_snapshots.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_providers_Microsoft_Compute_virtualMachines.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_resourceGroups_000000_providers_Microsoft_Compute_disks_test_disk_1.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_resourceGroups_000000_providers_Microsoft_Compute_snapshots_test_snap_1.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_resourceGroups_000000_providers_Microsoft_Compute_virtualMachines_test_node_1.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_resourceGroups_000000_providers_Microsoft_Compute_virtualMachines_test_node_1_InstanceView.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_resourceGroups_000000_providers_Microsoft_Network_networkInterfaces_test_node_1_nic.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_resourceGroups_111111_providers_Microsoft_Compute_disks.json libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_resourceGroups_111111_providers_Microsoft_Compute_snapshots.json libcloud/test/compute/fixtures/bluebox/api_block_products_json.json libcloud/test/compute/fixtures/bluebox/api_block_templates_json.json libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json 
libcloud/test/compute/fixtures/bluebox/api_blocks_json.json libcloud/test/compute/fixtures/bluebox/api_blocks_json_post.json libcloud/test/compute/fixtures/brightbox/create_cloud_ip.json libcloud/test/compute/fixtures/brightbox/create_server.json libcloud/test/compute/fixtures/brightbox/create_server_gb1_a.json libcloud/test/compute/fixtures/brightbox/create_server_gb1_b.json libcloud/test/compute/fixtures/brightbox/list_cloud_ips.json libcloud/test/compute/fixtures/brightbox/list_images.json libcloud/test/compute/fixtures/brightbox/list_server_types.json libcloud/test/compute/fixtures/brightbox/list_servers.json libcloud/test/compute/fixtures/brightbox/list_zones.json libcloud/test/compute/fixtures/brightbox/token.json libcloud/test/compute/fixtures/cloudscale/create_node.json libcloud/test/compute/fixtures/cloudscale/list_images.json libcloud/test/compute/fixtures/cloudscale/list_nodes.json libcloud/test/compute/fixtures/cloudscale/list_sizes.json libcloud/test/compute/fixtures/cloudsigma/drives_clone.txt libcloud/test/compute/fixtures/cloudsigma/drives_info.txt libcloud/test/compute/fixtures/cloudsigma/drives_single_info.txt libcloud/test/compute/fixtures/cloudsigma/drives_standard_info.txt libcloud/test/compute/fixtures/cloudsigma/resources_ip_create.txt libcloud/test/compute/fixtures/cloudsigma/resources_ip_list.txt libcloud/test/compute/fixtures/cloudsigma/servers_create.txt libcloud/test/compute/fixtures/cloudsigma/servers_info.txt libcloud/test/compute/fixtures/cloudsigma/servers_set.txt libcloud/test/compute/fixtures/cloudsigma_2_0/balance.json libcloud/test/compute/fixtures/cloudsigma_2_0/capabilities.json libcloud/test/compute/fixtures/cloudsigma_2_0/create_subscription.json libcloud/test/compute/fixtures/cloudsigma_2_0/currentusage.json libcloud/test/compute/fixtures/cloudsigma_2_0/drives_avail_groups.json libcloud/test/compute/fixtures/cloudsigma_2_0/drives_clone.json libcloud/test/compute/fixtures/cloudsigma_2_0/drives_create.json 
libcloud/test/compute/fixtures/cloudsigma_2_0/drives_detail.json libcloud/test/compute/fixtures/cloudsigma_2_0/drives_get.json libcloud/test/compute/fixtures/cloudsigma_2_0/drives_resize.json libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_no_rules.json libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_with_rules.json libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_detail.json libcloud/test/compute/fixtures/cloudsigma_2_0/libdrives.json libcloud/test/compute/fixtures/cloudsigma_2_0/pricing.json libcloud/test/compute/fixtures/cloudsigma_2_0/servers_attach_policy.json libcloud/test/compute/fixtures/cloudsigma_2_0/servers_avail_groups.json libcloud/test/compute/fixtures/cloudsigma_2_0/servers_clone.json libcloud/test/compute/fixtures/cloudsigma_2_0/servers_close_vnc.json libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create.json libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create_with_vlan.json libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_all_stopped.json libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_mixed_state.json libcloud/test/compute/fixtures/cloudsigma_2_0/servers_open_vnc.json libcloud/test/compute/fixtures/cloudsigma_2_0/start_already_started.json libcloud/test/compute/fixtures/cloudsigma_2_0/start_success.json libcloud/test/compute/fixtures/cloudsigma_2_0/stop_already_stopped.json libcloud/test/compute/fixtures/cloudsigma_2_0/stop_success.json libcloud/test/compute/fixtures/cloudsigma_2_0/subscriptions.json libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create.json libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create_with_resources.json libcloud/test/compute/fixtures/cloudsigma_2_0/tags_detail.json libcloud/test/compute/fixtures/cloudsigma_2_0/tags_get.json libcloud/test/compute/fixtures/cloudsigma_2_0/tags_update.json libcloud/test/compute/fixtures/cloudsigma_2_0/unknown_error.json libcloud/test/compute/fixtures/cloudstack/addNicToVirtualMachine_default.json 
libcloud/test/compute/fixtures/cloudstack/associateIpAddress_default.json libcloud/test/compute/fixtures/cloudstack/attachVolume_default.json libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupIngress_default.json libcloud/test/compute/fixtures/cloudstack/createAffinityGroup_default.json libcloud/test/compute/fixtures/cloudstack/createEgressFirewallRule_default.json libcloud/test/compute/fixtures/cloudstack/createFirewallRule_default.json libcloud/test/compute/fixtures/cloudstack/createFirewallRule_firewallicmp.json libcloud/test/compute/fixtures/cloudstack/createNetworkACLList_default.json libcloud/test/compute/fixtures/cloudstack/createNetworkACL_default.json libcloud/test/compute/fixtures/cloudstack/createNetwork_default.json libcloud/test/compute/fixtures/cloudstack/createPortForwardingRule_default.json libcloud/test/compute/fixtures/cloudstack/createSSHKeyPair_default.json libcloud/test/compute/fixtures/cloudstack/createSecurityGroup_default.json libcloud/test/compute/fixtures/cloudstack/createSnapshot_default.json libcloud/test/compute/fixtures/cloudstack/createTags_default.json libcloud/test/compute/fixtures/cloudstack/createTemplate_default.json libcloud/test/compute/fixtures/cloudstack/createVPC_default.json libcloud/test/compute/fixtures/cloudstack/createVolume_default.json libcloud/test/compute/fixtures/cloudstack/createVolume_withcustomdisksize.json libcloud/test/compute/fixtures/cloudstack/createVolume_withvolumetype.json libcloud/test/compute/fixtures/cloudstack/createVpnConnection_default.json libcloud/test/compute/fixtures/cloudstack/createVpnCustomerGateway_default.json libcloud/test/compute/fixtures/cloudstack/createVpnGateway_default.json libcloud/test/compute/fixtures/cloudstack/deleteAffinityGroup_default.json libcloud/test/compute/fixtures/cloudstack/deleteEgressFirewallRule_default.json libcloud/test/compute/fixtures/cloudstack/deleteFirewallRule_default.json libcloud/test/compute/fixtures/cloudstack/deleteNetwork_default.json 
libcloud/test/compute/fixtures/cloudstack/deletePortForwardingRule_default.json libcloud/test/compute/fixtures/cloudstack/deleteSSHKeyPair_default.json libcloud/test/compute/fixtures/cloudstack/deleteSecurityGroup_default.json libcloud/test/compute/fixtures/cloudstack/deleteSnapshot_default.json libcloud/test/compute/fixtures/cloudstack/deleteTags_default.json libcloud/test/compute/fixtures/cloudstack/deleteVPC_default.json libcloud/test/compute/fixtures/cloudstack/deleteVolume_default.json libcloud/test/compute/fixtures/cloudstack/deleteVpnConnection_default.json libcloud/test/compute/fixtures/cloudstack/deleteVpnCustomerGateway_default.json libcloud/test/compute/fixtures/cloudstack/deleteVpnGateway_default.json libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_default.json libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail.json libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail2.json libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployip.json libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploykeyname.json libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploynetworks.json libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployproject.json libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploysecuritygroup.json libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_rootdisksize.json libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_stoppedvm.json libcloud/test/compute/fixtures/cloudstack/destroyVirtualMachine_default.json libcloud/test/compute/fixtures/cloudstack/detachVolume_default.json libcloud/test/compute/fixtures/cloudstack/disassociateIpAddress_default.json libcloud/test/compute/fixtures/cloudstack/dummy_rsa.pub libcloud/test/compute/fixtures/cloudstack/listAffinityGroupTypes_default.json libcloud/test/compute/fixtures/cloudstack/listAffinityGroups_default.json 
libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_default.json libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_withcustomdisksize.json libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_withvolumetype.json libcloud/test/compute/fixtures/cloudstack/listEgressFirewallRules_default.json libcloud/test/compute/fixtures/cloudstack/listFirewallRules_default.json libcloud/test/compute/fixtures/cloudstack/listFirewallRules_firewallicmp.json libcloud/test/compute/fixtures/cloudstack/listIpForwardingRules_default.json libcloud/test/compute/fixtures/cloudstack/listNetworkACLLists_default.json libcloud/test/compute/fixtures/cloudstack/listNetworkACLs_default.json libcloud/test/compute/fixtures/cloudstack/listNetworkOfferings_default.json libcloud/test/compute/fixtures/cloudstack/listNetworks_default.json libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail.json libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail2.json libcloud/test/compute/fixtures/cloudstack/listNetworks_deployip.json libcloud/test/compute/fixtures/cloudstack/listNetworks_deploynetworks.json libcloud/test/compute/fixtures/cloudstack/listNetworks_rootdisksize.json libcloud/test/compute/fixtures/cloudstack/listNetworks_stoppedvm.json libcloud/test/compute/fixtures/cloudstack/listNics_default.json libcloud/test/compute/fixtures/cloudstack/listOsTypes_default.json libcloud/test/compute/fixtures/cloudstack/listPortForwardingRules_default.json libcloud/test/compute/fixtures/cloudstack/listProjects_default.json libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_default.json libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_firewallicmp.json libcloud/test/compute/fixtures/cloudstack/listResourceLimits_default.json libcloud/test/compute/fixtures/cloudstack/listRouters_default.json libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_default.json libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one.json 
libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one_doesnt_exist.json libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_no_keys.json libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_default.json libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_no_groups.json libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_default.json libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_deployip.json libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_deploynetworks.json libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_rootdisksize.json libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_stoppedvm.json libcloud/test/compute/fixtures/cloudstack/listSnapshots_default.json libcloud/test/compute/fixtures/cloudstack/listTemplates_default.json libcloud/test/compute/fixtures/cloudstack/listTemplates_deployip.json libcloud/test/compute/fixtures/cloudstack/listTemplates_deploynetworks.json libcloud/test/compute/fixtures/cloudstack/listTemplates_notemplates.json libcloud/test/compute/fixtures/cloudstack/listTemplates_rootdisksize.json libcloud/test/compute/fixtures/cloudstack/listTemplates_stoppedvm.json libcloud/test/compute/fixtures/cloudstack/listVPCOfferings_default.json libcloud/test/compute/fixtures/cloudstack/listVPCs_default.json libcloud/test/compute/fixtures/cloudstack/listVirtualMachines_default.json libcloud/test/compute/fixtures/cloudstack/listVolumes_default.json libcloud/test/compute/fixtures/cloudstack/listVolumes_rootdisksize.json libcloud/test/compute/fixtures/cloudstack/listVpnConnections_default.json libcloud/test/compute/fixtures/cloudstack/listVpnCustomerGateways_default.json libcloud/test/compute/fixtures/cloudstack/listVpnGateways_default.json libcloud/test/compute/fixtures/cloudstack/listZones_default.json libcloud/test/compute/fixtures/cloudstack/listZones_deployfail.json libcloud/test/compute/fixtures/cloudstack/listZones_deployfail2.json 
libcloud/test/compute/fixtures/cloudstack/listZones_deployip.json libcloud/test/compute/fixtures/cloudstack/listZones_deploynetworks.json libcloud/test/compute/fixtures/cloudstack/listZones_rootdisksize.json libcloud/test/compute/fixtures/cloudstack/listZones_stoppedvm.json libcloud/test/compute/fixtures/cloudstack/listZones_withcustomdisksize.json libcloud/test/compute/fixtures/cloudstack/listZones_withvolumetype.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11111.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_111112.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11112.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11113.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11114.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11115.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11116.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11117.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149341.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149342.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149343.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149366.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300001.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300002.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300003.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300004.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300005.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300006.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17164.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17165.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17166.json 
libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17177.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17188.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17199.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17200.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_addnictovm.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_attachvolumejob.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createVpnConnection.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createVpnCustomerGateway.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createVpnGateway.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createtagsjob.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createvolumejob.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteNetwork.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteVPC.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteVpnConnection.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteVpnCustomerGateway.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteVpnGateway.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deletetagsjob.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployrootdisksize.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployvmstopped.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployvmwithid.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_detachvolumejob.json libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_removenic.json libcloud/test/compute/fixtures/cloudstack/rebootVirtualMachine_default.json libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_default.json 
libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_error.json libcloud/test/compute/fixtures/cloudstack/removeNicFromVirtualMachine_default.json libcloud/test/compute/fixtures/cloudstack/startVirtualMachine_default.json libcloud/test/compute/fixtures/cloudstack/stopVirtualMachine_default.json libcloud/test/compute/fixtures/cloudstack/updateVMAffinityGroup_default.json libcloud/test/compute/fixtures/digitalocean/create_image.json libcloud/test/compute/fixtures/digitalocean/create_key_pair.json libcloud/test/compute/fixtures/digitalocean/error.json libcloud/test/compute/fixtures/digitalocean/ex_change_kernel.json libcloud/test/compute/fixtures/digitalocean/ex_hard_reboot.json libcloud/test/compute/fixtures/digitalocean/ex_power_on_node.json libcloud/test/compute/fixtures/digitalocean/ex_shutdown_node.json libcloud/test/compute/fixtures/digitalocean/get_image.json libcloud/test/compute/fixtures/digitalocean/list_key_pairs.json libcloud/test/compute/fixtures/digitalocean/list_nodes_page_1.json libcloud/test/compute/fixtures/digitalocean_v2/attach_volume.json libcloud/test/compute/fixtures/digitalocean_v2/create_image.json libcloud/test/compute/fixtures/digitalocean_v2/create_key_pair.json libcloud/test/compute/fixtures/digitalocean_v2/create_node.json libcloud/test/compute/fixtures/digitalocean_v2/create_volume.json libcloud/test/compute/fixtures/digitalocean_v2/create_volume_snapshot.json libcloud/test/compute/fixtures/digitalocean_v2/detach_volume.json libcloud/test/compute/fixtures/digitalocean_v2/error.json libcloud/test/compute/fixtures/digitalocean_v2/error_invalid_image.json libcloud/test/compute/fixtures/digitalocean_v2/ex_change_kernel.json libcloud/test/compute/fixtures/digitalocean_v2/ex_hard_reboot.json libcloud/test/compute/fixtures/digitalocean_v2/ex_power_on_node.json libcloud/test/compute/fixtures/digitalocean_v2/ex_rename_node.json libcloud/test/compute/fixtures/digitalocean_v2/ex_shutdown_node.json 
libcloud/test/compute/fixtures/digitalocean_v2/get_image.json libcloud/test/compute/fixtures/digitalocean_v2/list_images.json libcloud/test/compute/fixtures/digitalocean_v2/list_key_pairs.json libcloud/test/compute/fixtures/digitalocean_v2/list_locations.json libcloud/test/compute/fixtures/digitalocean_v2/list_nodes.json libcloud/test/compute/fixtures/digitalocean_v2/list_nodes_empty.json libcloud/test/compute/fixtures/digitalocean_v2/list_nodes_page_1.json libcloud/test/compute/fixtures/digitalocean_v2/list_sizes.json libcloud/test/compute/fixtures/digitalocean_v2/list_volume_snapshots.json libcloud/test/compute/fixtures/digitalocean_v2/list_volumes.json libcloud/test/compute/fixtures/digitalocean_v2/list_volumes_empty.json libcloud/test/compute/fixtures/digitalocean_v2/reboot_node.json libcloud/test/compute/fixtures/dimensiondata/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml libcloud/test/compute/fixtures/dimensiondata/audit_log.csv libcloud/test/compute/fixtures/dimensiondata/detailed_usage_report.csv libcloud/test/compute/fixtures/dimensiondata/image_customerImage.xml libcloud/test/compute/fixtures/dimensiondata/image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c.xml libcloud/test/compute/fixtures/dimensiondata/image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d.xml libcloud/test/compute/fixtures/dimensiondata/image_customerImage_BAD_REQUEST.xml libcloud/test/compute/fixtures/dimensiondata/image_osImage.xml libcloud/test/compute/fixtures/dimensiondata/image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a.xml libcloud/test/compute/fixtures/dimensiondata/image_osImage_BAD_REQUEST.xml libcloud/test/compute/fixtures/dimensiondata/image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c.xml libcloud/test/compute/fixtures/dimensiondata/infrastructure_datacenter.xml libcloud/test/compute/fixtures/dimensiondata/infrastructure_datacenter_NA9.xml libcloud/test/compute/fixtures/dimensiondata/ip_address_list_create.xml 
libcloud/test/compute/fixtures/dimensiondata/ip_address_list_delete.xml libcloud/test/compute/fixtures/dimensiondata/ip_address_list_edit.xml libcloud/test/compute/fixtures/dimensiondata/ip_address_lists.xml libcloud/test/compute/fixtures/dimensiondata/ip_address_lists_FILTERBYNAME.xml libcloud/test/compute/fixtures/dimensiondata/network_addPublicIpBlock.xml libcloud/test/compute/fixtures/dimensiondata/network_createFirewallRule.xml libcloud/test/compute/fixtures/dimensiondata/network_createNatRule.xml libcloud/test/compute/fixtures/dimensiondata/network_deleteFirewallRule.xml libcloud/test/compute/fixtures/dimensiondata/network_deleteNatRule.xml libcloud/test/compute/fixtures/dimensiondata/network_deleteNetworkDomain.xml libcloud/test/compute/fixtures/dimensiondata/network_deleteVlan.xml libcloud/test/compute/fixtures/dimensiondata/network_deployNetworkDomain.xml libcloud/test/compute/fixtures/dimensiondata/network_deployVlan.xml libcloud/test/compute/fixtures/dimensiondata/network_editFirewallRule.xml libcloud/test/compute/fixtures/dimensiondata/network_editNetworkDomain.xml libcloud/test/compute/fixtures/dimensiondata/network_editVlan.xml libcloud/test/compute/fixtures/dimensiondata/network_expandVlan.xml libcloud/test/compute/fixtures/dimensiondata/network_firewallRule.xml libcloud/test/compute/fixtures/dimensiondata/network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c.xml libcloud/test/compute/fixtures/dimensiondata/network_natRule.xml libcloud/test/compute/fixtures/dimensiondata/network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce.xml libcloud/test/compute/fixtures/dimensiondata/network_networkDomain.xml libcloud/test/compute/fixtures/dimensiondata/network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xml libcloud/test/compute/fixtures/dimensiondata/network_publicIpBlock.xml libcloud/test/compute/fixtures/dimensiondata/network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba.xml 
libcloud/test/compute/fixtures/dimensiondata/network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8.xml libcloud/test/compute/fixtures/dimensiondata/network_removePublicIpBlock.xml libcloud/test/compute/fixtures/dimensiondata/network_vlan.xml libcloud/test/compute/fixtures/dimensiondata/network_vlan_0e56433f_d808_4669_821d_812769517ff8.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create_FAIL.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete_FAIL.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml 
libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_POST.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_base_image.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_base_imageWithDiskSpeed.xml libcloud/test/compute/fixtures/dimensiondata/oec_0_9_myaccount.xml libcloud/test/compute/fixtures/dimensiondata/port_list_create.xml libcloud/test/compute/fixtures/dimensiondata/port_list_delete.xml libcloud/test/compute/fixtures/dimensiondata/port_list_edit.xml libcloud/test/compute/fixtures/dimensiondata/port_list_get.xml libcloud/test/compute/fixtures/dimensiondata/port_list_lists.xml 
libcloud/test/compute/fixtures/dimensiondata/report_usageMonitoring.xml libcloud/test/compute/fixtures/dimensiondata/server_GetServer.xml libcloud/test/compute/fixtures/dimensiondata/server_addDisk.xml libcloud/test/compute/fixtures/dimensiondata/server_addNic.xml libcloud/test/compute/fixtures/dimensiondata/server_antiAffinityRule_list.xml libcloud/test/compute/fixtures/dimensiondata/server_antiAffinityRule_list_PAGINATED.xml libcloud/test/compute/fixtures/dimensiondata/server_changeServerMonitoringPlan.xml libcloud/test/compute/fixtures/dimensiondata/server_cleanServer.xml libcloud/test/compute/fixtures/dimensiondata/server_deleteServer.xml libcloud/test/compute/fixtures/dimensiondata/server_deleteServer_RESOURCEBUSY.xml libcloud/test/compute/fixtures/dimensiondata/server_deployServer.xml libcloud/test/compute/fixtures/dimensiondata/server_disableServerMonitoring.xml libcloud/test/compute/fixtures/dimensiondata/server_enableServerMonitoring.xml libcloud/test/compute/fixtures/dimensiondata/server_powerOffServer.xml libcloud/test/compute/fixtures/dimensiondata/server_powerOffServer_INPROGRESS.xml libcloud/test/compute/fixtures/dimensiondata/server_rebootServer.xml libcloud/test/compute/fixtures/dimensiondata/server_rebootServer_RESOURCEBUSY.xml libcloud/test/compute/fixtures/dimensiondata/server_reconfigureServer.xml libcloud/test/compute/fixtures/dimensiondata/server_removeDisk.xml libcloud/test/compute/fixtures/dimensiondata/server_removeNic.xml libcloud/test/compute/fixtures/dimensiondata/server_resetServer.xml libcloud/test/compute/fixtures/dimensiondata/server_server.xml libcloud/test/compute/fixtures/dimensiondata/server_server_NA3.xml libcloud/test/compute/fixtures/dimensiondata/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml libcloud/test/compute/fixtures/dimensiondata/server_server_paginated.xml libcloud/test/compute/fixtures/dimensiondata/server_server_paginated_empty.xml libcloud/test/compute/fixtures/dimensiondata/server_shutdownServer.xml 
libcloud/test/compute/fixtures/dimensiondata/server_shutdownServer_INPROGRESS.xml libcloud/test/compute/fixtures/dimensiondata/server_startServer.xml libcloud/test/compute/fixtures/dimensiondata/server_startServer_INPROGRESS.xml libcloud/test/compute/fixtures/dimensiondata/server_updateVmwareTools.xml libcloud/test/compute/fixtures/dimensiondata/summary_usage_report.csv libcloud/test/compute/fixtures/dimensiondata/tag_applyTags.xml libcloud/test/compute/fixtures/dimensiondata/tag_applyTags_BADREQUEST.xml libcloud/test/compute/fixtures/dimensiondata/tag_createTagKey.xml libcloud/test/compute/fixtures/dimensiondata/tag_createTagKey_BADREQUEST.xml libcloud/test/compute/fixtures/dimensiondata/tag_deleteTagKey.xml libcloud/test/compute/fixtures/dimensiondata/tag_deleteTagKey_BADREQUEST.xml libcloud/test/compute/fixtures/dimensiondata/tag_editTagKey.xml libcloud/test/compute/fixtures/dimensiondata/tag_editTagKey_BADREQUEST.xml libcloud/test/compute/fixtures/dimensiondata/tag_removeTag.xml libcloud/test/compute/fixtures/dimensiondata/tag_removeTag_BADREQUEST.xml libcloud/test/compute/fixtures/dimensiondata/tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54.xml libcloud/test/compute/fixtures/dimensiondata/tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54_BADREQUEST.xml libcloud/test/compute/fixtures/dimensiondata/tag_tagKey_list.xml libcloud/test/compute/fixtures/dimensiondata/tag_tagKey_list_SINGLE.xml libcloud/test/compute/fixtures/dimensiondata/tag_tag_list.xml libcloud/test/compute/fixtures/dimensiondata/2.4/change_nic_networkadapter_response.xml libcloud/test/compute/fixtures/dimensiondata/2.4/deploy_customised_server.xml libcloud/test/compute/fixtures/dimensiondata/2.4/exchange_nic_vlans_response.xml libcloud/test/compute/fixtures/dimensiondata/2.4/image_customerImage.xml libcloud/test/compute/fixtures/dimensiondata/2.4/image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c.xml 
libcloud/test/compute/fixtures/dimensiondata/2.4/image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d.xml libcloud/test/compute/fixtures/dimensiondata/2.4/image_osImage.xml libcloud/test/compute/fixtures/dimensiondata/2.4/image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a.xml libcloud/test/compute/fixtures/dimensiondata/2.4/image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c.xml libcloud/test/compute/fixtures/dimensiondata/2.4/import_image_response.xml libcloud/test/compute/fixtures/dimensiondata/2.4/server_GetServer.xml libcloud/test/compute/fixtures/dimensiondata/2.4/server_cleanServer.xml libcloud/test/compute/fixtures/dimensiondata/2.4/server_clone_response.xml libcloud/test/compute/fixtures/dimensiondata/2.4/server_server.xml libcloud/test/compute/fixtures/dimensiondata/2.4/server_server_NA3.xml libcloud/test/compute/fixtures/dimensiondata/2.4/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml libcloud/test/compute/fixtures/dimensiondata/2.4/server_server_paginated.xml libcloud/test/compute/fixtures/ec2/allocate_address.xml libcloud/test/compute/fixtures/ec2/allocate_vpc_address.xml libcloud/test/compute/fixtures/ec2/associate_address.xml libcloud/test/compute/fixtures/ec2/associate_vpc_address.xml libcloud/test/compute/fixtures/ec2/attach_internet_gateway.xml libcloud/test/compute/fixtures/ec2/attach_network_interface.xml libcloud/test/compute/fixtures/ec2/attach_volume.xml libcloud/test/compute/fixtures/ec2/authorize_security_group_egress.xml libcloud/test/compute/fixtures/ec2/authorize_security_group_ingress.xml libcloud/test/compute/fixtures/ec2/copy_image.xml libcloud/test/compute/fixtures/ec2/create_image.xml libcloud/test/compute/fixtures/ec2/create_internet_gateway.xml libcloud/test/compute/fixtures/ec2/create_key_pair.xml libcloud/test/compute/fixtures/ec2/create_network_interface.xml libcloud/test/compute/fixtures/ec2/create_placement_groups.xml libcloud/test/compute/fixtures/ec2/create_security_group.xml 
libcloud/test/compute/fixtures/ec2/create_snapshot.xml libcloud/test/compute/fixtures/ec2/create_subnet.xml libcloud/test/compute/fixtures/ec2/create_tags.xml libcloud/test/compute/fixtures/ec2/create_volume.xml libcloud/test/compute/fixtures/ec2/create_vpc.xml libcloud/test/compute/fixtures/ec2/delete_internet_gateway.xml libcloud/test/compute/fixtures/ec2/delete_key_pair.xml libcloud/test/compute/fixtures/ec2/delete_network_interface.xml libcloud/test/compute/fixtures/ec2/delete_placement_groups.xml libcloud/test/compute/fixtures/ec2/delete_security_group.xml libcloud/test/compute/fixtures/ec2/delete_snapshot.xml libcloud/test/compute/fixtures/ec2/delete_subnet.xml libcloud/test/compute/fixtures/ec2/delete_tags.xml libcloud/test/compute/fixtures/ec2/delete_volume.xml libcloud/test/compute/fixtures/ec2/delete_vpc.xml libcloud/test/compute/fixtures/ec2/deregister_image.xml libcloud/test/compute/fixtures/ec2/describe_account_attributes.xml libcloud/test/compute/fixtures/ec2/describe_addresses.xml libcloud/test/compute/fixtures/ec2/describe_addresses_all.xml libcloud/test/compute/fixtures/ec2/describe_addresses_multi.xml libcloud/test/compute/fixtures/ec2/describe_addresses_single.xml libcloud/test/compute/fixtures/ec2/describe_availability_zones.xml libcloud/test/compute/fixtures/ec2/describe_images.xml libcloud/test/compute/fixtures/ec2/describe_images_ex_imageids.xml libcloud/test/compute/fixtures/ec2/describe_import_snapshot_tasks.xml libcloud/test/compute/fixtures/ec2/describe_import_snapshot_tasks_active.xml libcloud/test/compute/fixtures/ec2/describe_instance_types.xml libcloud/test/compute/fixtures/ec2/describe_instances.xml libcloud/test/compute/fixtures/ec2/describe_internet_gateways.xml libcloud/test/compute/fixtures/ec2/describe_key_pairs.xml libcloud/test/compute/fixtures/ec2/describe_key_pairs_doesnt_exist.xml libcloud/test/compute/fixtures/ec2/describe_network_interfaces.xml libcloud/test/compute/fixtures/ec2/describe_placement_groups.xml 
libcloud/test/compute/fixtures/ec2/describe_reserved_instances.xml libcloud/test/compute/fixtures/ec2/describe_security_groups.xml libcloud/test/compute/fixtures/ec2/describe_snapshots.xml libcloud/test/compute/fixtures/ec2/describe_subnets.xml libcloud/test/compute/fixtures/ec2/describe_tags.xml libcloud/test/compute/fixtures/ec2/describe_volumes.xml libcloud/test/compute/fixtures/ec2/describe_volumes_modifications.xml libcloud/test/compute/fixtures/ec2/describe_vpcs.xml libcloud/test/compute/fixtures/ec2/detach_internet_gateway.xml libcloud/test/compute/fixtures/ec2/detach_network_interface.xml libcloud/test/compute/fixtures/ec2/detach_volume.xml libcloud/test/compute/fixtures/ec2/disassociate_address.xml libcloud/test/compute/fixtures/ec2/get_console_output.xml libcloud/test/compute/fixtures/ec2/import_key_pair.xml libcloud/test/compute/fixtures/ec2/import_snapshot.xml libcloud/test/compute/fixtures/ec2/modify_image_attribute.xml libcloud/test/compute/fixtures/ec2/modify_instance_attribute.xml libcloud/test/compute/fixtures/ec2/modify_snapshot_attribute.xml libcloud/test/compute/fixtures/ec2/modify_volume.xml libcloud/test/compute/fixtures/ec2/reboot_instances.xml libcloud/test/compute/fixtures/ec2/register_image.xml libcloud/test/compute/fixtures/ec2/release_address.xml libcloud/test/compute/fixtures/ec2/revoke_security_group_egress.xml libcloud/test/compute/fixtures/ec2/revoke_security_group_ingress.xml libcloud/test/compute/fixtures/ec2/run_instances.xml libcloud/test/compute/fixtures/ec2/run_instances_iam_profile.xml libcloud/test/compute/fixtures/ec2/run_instances_idem.xml libcloud/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml libcloud/test/compute/fixtures/ec2/run_instances_with_subnet_and_security_group.xml libcloud/test/compute/fixtures/ec2/start_instances.xml libcloud/test/compute/fixtures/ec2/stop_instances.xml libcloud/test/compute/fixtures/ec2/terminate_instances.xml libcloud/test/compute/fixtures/ecp/htemplate_list.json 
libcloud/test/compute/fixtures/ecp/network_list.json libcloud/test/compute/fixtures/ecp/ptemplate_list.json libcloud/test/compute/fixtures/ecp/vm_1_action_delete.json libcloud/test/compute/fixtures/ecp/vm_1_action_start.json libcloud/test/compute/fixtures/ecp/vm_1_action_stop.json libcloud/test/compute/fixtures/ecp/vm_1_get.json libcloud/test/compute/fixtures/ecp/vm_list.json libcloud/test/compute/fixtures/ecp/vm_put.json libcloud/test/compute/fixtures/ecs/attach_disk.xml libcloud/test/compute/fixtures/ecs/copy_image.xml libcloud/test/compute/fixtures/ecs/create_disk.xml libcloud/test/compute/fixtures/ecs/create_image.xml libcloud/test/compute/fixtures/ecs/create_instance.xml libcloud/test/compute/fixtures/ecs/create_node_describe_instances.xml libcloud/test/compute/fixtures/ecs/create_public_ip.xml libcloud/test/compute/fixtures/ecs/create_security_group.xml libcloud/test/compute/fixtures/ecs/create_snapshot.xml libcloud/test/compute/fixtures/ecs/create_volume_describe_disks.xml libcloud/test/compute/fixtures/ecs/delete_disk.xml libcloud/test/compute/fixtures/ecs/delete_image.xml libcloud/test/compute/fixtures/ecs/delete_instance.xml libcloud/test/compute/fixtures/ecs/delete_security_group_by_id.xml libcloud/test/compute/fixtures/ecs/delete_snapshot.xml libcloud/test/compute/fixtures/ecs/describe_disks.xml libcloud/test/compute/fixtures/ecs/describe_images.xml libcloud/test/compute/fixtures/ecs/describe_instance_types.xml libcloud/test/compute/fixtures/ecs/describe_instances.xml libcloud/test/compute/fixtures/ecs/describe_regions.xml libcloud/test/compute/fixtures/ecs/describe_security_group_attributes.xml libcloud/test/compute/fixtures/ecs/describe_security_groups.xml libcloud/test/compute/fixtures/ecs/describe_snapshots.xml libcloud/test/compute/fixtures/ecs/describe_zones.xml libcloud/test/compute/fixtures/ecs/destroy_node_describe_instances.xml libcloud/test/compute/fixtures/ecs/destroy_volume_describe_disks.xml 
libcloud/test/compute/fixtures/ecs/detach_disk.xml libcloud/test/compute/fixtures/ecs/detach_volume_describe_disks.xml libcloud/test/compute/fixtures/ecs/get_image_describe_images.xml libcloud/test/compute/fixtures/ecs/join_security_group_by_id.xml libcloud/test/compute/fixtures/ecs/leave_security_group_by_id.xml libcloud/test/compute/fixtures/ecs/pages_describe_images.xml libcloud/test/compute/fixtures/ecs/pages_describe_images_page2.xml libcloud/test/compute/fixtures/ecs/reboot_instance.xml libcloud/test/compute/fixtures/ecs/reboot_node_describe_instances.xml libcloud/test/compute/fixtures/ecs/start_instance.xml libcloud/test/compute/fixtures/ecs/stop_instance.xml libcloud/test/compute/fixtures/ecs/stop_node_describe_instances.xml libcloud/test/compute/fixtures/elastichosts/drives_create.json libcloud/test/compute/fixtures/elastichosts/drives_info.json libcloud/test/compute/fixtures/elastichosts/offline_servers_info.json libcloud/test/compute/fixtures/elastichosts/servers_create.json libcloud/test/compute/fixtures/elastichosts/servers_info.json libcloud/test/compute/fixtures/fcu/ex_describe_instance_types.xml libcloud/test/compute/fixtures/fcu/ex_describe_product_types.xml libcloud/test/compute/fixtures/fcu/ex_describe_quotas.xml libcloud/test/compute/fixtures/fcu/ex_get_product_type.xml libcloud/test/compute/fixtures/fcu/ex_modify_instance_keypair.xml libcloud/test/compute/fixtures/gandi/account_info.xml libcloud/test/compute/fixtures/gandi/account_info_rating.xml libcloud/test/compute/fixtures/gandi/datacenter_list.xml libcloud/test/compute/fixtures/gandi/disk_attach.xml libcloud/test/compute/fixtures/gandi/disk_create.xml libcloud/test/compute/fixtures/gandi/disk_create_from.xml libcloud/test/compute/fixtures/gandi/disk_delete.xml libcloud/test/compute/fixtures/gandi/disk_detach.xml libcloud/test/compute/fixtures/gandi/disk_info.xml libcloud/test/compute/fixtures/gandi/disk_list.xml libcloud/test/compute/fixtures/gandi/disk_update.xml 
libcloud/test/compute/fixtures/gandi/iface_attach.xml libcloud/test/compute/fixtures/gandi/iface_detach.xml libcloud/test/compute/fixtures/gandi/iface_list.xml libcloud/test/compute/fixtures/gandi/image_list_dc0.xml libcloud/test/compute/fixtures/gandi/ip_list.xml libcloud/test/compute/fixtures/gandi/operation_info.xml libcloud/test/compute/fixtures/gandi/ssh_delete.xml libcloud/test/compute/fixtures/gandi/ssh_info.xml libcloud/test/compute/fixtures/gandi/ssh_list.xml libcloud/test/compute/fixtures/gandi/vm_create_from.xml libcloud/test/compute/fixtures/gandi/vm_delete.xml libcloud/test/compute/fixtures/gandi/vm_info.xml libcloud/test/compute/fixtures/gandi/vm_list.xml libcloud/test/compute/fixtures/gandi/vm_reboot.xml libcloud/test/compute/fixtures/gandi/vm_stop.xml libcloud/test/compute/fixtures/gce/_zones_us_central1_a_instanceGroupManagers_myinstancegroup_listManagedInstances.json libcloud/test/compute/fixtures/gce/aggregated_addresses.json libcloud/test/compute/fixtures/gce/aggregated_autoscalers.json libcloud/test/compute/fixtures/gce/aggregated_disks.json libcloud/test/compute/fixtures/gce/aggregated_disktypes.json libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json libcloud/test/compute/fixtures/gce/aggregated_instanceGroupManagers.json libcloud/test/compute/fixtures/gce/aggregated_instances.json libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json libcloud/test/compute/fixtures/gce/aggregated_subnetworks.json libcloud/test/compute/fixtures/gce/aggregated_targetInstances.json libcloud/test/compute/fixtures/gce/aggregated_targetPools.json libcloud/test/compute/fixtures/gce/generic_disk.json libcloud/test/compute/fixtures/gce/global_addresses.json libcloud/test/compute/fixtures/gce/global_addresses_lcaddressglobal.json libcloud/test/compute/fixtures/gce/global_addresses_lcaddressglobal_delete.json libcloud/test/compute/fixtures/gce/global_addresses_post.json libcloud/test/compute/fixtures/gce/global_backendServices-empty.json 
libcloud/test/compute/fixtures/gce/global_backendServices-web-service.json libcloud/test/compute/fixtures/gce/global_backendServices_no_backends.json libcloud/test/compute/fixtures/gce/global_backendServices_post.json libcloud/test/compute/fixtures/gce/global_backendServices_web_service.json libcloud/test/compute/fixtures/gce/global_backendServices_web_service_delete.json libcloud/test/compute/fixtures/gce/global_firewalls.json libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json libcloud/test/compute/fixtures/gce/global_firewalls_post.json libcloud/test/compute/fixtures/gce/global_forwardingRules.json libcloud/test/compute/fixtures/gce/global_forwardingRules_http_rule.json libcloud/test/compute/fixtures/gce/global_forwardingRules_http_rule_delete.json libcloud/test/compute/fixtures/gce/global_forwardingRules_post.json libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json libcloud/test/compute/fixtures/gce/global_httpHealthChecks_basic-check.json libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_delete.json libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_put.json libcloud/test/compute/fixtures/gce/global_httpHealthChecks_libcloud-lb-demo-healthcheck.json libcloud/test/compute/fixtures/gce/global_httpHealthChecks_post.json libcloud/test/compute/fixtures/gce/global_images.json libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20131014_deprecate.json libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20131120_delete.json libcloud/test/compute/fixtures/gce/global_images_family_notfound.json libcloud/test/compute/fixtures/gce/global_images_post.json libcloud/test/compute/fixtures/gce/global_instanceTemplates.json 
libcloud/test/compute/fixtures/gce/global_instanceTemplates_insert.json libcloud/test/compute/fixtures/gce/global_instanceTemplates_my_instance_template1.json libcloud/test/compute/fixtures/gce/global_networks.json libcloud/test/compute/fixtures/gce/global_networks_cf.json libcloud/test/compute/fixtures/gce/global_networks_custom_network.json libcloud/test/compute/fixtures/gce/global_networks_default.json libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.json libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json libcloud/test/compute/fixtures/gce/global_networks_post.json libcloud/test/compute/fixtures/gce/global_routes.json libcloud/test/compute/fixtures/gce/global_routes_lcdemoroute.json libcloud/test/compute/fixtures/gce/global_routes_lcdemoroute_delete.json libcloud/test/compute/fixtures/gce/global_routes_post.json libcloud/test/compute/fixtures/gce/global_snapshots.json libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot_delete.json libcloud/test/compute/fixtures/gce/global_sslcertificates.json libcloud/test/compute/fixtures/gce/global_sslcertificates_example.json libcloud/test/compute/fixtures/gce/global_sslcertificates_post.json libcloud/test/compute/fixtures/gce/global_targetHttpProxies.json libcloud/test/compute/fixtures/gce/global_targetHttpProxies_post.json libcloud/test/compute/fixtures/gce/global_targetHttpProxies_web_proxy.json libcloud/test/compute/fixtures/gce/global_targetHttpProxies_web_proxy_delete.json libcloud/test/compute/fixtures/gce/global_urlMaps.json libcloud/test/compute/fixtures/gce/global_urlMaps_post.json libcloud/test/compute/fixtures/gce/global_urlMaps_web_map.json libcloud/test/compute/fixtures/gce/global_urlMaps_web_map_delete.json 
libcloud/test/compute/fixtures/gce/operations_operation_global_addresses_lcaddressglobal_delete.json libcloud/test/compute/fixtures/gce/operations_operation_global_addresses_post.json libcloud/test/compute/fixtures/gce/operations_operation_global_backendServices_post.json libcloud/test/compute/fixtures/gce/operations_operation_global_backendServices_web_service_delete.json libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.json libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.json libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.json libcloud/test/compute/fixtures/gce/operations_operation_global_forwardingRules_http_rule_delete.json libcloud/test/compute/fixtures/gce/operations_operation_global_forwardingRules_post.json libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_delete.json libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_put.json libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_post.json libcloud/test/compute/fixtures/gce/operations_operation_global_image_post.json libcloud/test/compute/fixtures/gce/operations_operation_global_images_debian7_delete.json libcloud/test/compute/fixtures/gce/operations_operation_global_instanceTemplates_insert.json libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.json libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.json libcloud/test/compute/fixtures/gce/operations_operation_global_routes_lcdemoroute_delete.json libcloud/test/compute/fixtures/gce/operations_operation_global_routes_post.json libcloud/test/compute/fixtures/gce/operations_operation_global_snapshots_lcsnapshot_delete.json libcloud/test/compute/fixtures/gce/operations_operation_global_sslcertificates_post.json 
libcloud/test/compute/fixtures/gce/operations_operation_global_targetHttpProxies_post.json libcloud/test/compute/fixtures/gce/operations_operation_global_targetHttpProxies_web_proxy_delete.json libcloud/test/compute/fixtures/gce/operations_operation_global_urlMaps_post.json libcloud/test/compute/fixtures/gce/operations_operation_global_urlMaps_web_map_delete.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_lcforwardingrule_delete.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_post.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_subnetworks_post.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lb_pool_setBackup_post.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addInstance_post.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_delete.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeInstance_post.json libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_post.json libcloud/test/compute/fixtures/gce/operations_operation_setCommonInstanceMetadata.json libcloud/test/compute/fixtures/gce/operations_operation_setUsageExportBucket.json libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.json 
libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_createSnapshot_post.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node_name_addAccessConfig_done.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node_name_addAccessConfig_post.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node_name_deleteAccessConfig_done.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node_name_deleteAccessConfig_post.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_targetInstances_lctargetinstance_delete.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_targetInstances_post.json 
libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_instanceGroups_insert.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_instanceGroups_myname_addInstances.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_instanceGroups_myname_delete.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_instanceGroups_myname_removeInstances.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_instanceGroups_myname_setNamedPorts.json libcloud/test/compute/fixtures/gce/operations_operation_zones_us_central1_a_node_name_setMetadata_post.json libcloud/test/compute/fixtures/gce/project.json libcloud/test/compute/fixtures/gce/projects_centos-cloud_global_images.json libcloud/test/compute/fixtures/gce/projects_coreos-cloud_global_images.json libcloud/test/compute/fixtures/gce/projects_coreos-cloud_global_images_family_coreos.json libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json libcloud/test/compute/fixtures/gce/projects_gce-nvme_global_images.json libcloud/test/compute/fixtures/gce/projects_google-containers_global_images.json libcloud/test/compute/fixtures/gce/projects_opensuse-cloud_global_images.json libcloud/test/compute/fixtures/gce/projects_rhel-cloud_global_images.json libcloud/test/compute/fixtures/gce/projects_rhel-cloud_global_licenses_rhel_server.json libcloud/test/compute/fixtures/gce/projects_suse-cloud_global_images.json libcloud/test/compute/fixtures/gce/projects_suse-cloud_global_licenses_sles_11.json libcloud/test/compute/fixtures/gce/projects_suse-cloud_global_licenses_sles_12.json libcloud/test/compute/fixtures/gce/projects_ubuntu-os-cloud_global_images.json libcloud/test/compute/fixtures/gce/projects_windows-cloud_global_images.json libcloud/test/compute/fixtures/gce/projects_windows-cloud_global_licenses_windows_server_2008_r2_dc.json libcloud/test/compute/fixtures/gce/regions-paged-1.json 
libcloud/test/compute/fixtures/gce/regions-paged-2.json libcloud/test/compute/fixtures/gce/regions.json libcloud/test/compute/fixtures/gce/regions_asia-east1.json libcloud/test/compute/fixtures/gce/regions_europe-west1.json libcloud/test/compute/fixtures/gce/regions_us-central1.json libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.json libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.json libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules.json libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.json libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule_delete.json libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_libcloud-lb-demo-lb.json libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_post.json libcloud/test/compute/fixtures/gce/regions_us-central1_subnetworks.json libcloud/test/compute/fixtures/gce/regions_us-central1_subnetworks_cf_972cf02e6ad49112.json libcloud/test/compute/fixtures/gce/regions_us-central1_subnetworks_post.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_backup_pool.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lb_pool.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lb_pool_getHealth.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lb_pool_setBackup_post.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addInstance_post.json 
libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_delete.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeInstance_post.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_sticky.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_libcloud-lb-demo-lb-tp.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_post.json libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_www-pool.json libcloud/test/compute/fixtures/gce/regions_us-east1.json libcloud/test/compute/fixtures/gce/regions_us-east1_subnetworks_cf_972cf02e6ad49113.json libcloud/test/compute/fixtures/gce/setCommonInstanceMetadata_post.json libcloud/test/compute/fixtures/gce/setUsageExportBucket_post.json libcloud/test/compute/fixtures/gce/zones.json libcloud/test/compute/fixtures/gce/zones_asia-east1-b.json libcloud/test/compute/fixtures/gce/zones_europe-west1-a_diskTypes_pd_standard.json libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.json libcloud/test/compute/fixtures/gce/zones_us-central1-a.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_diskTypes.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_diskTypes_pd_ssd.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_diskTypes_pd_standard.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_createSnapshot_post.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.json 
libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_disktypes_pd-ssd.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroupManagers.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroupManagers_insert.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroupManagers_myinstancegroup.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroup_myinstancegroup.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instanceGroup_myinstancegroup2.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node_name_getSerialOutput.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_sn-node-name.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.json 
libcloud/test/compute/fixtures/gce/zones_us-central1-a_operations_operation_zones_us-central1-a_instanceGroupManagers_insert_post.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances_lctargetinstance.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances_lctargetinstance_delete.json libcloud/test/compute/fixtures/gce/zones_us-central1-a_targetInstances_post.json libcloud/test/compute/fixtures/gce/zones_us-central1-b_instanceGroupManagers_myinstancegroup.json libcloud/test/compute/fixtures/gce/zones_us-central1-b_instanceGroup_myinstancegroup.json libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-000.json libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-001.json libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-002.json libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-nopubip-001.json libcloud/test/compute/fixtures/gce/zones_us-east1-b.json libcloud/test/compute/fixtures/gce/zones_us-east1-b_instanceGroupManagers.json libcloud/test/compute/fixtures/gce/zones_us-east1-b_instanceGroup_myinstancegroup.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_insert.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_addInstances.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_delete.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_listInstances.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_removeInstances.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instanceGroups_myname_setNamedPorts.json 
libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_custom_node.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setMachineType.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setMetadata_post.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_stop.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node_setMachineType.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node_start.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_stopped_node_stop.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_setMachineType.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_setMachineType_notstopped.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_startnode.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_stopnode.json libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.json libcloud/test/compute/fixtures/gogrid/image_list.json libcloud/test/compute/fixtures/gogrid/image_save.json libcloud/test/compute/fixtures/gogrid/ip_list.json libcloud/test/compute/fixtures/gogrid/ip_list_empty.json libcloud/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json libcloud/test/compute/fixtures/gogrid/password_list.json libcloud/test/compute/fixtures/gogrid/server_add.json libcloud/test/compute/fixtures/gogrid/server_delete.json libcloud/test/compute/fixtures/gogrid/server_edit.json libcloud/test/compute/fixtures/gogrid/server_list.json libcloud/test/compute/fixtures/gogrid/server_power.json libcloud/test/compute/fixtures/gogrid/server_power_fail.json 
libcloud/test/compute/fixtures/hostvirtual/cancel_package.json libcloud/test/compute/fixtures/hostvirtual/get_node.json libcloud/test/compute/fixtures/hostvirtual/list_images.json libcloud/test/compute/fixtures/hostvirtual/list_locations.json libcloud/test/compute/fixtures/hostvirtual/list_nodes.json libcloud/test/compute/fixtures/hostvirtual/list_packages.json libcloud/test/compute/fixtures/hostvirtual/list_sizes.json libcloud/test/compute/fixtures/hostvirtual/node_reboot.json libcloud/test/compute/fixtures/hostvirtual/node_start.json libcloud/test/compute/fixtures/hostvirtual/node_stop.json libcloud/test/compute/fixtures/hostvirtual/order_package.json libcloud/test/compute/fixtures/hostvirtual/unlink_package.json libcloud/test/compute/fixtures/joyent/my_datasets.json libcloud/test/compute/fixtures/joyent/my_machines.json libcloud/test/compute/fixtures/joyent/my_machines_create.json libcloud/test/compute/fixtures/joyent/my_packages.json libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_default.json libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail.json libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail2.json libcloud/test/compute/fixtures/ktucloud/destroyVirtualMachine_default.json libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_default.json libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_nodisk.json libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_notemplates.json libcloud/test/compute/fixtures/ktucloud/listIpForwardingRules_default.json libcloud/test/compute/fixtures/ktucloud/listPortForwardingRules_default.json libcloud/test/compute/fixtures/ktucloud/listPublicIpAddresses_default.json libcloud/test/compute/fixtures/ktucloud/listVirtualMachines_default.json libcloud/test/compute/fixtures/ktucloud/listZones_default.json libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17164.json 
libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17165.json libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17166.json libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17177.json libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_attachvolumejob.json libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_createvolumejob.json libcloud/test/compute/fixtures/ktucloud/rebootVirtualMachine_default.json libcloud/test/compute/fixtures/linode/_avail_datacenters.json libcloud/test/compute/fixtures/linode/_avail_distributions.json libcloud/test/compute/fixtures/linode/_avail_kernels.json libcloud/test/compute/fixtures/linode/_avail_linodeplans.json libcloud/test/compute/fixtures/linode/_batch.json libcloud/test/compute/fixtures/linode/_linode_disk_list.json libcloud/test/compute/fixtures/linode/_linode_ip_list.json libcloud/test/compute/fixtures/linode/_linode_list.json libcloud/test/compute/fixtures/meta/helloworld.txt libcloud/test/compute/fixtures/meta/unicode.json libcloud/test/compute/fixtures/meta/unicode.txt libcloud/test/compute/fixtures/meta/unicode.xml libcloud/test/compute/fixtures/misc/dummy_rsa libcloud/test/compute/fixtures/misc/dummy_rsa.pub libcloud/test/compute/fixtures/nephoscale/list_images.json libcloud/test/compute/fixtures/nephoscale/list_keys.json libcloud/test/compute/fixtures/nephoscale/list_locations.json libcloud/test/compute/fixtures/nephoscale/list_nodes.json libcloud/test/compute/fixtures/nephoscale/list_password_keys.json libcloud/test/compute/fixtures/nephoscale/list_sizes.json libcloud/test/compute/fixtures/nephoscale/list_ssh_keys.json libcloud/test/compute/fixtures/nephoscale/success_action.json libcloud/test/compute/fixtures/onapp/create_node.json libcloud/test/compute/fixtures/onapp/get_key_pair.json libcloud/test/compute/fixtures/onapp/import_key_pair.json libcloud/test/compute/fixtures/onapp/list_images.json libcloud/test/compute/fixtures/onapp/list_key_pairs.json 
libcloud/test/compute/fixtures/onapp/list_nodes.json libcloud/test/compute/fixtures/onapp/profile.json libcloud/test/compute/fixtures/oneandone/auth_error.json libcloud/test/compute/fixtures/oneandone/create_node.json libcloud/test/compute/fixtures/oneandone/describe_firewall_policy.json libcloud/test/compute/fixtures/oneandone/describe_id_firewall_policy.json libcloud/test/compute/fixtures/oneandone/describe_server.json libcloud/test/compute/fixtures/oneandone/describe_shared_stoage.json libcloud/test/compute/fixtures/oneandone/ex_list_datacenters.json libcloud/test/compute/fixtures/oneandone/fixed_instance_sizes.json libcloud/test/compute/fixtures/oneandone/get_image.json libcloud/test/compute/fixtures/oneandone/get_server_image.json libcloud/test/compute/fixtures/oneandone/list_firewall_policies.json libcloud/test/compute/fixtures/oneandone/list_images.json libcloud/test/compute/fixtures/oneandone/list_load_balancer.json libcloud/test/compute/fixtures/oneandone/list_monitoring_policies.json libcloud/test/compute/fixtures/oneandone/list_public_ips.json libcloud/test/compute/fixtures/oneandone/list_servers.json libcloud/test/compute/fixtures/oneandone/list_shared_storages.json libcloud/test/compute/fixtures/oneandone/load_balancer.json libcloud/test/compute/fixtures/oneandone/load_balancer_rule.json libcloud/test/compute/fixtures/oneandone/load_balancer_rules.json libcloud/test/compute/fixtures/oneandone/load_balancer_server_ip.json libcloud/test/compute/fixtures/oneandone/load_balancer_server_ips.json libcloud/test/compute/fixtures/oneandone/monitoring_policy.json libcloud/test/compute/fixtures/oneandone/monitoring_policy_port.json libcloud/test/compute/fixtures/oneandone/monitoring_policy_ports.json libcloud/test/compute/fixtures/oneandone/monitoring_policy_process.json libcloud/test/compute/fixtures/oneandone/monitoring_policy_processes.json libcloud/test/compute/fixtures/oneandone/monitoring_policy_servers.json 
libcloud/test/compute/fixtures/oneandone/public_ip.json libcloud/test/compute/fixtures/oneandone/server_hardware.json libcloud/test/compute/fixtures/oneandone/server_ip.json libcloud/test/compute/fixtures/oneandone/server_ips.json libcloud/test/compute/fixtures/oneandone/shared_storage.json libcloud/test/compute/fixtures/oneandone/ttt.json libcloud/test/compute/fixtures/opennebula_1_4/compute_15.xml libcloud/test/compute/fixtures/opennebula_1_4/compute_25.xml libcloud/test/compute/fixtures/opennebula_1_4/compute_5.xml libcloud/test/compute/fixtures/opennebula_1_4/computes.xml libcloud/test/compute/fixtures/opennebula_1_4/disk_15.xml libcloud/test/compute/fixtures/opennebula_1_4/disk_5.xml libcloud/test/compute/fixtures/opennebula_1_4/network_15.xml libcloud/test/compute/fixtures/opennebula_1_4/network_5.xml libcloud/test/compute/fixtures/opennebula_1_4/networks.xml libcloud/test/compute/fixtures/opennebula_1_4/storage.xml libcloud/test/compute/fixtures/opennebula_2_0/compute_15.xml libcloud/test/compute/fixtures/opennebula_2_0/compute_25.xml libcloud/test/compute/fixtures/opennebula_2_0/compute_5.xml libcloud/test/compute/fixtures/opennebula_2_0/compute_collection.xml libcloud/test/compute/fixtures/opennebula_2_0/network_15.xml libcloud/test/compute/fixtures/opennebula_2_0/network_5.xml libcloud/test/compute/fixtures/opennebula_2_0/network_collection.xml libcloud/test/compute/fixtures/opennebula_2_0/storage_15.xml libcloud/test/compute/fixtures/opennebula_2_0/storage_5.xml libcloud/test/compute/fixtures/opennebula_2_0/storage_collection.xml libcloud/test/compute/fixtures/opennebula_3_0/network_15.xml libcloud/test/compute/fixtures/opennebula_3_0/network_5.xml libcloud/test/compute/fixtures/opennebula_3_0/network_collection.xml libcloud/test/compute/fixtures/opennebula_3_2/compute_5.xml libcloud/test/compute/fixtures/opennebula_3_2/instance_type_collection.xml libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml 
libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml libcloud/test/compute/fixtures/opennebula_3_8/instance_type_collection.xml libcloud/test/compute/fixtures/opennebula_3_8/instance_type_large.xml libcloud/test/compute/fixtures/opennebula_3_8/instance_type_medium.xml libcloud/test/compute/fixtures/opennebula_3_8/instance_type_small.xml libcloud/test/compute/fixtures/openstack/300_multiple_choices.json libcloud/test/compute/fixtures/openstack/_v1_1__auth.json libcloud/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json libcloud/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json libcloud/test/compute/fixtures/openstack/_v2_0__auth.json libcloud/test/compute/fixtures/openstack/_v2_0__auth_deployment.json libcloud/test/compute/fixtures/openstack/_v2_0__auth_lon.json libcloud/test/compute/fixtures/openstack/_v3__auth.json libcloud/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml libcloud/test/compute/fixtures/openstack/v1_slug_images_detail.xml libcloud/test/compute/fixtures/openstack/v1_slug_images_post.xml libcloud/test/compute/fixtures/openstack/v1_slug_limits.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_multiple_nodes.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml 
libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers_ips.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml libcloud/test/compute/fixtures/openstack_identity/v2/v2_0_tenants.json libcloud/test/compute/fixtures/openstack_identity/v3/v3_create_user.json libcloud/test/compute/fixtures/openstack_identity/v3/v3_domains.json libcloud/test/compute/fixtures/openstack_identity/v3/v3_domains_default.json libcloud/test/compute/fixtures/openstack_identity/v3/v3_domains_default_users_a_roles.json libcloud/test/compute/fixtures/openstack_identity/v3/v3_projects.json libcloud/test/compute/fixtures/openstack_identity/v3/v3_roles.json libcloud/test/compute/fixtures/openstack_identity/v3/v3_users.json libcloud/test/compute/fixtures/openstack_identity/v3/v3_users_a.json libcloud/test/compute/fixtures/openstack_identity/v3/v3_users_a_projects.json libcloud/test/compute/fixtures/openstack_identity/v3/v3_versions.json libcloud/test/compute/fixtures/openstack_v1.1/README libcloud/test/compute/fixtures/openstack_v1.1/_flavors_7.json libcloud/test/compute/fixtures/openstack_v1.1/_flavors_detail.json libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip.json libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip_pools.json libcloud/test/compute/fixtures/openstack_v1.1/_floating_ips.json libcloud/test/compute/fixtures/openstack_v1.1/_images_13.json libcloud/test/compute/fixtures/openstack_v1.1/_images_4949f9ee_2421_4c81_8b49_13119446008b.json libcloud/test/compute/fixtures/openstack_v1.1/_images_detail.json libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs.json 
libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create.json libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create_import.json libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_get_one.json libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_not_found.json libcloud/test/compute/fixtures/openstack_v1.1/_os_networks.json libcloud/test/compute/fixtures/openstack_v1.1/_os_networks_POST.json libcloud/test/compute/fixtures/openstack_v1.1/_os_quota_sets_aTenantId.json libcloud/test/compute/fixtures/openstack_v1.1/_os_security_group_rules_create.json libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups.json libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups_create.json libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots.json libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create.json libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create_rackspace.json libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_rackspace.json libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes.json libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_create.json libcloud/test/compute/fixtures/openstack_v1.1/_servers.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_12063_metadata_two_keys.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064_updated_name_bob.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_12065_os_volume_attachments.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_12086_console_output.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_1c01300f-ef97-4937-8f03-ac676d6234be_os-security-groups.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json 
libcloud/test/compute/fixtures/openstack_v1.1/_servers_create.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_create_disk_config.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_EMPTY.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_ERROR_STATE.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_pause.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_resume.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_suspend.json libcloud/test/compute/fixtures/openstack_v1.1/_servers_unpause.json libcloud/test/compute/fixtures/ovh/auth_time_get.json libcloud/test/compute/fixtures/ovh/flavor_get.json libcloud/test/compute/fixtures/ovh/flavor_get_detail.json libcloud/test/compute/fixtures/ovh/image_get.json libcloud/test/compute/fixtures/ovh/image_get_detail.json libcloud/test/compute/fixtures/ovh/instance_get.json libcloud/test/compute/fixtures/ovh/instance_get_detail.json libcloud/test/compute/fixtures/ovh/instance_post.json libcloud/test/compute/fixtures/ovh/region_get.json libcloud/test/compute/fixtures/ovh/ssh_get.json libcloud/test/compute/fixtures/ovh/ssh_get_detail.json libcloud/test/compute/fixtures/ovh/volume_get.json libcloud/test/compute/fixtures/ovh/volume_get_detail.json libcloud/test/compute/fixtures/ovh/volume_snapshot_get.json libcloud/test/compute/fixtures/ovh/volume_snapshot_get_details.json libcloud/test/compute/fixtures/packet/device_create.json libcloud/test/compute/fixtures/packet/devices.json libcloud/test/compute/fixtures/packet/facilities.json libcloud/test/compute/fixtures/packet/operatingsystems.json libcloud/test/compute/fixtures/packet/plans.json libcloud/test/compute/fixtures/packet/sshkey_create.json libcloud/test/compute/fixtures/packet/sshkeys.json libcloud/test/compute/fixtures/profitbricks/attach_volume.json libcloud/test/compute/fixtures/profitbricks/create_node.json 
libcloud/test/compute/fixtures/profitbricks/create_volume.json libcloud/test/compute/fixtures/profitbricks/create_volume_snapshot.json libcloud/test/compute/fixtures/profitbricks/ex_create_datacenter.json libcloud/test/compute/fixtures/profitbricks/ex_create_firewall_rule.json libcloud/test/compute/fixtures/profitbricks/ex_create_ip_block.json libcloud/test/compute/fixtures/profitbricks/ex_create_lan.json libcloud/test/compute/fixtures/profitbricks/ex_create_load_balancer.json libcloud/test/compute/fixtures/profitbricks/ex_create_network_interface.json libcloud/test/compute/fixtures/profitbricks/ex_describe_datacenter.json libcloud/test/compute/fixtures/profitbricks/ex_describe_firewall_rule.json libcloud/test/compute/fixtures/profitbricks/ex_describe_image.json libcloud/test/compute/fixtures/profitbricks/ex_describe_ip_block.json libcloud/test/compute/fixtures/profitbricks/ex_describe_lan.json libcloud/test/compute/fixtures/profitbricks/ex_describe_load_balancer.json libcloud/test/compute/fixtures/profitbricks/ex_describe_location.json libcloud/test/compute/fixtures/profitbricks/ex_describe_network_interface.json libcloud/test/compute/fixtures/profitbricks/ex_describe_node.json libcloud/test/compute/fixtures/profitbricks/ex_describe_snapshot.json libcloud/test/compute/fixtures/profitbricks/ex_describe_volume.json libcloud/test/compute/fixtures/profitbricks/ex_list_attached_volumes.json libcloud/test/compute/fixtures/profitbricks/ex_list_datacenters.json libcloud/test/compute/fixtures/profitbricks/ex_list_firewall_rules.json libcloud/test/compute/fixtures/profitbricks/ex_list_ip_blocks.json libcloud/test/compute/fixtures/profitbricks/ex_list_lans.json libcloud/test/compute/fixtures/profitbricks/ex_list_load_balanced_nics.json libcloud/test/compute/fixtures/profitbricks/ex_list_load_balancers.json libcloud/test/compute/fixtures/profitbricks/ex_list_network_interfaces.json libcloud/test/compute/fixtures/profitbricks/ex_rename_datacenter.json 
libcloud/test/compute/fixtures/profitbricks/ex_set_inet_access.json libcloud/test/compute/fixtures/profitbricks/ex_update_firewall_rule.json libcloud/test/compute/fixtures/profitbricks/ex_update_image.json libcloud/test/compute/fixtures/profitbricks/ex_update_lan.json libcloud/test/compute/fixtures/profitbricks/ex_update_load_balancer.json libcloud/test/compute/fixtures/profitbricks/ex_update_network_interface.json libcloud/test/compute/fixtures/profitbricks/ex_update_node.json libcloud/test/compute/fixtures/profitbricks/ex_update_snapshot.json libcloud/test/compute/fixtures/profitbricks/ex_update_volume.json libcloud/test/compute/fixtures/profitbricks/list_images.json libcloud/test/compute/fixtures/profitbricks/list_locations.json libcloud/test/compute/fixtures/profitbricks/list_nodes.json libcloud/test/compute/fixtures/profitbricks/list_snapshots.json libcloud/test/compute/fixtures/profitbricks/list_volumes.json libcloud/test/compute/fixtures/rimuhosting/r_distributions.json libcloud/test/compute/fixtures/rimuhosting/r_orders.json libcloud/test/compute/fixtures/rimuhosting/r_orders_new_vps.json libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json libcloud/test/compute/fixtures/rimuhosting/r_pricing_plans.json libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml libcloud/test/compute/fixtures/softlayer/empty.xml libcloud/test/compute/fixtures/softlayer/fail.xml libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Account_getSshKeys.xml libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_createObject.xml libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_deleteObject.xml 
libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_getObject.xml libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xml libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xml libcloud/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml libcloud/test/compute/fixtures/terremark/api_v0_8_login.xml libcloud/test/compute/fixtures/terremark/api_v0_8_org_240.xml libcloud/test/compute/fixtures/terremark/api_v0_8_task_10496.xml libcloud/test/compute/fixtures/terremark/api_v0_8_task_11001.xml libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml libcloud/test/compute/fixtures/vcl/XMLRPCaddRequest.xml libcloud/test/compute/fixtures/vcl/XMLRPCendRequest.xml libcloud/test/compute/fixtures/vcl/XMLRPCextendRequest.xml libcloud/test/compute/fixtures/vcl/XMLRPCgetImages.xml libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestConnectData.xml libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestIds.xml libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestStatus.xml libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Account_getVirtualGuests.xml libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Location_Datacenter_getDatacenters.xml 
libcloud/test/compute/fixtures/vcloud_1_5/api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml libcloud/test/compute/fixtures/vcloud_1_5/api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml libcloud/test/compute/fixtures/vcloud_1_5/api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml libcloud/test/compute/fixtures/vcloud_1_5/api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml libcloud/test/compute/fixtures/vcloud_1_5/api_org.xml libcloud/test/compute/fixtures/vcloud_1_5/api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml libcloud/test/compute/fixtures/vcloud_1_5/api_query_group.xml libcloud/test/compute/fixtures/vcloud_1_5/api_query_user.xml libcloud/test/compute/fixtures/vcloud_1_5/api_sessions.xml libcloud/test/compute/fixtures/vcloud_1_5/api_task_2518935e_b315_4d8e_9e99_9275f751877c.xml libcloud/test/compute/fixtures/vcloud_1_5/api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml libcloud/test/compute/fixtures/vcloud_1_5/api_task_deploy.xml libcloud/test/compute/fixtures/vcloud_1_5/api_task_fab4b26f_4f2e_4d49_ad01_ae9324bbfe48.xml libcloud/test/compute/fixtures/vcloud_1_5/api_task_fe75d3af_f5a3_44a5_b016_ae0bdadfc32b.xml libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy.xml libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy_error.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_undeployTest.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml 
libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_create_snapshot.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_remove_snapshots.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_revert_snapshot.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_access_to_resource_forbidden.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vm_test.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_get_metadata.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_post_metadata.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_brokenVdc.xml libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_cpu.xml libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_disks.xml libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_memory.xml libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_cpu.xml libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_disks.xml libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_memory.xml libcloud/test/compute/fixtures/voxel/create_node.xml 
libcloud/test/compute/fixtures/voxel/failure.xml libcloud/test/compute/fixtures/voxel/images.xml libcloud/test/compute/fixtures/voxel/locations.xml libcloud/test/compute/fixtures/voxel/nodes.xml libcloud/test/compute/fixtures/voxel/success.xml libcloud/test/compute/fixtures/voxel/unauthorized.xml libcloud/test/compute/fixtures/vpsnet/_available_clouds_api10json_templates.json libcloud/test/compute/fixtures/vultr/create_key_pair.json libcloud/test/compute/fixtures/vultr/create_node.json libcloud/test/compute/fixtures/vultr/error_rate_limit.txt libcloud/test/compute/fixtures/vultr/list_images.json libcloud/test/compute/fixtures/vultr/list_key_pairs.json libcloud/test/compute/fixtures/vultr/list_locations.json libcloud/test/compute/fixtures/vultr/list_nodes.json libcloud/test/compute/fixtures/vultr/list_sizes.json libcloud/test/container/__init__.py libcloud/test/container/test_docker.py libcloud/test/container/test_docker_utils.py libcloud/test/container/test_ecs.py libcloud/test/container/test_gke.py libcloud/test/container/test_joyent.py libcloud/test/container/test_kubernetes.py libcloud/test/container/test_rancher.py libcloud/test/container/fixtures/docker/linux_124/container_a68.json libcloud/test/container/fixtures/docker/linux_124/containers.json libcloud/test/container/fixtures/docker/linux_124/create_container.json libcloud/test/container/fixtures/docker/linux_124/create_image.txt libcloud/test/container/fixtures/docker/linux_124/images.json libcloud/test/container/fixtures/docker/linux_124/logs.txt libcloud/test/container/fixtures/docker/linux_124/search.json libcloud/test/container/fixtures/docker/linux_124/version.json libcloud/test/container/fixtures/docker/mac_124/containers.json libcloud/test/container/fixtures/docker/mac_124/create_container.json libcloud/test/container/fixtures/docker/mac_124/create_image.txt libcloud/test/container/fixtures/docker/mac_124/search.json libcloud/test/container/fixtures/docker/mac_124/version.json 
libcloud/test/container/fixtures/docker_utils/v2_repositories_library_ubuntu.json libcloud/test/container/fixtures/docker_utils/v2_repositories_library_ubuntu_tags.json libcloud/test/container/fixtures/docker_utils/v2_repositories_library_ubuntu_tags_latest.json libcloud/test/container/fixtures/ecs/createcluster.json libcloud/test/container/fixtures/ecs/createservice.json libcloud/test/container/fixtures/ecs/deletecluster.json libcloud/test/container/fixtures/ecs/deleteservice.json libcloud/test/container/fixtures/ecs/describeclusters.json libcloud/test/container/fixtures/ecs/describerepositories.json libcloud/test/container/fixtures/ecs/describeservices.json libcloud/test/container/fixtures/ecs/describetasks.json libcloud/test/container/fixtures/ecs/getauthorizationtoken.json libcloud/test/container/fixtures/ecs/listclusters.json libcloud/test/container/fixtures/ecs/listimages.json libcloud/test/container/fixtures/ecs/listservices.json libcloud/test/container/fixtures/ecs/listtasks.json libcloud/test/container/fixtures/ecs/registertaskdefinition.json libcloud/test/container/fixtures/ecs/runtask.json libcloud/test/container/fixtures/ecs/stoptask.json libcloud/test/container/fixtures/gke/zones_us-central1-a_instance_serverconfig.json libcloud/test/container/fixtures/gke/zones_us-central1-a_list.json libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces.json libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces_default.json libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces_default_DELETE.json libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces_default_pods_POST.json libcloud/test/container/fixtures/kubernetes/_api_v1_namespaces_test.json libcloud/test/container/fixtures/kubernetes/_api_v1_nodes.json libcloud/test/container/fixtures/kubernetes/_api_v1_nodes_127_0_0_1.json libcloud/test/container/fixtures/kubernetes/_api_v1_pods.json libcloud/test/container/fixtures/rancher/deploy_container.json 
libcloud/test/container/fixtures/rancher/ex_activate_service.json libcloud/test/container/fixtures/rancher/ex_activate_stack.json libcloud/test/container/fixtures/rancher/ex_deploy_service.json libcloud/test/container/fixtures/rancher/ex_deploy_stack.json libcloud/test/container/fixtures/rancher/ex_destroy_service.json libcloud/test/container/fixtures/rancher/ex_destroy_stack.json libcloud/test/container/fixtures/rancher/ex_list_services.json libcloud/test/container/fixtures/rancher/ex_list_stacks.json libcloud/test/container/fixtures/rancher/ex_search_containers.json libcloud/test/container/fixtures/rancher/ex_search_services.json libcloud/test/container/fixtures/rancher/list_containers.json libcloud/test/container/fixtures/rancher/start_container.json libcloud/test/container/fixtures/rancher/stop_container.json libcloud/test/dns/__init__.py libcloud/test/dns/test_auroradns.py libcloud/test/dns/test_base.py libcloud/test/dns/test_buddyns.py libcloud/test/dns/test_cloudflare.py libcloud/test/dns/test_digitalocean.py libcloud/test/dns/test_dnsimple.py libcloud/test/dns/test_dnspod.py libcloud/test/dns/test_durabledns.py libcloud/test/dns/test_gandi.py libcloud/test/dns/test_godaddy.py libcloud/test/dns/test_google.py libcloud/test/dns/test_hostvirtual.py libcloud/test/dns/test_linode.py libcloud/test/dns/test_liquidweb.py libcloud/test/dns/test_luadns.py libcloud/test/dns/test_nfsn.py libcloud/test/dns/test_nsone.py libcloud/test/dns/test_onapp.py libcloud/test/dns/test_pointdns.py libcloud/test/dns/test_powerdns.py libcloud/test/dns/test_rackspace.py libcloud/test/dns/test_route53.py libcloud/test/dns/test_softlayer.py libcloud/test/dns/test_vultr.py libcloud/test/dns/test_worldwidedns.py libcloud/test/dns/test_zerigo.py libcloud/test/dns/test_zonomi.py libcloud/test/dns/fixtures/auroradns/zone_create.json libcloud/test/dns/fixtures/auroradns/zone_example_com.json libcloud/test/dns/fixtures/auroradns/zone_example_com_health_check.json 
libcloud/test/dns/fixtures/auroradns/zone_example_com_health_checks.json libcloud/test/dns/fixtures/auroradns/zone_example_com_record_localhost.json libcloud/test/dns/fixtures/auroradns/zone_example_com_records.json libcloud/test/dns/fixtures/auroradns/zone_list.json libcloud/test/dns/fixtures/buddyns/create_zone_success.json libcloud/test/dns/fixtures/buddyns/delete_zone_success.json libcloud/test/dns/fixtures/buddyns/empty_zones_list.json libcloud/test/dns/fixtures/buddyns/get_zone_success.json libcloud/test/dns/fixtures/buddyns/list_zones.json libcloud/test/dns/fixtures/buddyns/zone_already_exists.json libcloud/test/dns/fixtures/buddyns/zone_does_not_exist.json libcloud/test/dns/fixtures/cloudflare/ban.json libcloud/test/dns/fixtures/cloudflare/cache_lvl.json libcloud/test/dns/fixtures/cloudflare/devmode.json libcloud/test/dns/fixtures/cloudflare/fpurge_ts.json libcloud/test/dns/fixtures/cloudflare/ip_lkup.json libcloud/test/dns/fixtures/cloudflare/ipv46.json libcloud/test/dns/fixtures/cloudflare/nul_.json libcloud/test/dns/fixtures/cloudflare/rec_delete.json libcloud/test/dns/fixtures/cloudflare/rec_edit.json libcloud/test/dns/fixtures/cloudflare/rec_load_all.json libcloud/test/dns/fixtures/cloudflare/rec_new.json libcloud/test/dns/fixtures/cloudflare/sec_lvl.json libcloud/test/dns/fixtures/cloudflare/stats.json libcloud/test/dns/fixtures/cloudflare/wl.json libcloud/test/dns/fixtures/cloudflare/zone_check.json libcloud/test/dns/fixtures/cloudflare/zone_file_purge.json libcloud/test/dns/fixtures/cloudflare/zone_load_multi.json libcloud/test/dns/fixtures/cloudflare/zone_settings.json libcloud/test/dns/fixtures/digitalocean/_v2_domains.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_CREATE.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_EMPTY.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_UNAUTHORIZED.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_testdomain.json 
libcloud/test/dns/fixtures/digitalocean/_v2_domains_testdomain_NOT_FOUND.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_testdomain_records.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_testdomain_records_1234560.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_testdomain_records_1234561.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_testdomain_records_1234562.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_testdomain_records_1234564.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_testdomain_records_1234564_NOT_FOUND.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_testdomain_records_1234564_UPDATE.json libcloud/test/dns/fixtures/digitalocean/_v2_domains_testdomain_records_CREATE.json libcloud/test/dns/fixtures/dnsimple/create_domain.json libcloud/test/dns/fixtures/dnsimple/create_domain_record.json libcloud/test/dns/fixtures/dnsimple/get_domain.json libcloud/test/dns/fixtures/dnsimple/get_domain_record.json libcloud/test/dns/fixtures/dnsimple/list_domain_records.json libcloud/test/dns/fixtures/dnsimple/list_domains.json libcloud/test/dns/fixtures/dnsimple/update_domain_record.json libcloud/test/dns/fixtures/dnspod/create_zone_success.json libcloud/test/dns/fixtures/dnspod/delete_record_record_does_not_exist.json libcloud/test/dns/fixtures/dnspod/delete_record_success.json libcloud/test/dns/fixtures/dnspod/delete_zone_success.json libcloud/test/dns/fixtures/dnspod/empty_zones_list.json libcloud/test/dns/fixtures/dnspod/get_record.json libcloud/test/dns/fixtures/dnspod/get_zone_success.json libcloud/test/dns/fixtures/dnspod/list_records.json libcloud/test/dns/fixtures/dnspod/list_zones.json libcloud/test/dns/fixtures/dnspod/record_already_exists.json libcloud/test/dns/fixtures/dnspod/zone_already_exists.json libcloud/test/dns/fixtures/dnspod/zone_does_not_exist.json libcloud/test/dns/fixtures/durabledns/create_record_NO_EXTRA_PARAMS.xml 
libcloud/test/dns/fixtures/durabledns/create_record_WITH_EXTRA_PARAMS.xml libcloud/test/dns/fixtures/durabledns/create_record_ZONE_DOES_NOT_EXIST.xml libcloud/test/dns/fixtures/durabledns/create_zone.xml libcloud/test/dns/fixtures/durabledns/create_zone_ZONE_ALREADY_EXIST.xml libcloud/test/dns/fixtures/durabledns/delete_record.xml libcloud/test/dns/fixtures/durabledns/delete_record_RECORD_DOES_NOT_EXIST.xml libcloud/test/dns/fixtures/durabledns/delete_record_ZONE_DOES_NOT_EXIST.xml libcloud/test/dns/fixtures/durabledns/delete_zone.xml libcloud/test/dns/fixtures/durabledns/delete_zone_ZONE_DOES_NOT_EXIST.xml libcloud/test/dns/fixtures/durabledns/get_record.xml libcloud/test/dns/fixtures/durabledns/get_record_RECORD_DOES_NOT_EXIST.xml libcloud/test/dns/fixtures/durabledns/get_record_ZONE_DOES_NOT_EXIST.xml libcloud/test/dns/fixtures/durabledns/get_zone.xml libcloud/test/dns/fixtures/durabledns/get_zone_NO_EXTRA_PARAMS.xml libcloud/test/dns/fixtures/durabledns/get_zone_UPDATE_ZONE.xml libcloud/test/dns/fixtures/durabledns/get_zone_WITH_EXTRA_PARAMS.xml libcloud/test/dns/fixtures/durabledns/get_zone_ZONE_DOES_NOT_EXIST.xml libcloud/test/dns/fixtures/durabledns/list_records.xml libcloud/test/dns/fixtures/durabledns/list_records_ZONE_DOES_NOT_EXIST.xml libcloud/test/dns/fixtures/durabledns/list_zones.xml libcloud/test/dns/fixtures/durabledns/update_record.xml libcloud/test/dns/fixtures/durabledns/update_record_ZONE_DOES_NOT_EXIST.xml libcloud/test/dns/fixtures/durabledns/update_record_success.xml libcloud/test/dns/fixtures/durabledns/update_zone_UPDATE_ZONE.xml libcloud/test/dns/fixtures/durabledns/update_zone_ZONE_DOES_NOT_EXIST.xml libcloud/test/dns/fixtures/gandi/create_record.xml libcloud/test/dns/fixtures/gandi/create_zone.xml libcloud/test/dns/fixtures/gandi/delete_record.xml libcloud/test/dns/fixtures/gandi/delete_record_doesnotexist.xml libcloud/test/dns/fixtures/gandi/delete_zone.xml libcloud/test/dns/fixtures/gandi/delete_zone_fail.xml 
libcloud/test/dns/fixtures/gandi/get_zone.xml libcloud/test/dns/fixtures/gandi/list_records.xml libcloud/test/dns/fixtures/gandi/list_records_empty.xml libcloud/test/dns/fixtures/gandi/list_zones.xml libcloud/test/dns/fixtures/gandi/new_version.xml libcloud/test/dns/fixtures/gandi/zone_doesnt_exist.xml libcloud/test/dns/fixtures/godaddy/purchase_request.json libcloud/test/dns/fixtures/godaddy/v1_domains.json libcloud/test/dns/fixtures/godaddy/v1_domains_agreements.json libcloud/test/dns/fixtures/godaddy/v1_domains_aperture_platform_com.json libcloud/test/dns/fixtures/godaddy/v1_domains_aperture_platform_com_records.json libcloud/test/dns/fixtures/godaddy/v1_domains_aperture_platform_com_records_A_www.json libcloud/test/dns/fixtures/godaddy/v1_domains_available.json libcloud/test/dns/fixtures/godaddy/v1_domains_purchase.json libcloud/test/dns/fixtures/godaddy/v1_domains_purchase_schema_com.json libcloud/test/dns/fixtures/godaddy/v1_domains_tlds.json libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json libcloud/test/dns/fixtures/google/managed_zones_1.json libcloud/test/dns/fixtures/google/no_record.json libcloud/test/dns/fixtures/google/record.json libcloud/test/dns/fixtures/google/record_changes.json libcloud/test/dns/fixtures/google/records_list.json libcloud/test/dns/fixtures/google/zone.json libcloud/test/dns/fixtures/google/zone_create.json libcloud/test/dns/fixtures/google/zone_list.json libcloud/test/dns/fixtures/hostvirtual/get_record.json libcloud/test/dns/fixtures/hostvirtual/get_zone.json libcloud/test/dns/fixtures/hostvirtual/list_records.json libcloud/test/dns/fixtures/hostvirtual/list_records_none.json libcloud/test/dns/fixtures/hostvirtual/list_zones.json libcloud/test/dns/fixtures/hostvirtual/zone_does_not_exist.json libcloud/test/dns/fixtures/linode/create_domain.json libcloud/test/dns/fixtures/linode/create_domain_validation_error.json libcloud/test/dns/fixtures/linode/create_resource.json 
libcloud/test/dns/fixtures/linode/delete_domain.json libcloud/test/dns/fixtures/linode/delete_domain_does_not_exist.json libcloud/test/dns/fixtures/linode/delete_resource.json libcloud/test/dns/fixtures/linode/delete_resource_does_not_exist.json libcloud/test/dns/fixtures/linode/domain_list.json libcloud/test/dns/fixtures/linode/get_record.json libcloud/test/dns/fixtures/linode/get_record_does_not_exist.json libcloud/test/dns/fixtures/linode/get_zone.json libcloud/test/dns/fixtures/linode/get_zone_does_not_exist.json libcloud/test/dns/fixtures/linode/resource_list.json libcloud/test/dns/fixtures/linode/resource_list_does_not_exist.json libcloud/test/dns/fixtures/linode/update_domain.json libcloud/test/dns/fixtures/linode/update_resource.json libcloud/test/dns/fixtures/liquidweb/create_zone_success.json libcloud/test/dns/fixtures/liquidweb/delete_record.json libcloud/test/dns/fixtures/liquidweb/delete_zone.success.json libcloud/test/dns/fixtures/liquidweb/delete_zone_success.json libcloud/test/dns/fixtures/liquidweb/duplicate_record.json libcloud/test/dns/fixtures/liquidweb/empty_records_list.json libcloud/test/dns/fixtures/liquidweb/empty_zones_list.json libcloud/test/dns/fixtures/liquidweb/get_record.json libcloud/test/dns/fixtures/liquidweb/get_zone.json libcloud/test/dns/fixtures/liquidweb/record_does_not_exist.json libcloud/test/dns/fixtures/liquidweb/records_list.json libcloud/test/dns/fixtures/liquidweb/update_record.json libcloud/test/dns/fixtures/liquidweb/zone_does_not_exist.json libcloud/test/dns/fixtures/liquidweb/zones_list.json libcloud/test/dns/fixtures/luadns/create_record_success.json libcloud/test/dns/fixtures/luadns/create_zone_success.json libcloud/test/dns/fixtures/luadns/delete_record_success.json libcloud/test/dns/fixtures/luadns/delete_zone_success.json libcloud/test/dns/fixtures/luadns/empty_records_list.json libcloud/test/dns/fixtures/luadns/empty_zones_list.json libcloud/test/dns/fixtures/luadns/get_record.json 
libcloud/test/dns/fixtures/luadns/get_zone.json libcloud/test/dns/fixtures/luadns/record_does_not_exist.json libcloud/test/dns/fixtures/luadns/records_list.json libcloud/test/dns/fixtures/luadns/zone_already_exists.json libcloud/test/dns/fixtures/luadns/zone_does_not_exist.json libcloud/test/dns/fixtures/luadns/zones_list.json libcloud/test/dns/fixtures/nfsn/list_one_record.json libcloud/test/dns/fixtures/nfsn/list_records.json libcloud/test/dns/fixtures/nfsn/list_records_created.json libcloud/test/dns/fixtures/nfsn/record_not_removed.json libcloud/test/dns/fixtures/nfsn/zone_not_found.json libcloud/test/dns/fixtures/nsone/create_zone_success.json libcloud/test/dns/fixtures/nsone/delete_record_success.json libcloud/test/dns/fixtures/nsone/delete_zone_success.json libcloud/test/dns/fixtures/nsone/empty_zones_list.json libcloud/test/dns/fixtures/nsone/get_record_success.json libcloud/test/dns/fixtures/nsone/get_zone_success.json libcloud/test/dns/fixtures/nsone/list_records_empty.json libcloud/test/dns/fixtures/nsone/list_zones.json libcloud/test/dns/fixtures/nsone/record_does_not_exist.json libcloud/test/dns/fixtures/nsone/zone_already_exists.json libcloud/test/dns/fixtures/nsone/zone_does_not_exist.json libcloud/test/dns/fixtures/onapp/create_record.json libcloud/test/dns/fixtures/onapp/create_zone.json libcloud/test/dns/fixtures/onapp/dns_zone_not_found.json libcloud/test/dns/fixtures/onapp/get_record.json libcloud/test/dns/fixtures/onapp/get_record_after_update.json libcloud/test/dns/fixtures/onapp/get_zone.json libcloud/test/dns/fixtures/onapp/list_records.json libcloud/test/dns/fixtures/onapp/list_zones.json libcloud/test/dns/fixtures/pointdns/_zones_1_DELETE.json libcloud/test/dns/fixtures/pointdns/_zones_1_ZONE_UPDATE.json libcloud/test/dns/fixtures/pointdns/_zones_1_mail_redirects_CREATE.json libcloud/test/dns/fixtures/pointdns/_zones_1_mail_redirects_DELETE.json libcloud/test/dns/fixtures/pointdns/_zones_1_mail_redirects_GET.json 
libcloud/test/dns/fixtures/pointdns/_zones_1_mail_redirects_LIST.json libcloud/test/dns/fixtures/pointdns/_zones_1_mail_redirects_UPDATE.json libcloud/test/dns/fixtures/pointdns/_zones_1_records_141_GET.json libcloud/test/dns/fixtures/pointdns/_zones_1_records_141_UPDATE.json libcloud/test/dns/fixtures/pointdns/_zones_1_records_150_DELETE.json libcloud/test/dns/fixtures/pointdns/_zones_1_records_GET.json libcloud/test/dns/fixtures/pointdns/_zones_1_redirects_CREATE.json libcloud/test/dns/fixtures/pointdns/_zones_1_redirects_DELETE.json libcloud/test/dns/fixtures/pointdns/_zones_1_redirects_GET.json libcloud/test/dns/fixtures/pointdns/_zones_1_redirects_LIST.json libcloud/test/dns/fixtures/pointdns/_zones_1_redirects_UPDATE.json libcloud/test/dns/fixtures/pointdns/_zones_CREATE.json libcloud/test/dns/fixtures/pointdns/_zones_GET.json libcloud/test/dns/fixtures/pointdns/_zones_GET_1.json libcloud/test/dns/fixtures/pointdns/_zones_example_com_UPDATE.json libcloud/test/dns/fixtures/pointdns/_zones_example_com_records_CREATE.json libcloud/test/dns/fixtures/pointdns/error.json libcloud/test/dns/fixtures/pointdns/not_found.json libcloud/test/dns/fixtures/pointdns/redirect_error.json libcloud/test/dns/fixtures/powerdns/list_records.json libcloud/test/dns/fixtures/powerdns/list_zones.json libcloud/test/dns/fixtures/rackspace/auth_1_1.json libcloud/test/dns/fixtures/rackspace/auth_2_0.json libcloud/test/dns/fixtures/rackspace/create_ptr_record_success.json libcloud/test/dns/fixtures/rackspace/create_record_success.json libcloud/test/dns/fixtures/rackspace/create_zone_success.json libcloud/test/dns/fixtures/rackspace/create_zone_validation_error.json libcloud/test/dns/fixtures/rackspace/delete_ptr_record_success.json libcloud/test/dns/fixtures/rackspace/delete_record_success.json libcloud/test/dns/fixtures/rackspace/delete_zone_success.json libcloud/test/dns/fixtures/rackspace/does_not_exist.json libcloud/test/dns/fixtures/rackspace/get_record_success.json 
libcloud/test/dns/fixtures/rackspace/get_zone_success.json libcloud/test/dns/fixtures/rackspace/list_ptr_records_success.json libcloud/test/dns/fixtures/rackspace/list_records_no_results.json libcloud/test/dns/fixtures/rackspace/list_records_success.json libcloud/test/dns/fixtures/rackspace/list_zones_no_results.json libcloud/test/dns/fixtures/rackspace/list_zones_success.json libcloud/test/dns/fixtures/rackspace/unauthorized.json libcloud/test/dns/fixtures/rackspace/update_record_success.json libcloud/test/dns/fixtures/rackspace/update_zone_success.json libcloud/test/dns/fixtures/route53/create_zone.xml libcloud/test/dns/fixtures/route53/get_zone.xml libcloud/test/dns/fixtures/route53/invalid_change_batch.xml libcloud/test/dns/fixtures/route53/list_records.xml libcloud/test/dns/fixtures/route53/list_zones.xml libcloud/test/dns/fixtures/route53/record_does_not_exist.xml libcloud/test/dns/fixtures/route53/zone_does_not_exist.xml libcloud/test/dns/fixtures/softlayer/not_found.xml libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_createObject.xml libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_deleteObject.xml libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_editObject.xml libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_getObject.xml libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_ResourceRecord_getObject_changed.xml libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_createObject.xml libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_deleteObject.xml libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_getByDomainName.xml libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_getObject.xml libcloud/test/dns/fixtures/softlayer/v3_SoftLayer_Dns_Domain_getResourceRecords.xml libcloud/test/dns/fixtures/vultr/delete_zone.json libcloud/test/dns/fixtures/vultr/empty_records_list.json 
libcloud/test/dns/fixtures/vultr/empty_zones_list.json libcloud/test/dns/fixtures/vultr/get_record.json libcloud/test/dns/fixtures/vultr/get_zone.json libcloud/test/dns/fixtures/vultr/list_domains.json libcloud/test/dns/fixtures/vultr/list_records.json libcloud/test/dns/fixtures/vultr/test_zone.json libcloud/test/dns/fixtures/worldwidedns/_api_dns_modify_asp_CREATE_RECORD_MAX_ENTRIES_WITH_ENTRY libcloud/test/dns/fixtures/worldwidedns/api_dns_list libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_CREATE_RECORD libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_CREATE_RECORD_MAX_ENTRIES libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_CREATE_SECOND_RECORD libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_DELETE_RECORD libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_UPDATE_RECORD libcloud/test/dns/fixtures/worldwidedns/api_dns_list_domain_asp_UPDATE_ZONE libcloud/test/dns/fixtures/zerigo/create_record.xml libcloud/test/dns/fixtures/zerigo/create_zone.xml libcloud/test/dns/fixtures/zerigo/create_zone_validation_error.xml libcloud/test/dns/fixtures/zerigo/get_record.xml libcloud/test/dns/fixtures/zerigo/get_zone.xml libcloud/test/dns/fixtures/zerigo/list_records.xml libcloud/test/dns/fixtures/zerigo/list_records_no_results.xml libcloud/test/dns/fixtures/zerigo/list_zones.xml libcloud/test/dns/fixtures/zerigo/list_zones_no_results.xml libcloud/test/dns/fixtures/zonomi/converted_to_master.xml libcloud/test/dns/fixtures/zonomi/converted_to_slave.xml libcloud/test/dns/fixtures/zonomi/couldnt_convert.xml libcloud/test/dns/fixtures/zonomi/create_record.xml libcloud/test/dns/fixtures/zonomi/create_record_already_exists.xml libcloud/test/dns/fixtures/zonomi/create_zone.xml libcloud/test/dns/fixtures/zonomi/create_zone_already_exists.xml libcloud/test/dns/fixtures/zonomi/delete_record.xml 
libcloud/test/dns/fixtures/zonomi/delete_record_does_not_exist.xml libcloud/test/dns/fixtures/zonomi/delete_zone.xml libcloud/test/dns/fixtures/zonomi/delete_zone_does_not_exist.xml libcloud/test/dns/fixtures/zonomi/empty_zones_list.xml libcloud/test/dns/fixtures/zonomi/list_records.xml libcloud/test/dns/fixtures/zonomi/list_zones.xml libcloud/test/loadbalancer/__init__.py libcloud/test/loadbalancer/test_alb.py libcloud/test/loadbalancer/test_brightbox.py libcloud/test/loadbalancer/test_cloudstack.py libcloud/test/loadbalancer/test_dimensiondata_v2_3.py libcloud/test/loadbalancer/test_dimensiondata_v2_4.py libcloud/test/loadbalancer/test_elb.py libcloud/test/loadbalancer/test_gce.py libcloud/test/loadbalancer/test_gogrid.py libcloud/test/loadbalancer/test_ninefold.py libcloud/test/loadbalancer/test_rackspace.py libcloud/test/loadbalancer/test_slb.py libcloud/test/loadbalancer/test_softlayer.py libcloud/test/loadbalancer/fixtures/alb/describe_load_balancer_listeters.xml libcloud/test/loadbalancer/fixtures/alb/describe_load_balancer_rules.xml libcloud/test/loadbalancer/fixtures/alb/describe_load_balancer_target_groups.xml libcloud/test/loadbalancer/fixtures/alb/describe_load_balancers.xml libcloud/test/loadbalancer/fixtures/alb/describe_tags.xml libcloud/test/loadbalancer/fixtures/alb/describe_target_health.xml libcloud/test/loadbalancer/fixtures/brightbox/load_balancers.json libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_lba_1235f.json libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_post.json libcloud/test/loadbalancer/fixtures/brightbox/token.json libcloud/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.json libcloud/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json libcloud/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.json libcloud/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.json 
libcloud/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.json libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.json libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.json libcloud/test/loadbalancer/fixtures/cloudstack/listZones_default.json libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json libcloud/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.json libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_addPoolMember.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_createNode.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_createPool.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_createVirtualListener.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_defaultHealthMonitor.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_defaultIrule.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_defaultPersistenceProfile.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_deleteNode.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_deletePool.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_deleteVirtualListener.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_editNode.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_editPool.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_editPoolMember.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_node.xml 
libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_node_34de6ed6_46a4_4dae_a753_2f8d3840c6f9.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_pool.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_poolMember.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_poolMember_3dd806a2_c2c8_4c0c_9a4f_5219ea9266c0.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_pool_4d360b1f_bc2c_4ab7_9884_1f03ba2768f7.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_removePoolMember.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_virtualListener.xml libcloud/test/loadbalancer/fixtures/dimensiondata/networkDomainVip_virtualListener_6115469d_a8bb_445b_bb23_d23b5283f2b9.xml libcloud/test/loadbalancer/fixtures/dimensiondata/oec_0_9_myaccount.xml libcloud/test/loadbalancer/fixtures/elb/create_load_balancer.xml libcloud/test/loadbalancer/fixtures/elb/create_load_balancer_policy.xml libcloud/test/loadbalancer/fixtures/elb/deregister_instances_from_load_balancer.xml libcloud/test/loadbalancer/fixtures/elb/describe_load_balancer_policies.xml libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers.xml libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers_policy_types.xml libcloud/test/loadbalancer/fixtures/elb/describe_tags.xml libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_for_backend_server.xml libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_of_listener.xml libcloud/test/loadbalancer/fixtures/gogrid/ip_list.json libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json libcloud/test/loadbalancer/fixtures/gogrid/unexpected_error.json libcloud/test/loadbalancer/fixtures/rackspace/_v2_0__auth.json 
libcloud/test/loadbalancer/fixtures/rackspace/auth_2_0.json libcloud/test/loadbalancer/fixtures/rackspace/error_page_default.json libcloud/test/loadbalancer/fixtures/rackspace/v1_0_slug_loadbalancers_8290_usage_current.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_accesslist.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_errorpage.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_ex_public_ips.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18941_ex_private_ips.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18945_ex_public_ips.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_3xxx.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_errorpage.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8291.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292_nodes_post.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94692_weighted_round_robin.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94693_weighted_least_connections.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94694_unknown_algorithm.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94695_full_details.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94696_http_health_monitor.json 
libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94697_https_health_monitor.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_access_list.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_with_access_list.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94700_http_health_monitor_no_body_regex.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_algorithms.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_nodeaddress.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json libcloud/test/loadbalancer/fixtures/slb/add_backend_servers.xml libcloud/test/loadbalancer/fixtures/slb/create_load_balancer.xml libcloud/test/loadbalancer/fixtures/slb/create_load_balancer_http_listener.xml libcloud/test/loadbalancer/fixtures/slb/delete_load_balancer.xml libcloud/test/loadbalancer/fixtures/slb/delete_server_certificate.xml libcloud/test/loadbalancer/fixtures/slb/describe_load_balancer_attribute.xml libcloud/test/loadbalancer/fixtures/slb/describe_load_balancers.xml libcloud/test/loadbalancer/fixtures/slb/describe_server_certificates.xml libcloud/test/loadbalancer/fixtures/slb/remove_backend_servers.xml libcloud/test/loadbalancer/fixtures/slb/set_server_certificate_name.xml libcloud/test/loadbalancer/fixtures/slb/start_load_balancer_listener.xml libcloud/test/loadbalancer/fixtures/slb/upload_server_certificate.xml libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Account_getAdcLoadBalancers.xml libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Billing_Item_cancelService.xml libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Location_Datacenter_getDatacenters.xml libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_deleteObject.xml 
libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_editObject.xml libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_getBillingItem.xml libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_getObject.xml libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Network_Subnet_IpAddress_getByIpAddress.xml libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Product_Order_placeOrder.xml libcloud/test/loadbalancer/fixtures/softlayer/v3__SoftLayer_Product_Package_getItems.xml libcloud/test/storage/__init__.py libcloud/test/storage/test_atmos.py libcloud/test/storage/test_aurora.py libcloud/test/storage/test_azure_blobs.py libcloud/test/storage/test_backblaze_b2.py libcloud/test/storage/test_base.py libcloud/test/storage/test_cloudfiles.py libcloud/test/storage/test_google_storage.py libcloud/test/storage/test_local.py libcloud/test/storage/test_ninefold.py libcloud/test/storage/test_oss.py libcloud/test/storage/test_rgw.py libcloud/test/storage/test_s3.py libcloud/test/storage/fixtures/atmos/already_exists.xml libcloud/test/storage/fixtures/atmos/empty_directory_listing.xml libcloud/test/storage/fixtures/atmos/list_containers.xml libcloud/test/storage/fixtures/atmos/not_empty.xml libcloud/test/storage/fixtures/atmos/not_found.xml libcloud/test/storage/fixtures/azure_blobs/list_containers_1.xml libcloud/test/storage/fixtures/azure_blobs/list_containers_2.xml libcloud/test/storage/fixtures/azure_blobs/list_containers_empty.xml libcloud/test/storage/fixtures/azure_blobs/list_objects_1.xml libcloud/test/storage/fixtures/azure_blobs/list_objects_2.xml libcloud/test/storage/fixtures/azure_blobs/list_objects_empty.xml libcloud/test/storage/fixtures/backblaze_b2/b2_create_bucket.json 
libcloud/test/storage/fixtures/backblaze_b2/b2_delete_bucket.json libcloud/test/storage/fixtures/backblaze_b2/b2_delete_file_version.json libcloud/test/storage/fixtures/backblaze_b2/b2_get_upload_url.json libcloud/test/storage/fixtures/backblaze_b2/b2_hide_file.json libcloud/test/storage/fixtures/backblaze_b2/b2_list_buckets.json libcloud/test/storage/fixtures/backblaze_b2/b2_list_file_names.json libcloud/test/storage/fixtures/backblaze_b2/b2_list_file_versions.json libcloud/test/storage/fixtures/backblaze_b2/b2_upload_file.json libcloud/test/storage/fixtures/cloudfiles/_v2_0__auth.json libcloud/test/storage/fixtures/cloudfiles/list_container_objects.json libcloud/test/storage/fixtures/cloudfiles/list_container_objects_empty.json libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted1.json libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted2.json libcloud/test/storage/fixtures/cloudfiles/list_containers.json libcloud/test/storage/fixtures/cloudfiles/list_containers_empty.json libcloud/test/storage/fixtures/cloudfiles/meta_data.json libcloud/test/storage/fixtures/google_storage/get_container.json libcloud/test/storage/fixtures/google_storage/get_object.json libcloud/test/storage/fixtures/google_storage/list_container_acl.json libcloud/test/storage/fixtures/google_storage/list_container_objects.xml libcloud/test/storage/fixtures/google_storage/list_container_objects_empty.xml libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted1.xml libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted2.xml libcloud/test/storage/fixtures/google_storage/list_containers.xml libcloud/test/storage/fixtures/google_storage/list_containers_empty.xml libcloud/test/storage/fixtures/google_storage/list_object_acl.json libcloud/test/storage/fixtures/oss/complete_multipart_upload.xml libcloud/test/storage/fixtures/oss/ex_iterate_multipart_uploads_p1.xml 
libcloud/test/storage/fixtures/oss/ex_iterate_multipart_uploads_p2.xml libcloud/test/storage/fixtures/oss/initiate_multipart_upload.xml libcloud/test/storage/fixtures/oss/list_container_objects.xml libcloud/test/storage/fixtures/oss/list_container_objects_chinese.xml libcloud/test/storage/fixtures/oss/list_container_objects_empty.xml libcloud/test/storage/fixtures/oss/list_container_objects_prefix.xml libcloud/test/storage/fixtures/oss/list_containers.xml libcloud/test/storage/fixtures/oss/list_containers_empty.xml libcloud/test/storage/fixtures/s3/complete_multipart.xml libcloud/test/storage/fixtures/s3/initiate_multipart.xml libcloud/test/storage/fixtures/s3/list_container_objects.xml libcloud/test/storage/fixtures/s3/list_container_objects_empty.xml libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted1.xml libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted2.xml libcloud/test/storage/fixtures/s3/list_containers.xml libcloud/test/storage/fixtures/s3/list_containers_empty.xml libcloud/test/storage/fixtures/s3/list_multipart_1.xml libcloud/test/storage/fixtures/s3/list_multipart_2.xml libcloud/utils/__init__.py libcloud/utils/connection.py libcloud/utils/decorators.py libcloud/utils/dist.py libcloud/utils/escape.py libcloud/utils/files.py libcloud/utils/iso8601.py libcloud/utils/logging.py libcloud/utils/loggingconnection.py libcloud/utils/misc.py libcloud/utils/networking.py libcloud/utils/publickey.py libcloud/utils/py3.py libcloud/utils/xml.py scripts/check_file_names.shapache-libcloud-2.2.1/apache_libcloud.egg-info/not-zip-safe0000664000175000017500000000000112701216437023437 0ustar kamikami00000000000000 apache-libcloud-2.2.1/apache_libcloud.egg-info/dependency_links.txt0000664000175000017500000000000113160535106025254 0ustar kamikami00000000000000 apache-libcloud-2.2.1/apache_libcloud.egg-info/top_level.txt0000664000175000017500000000001113160535106023730 0ustar kamikami00000000000000libcloud 
apache-libcloud-2.2.1/apache_libcloud.egg-info/PKG-INFO0000664000175000017500000000245613160535106022312 0ustar kamikami00000000000000Metadata-Version: 1.1 Name: apache-libcloud Version: 2.2.1 Summary: A standard Python library that abstracts away differences among multiple cloud provider APIs. For more information and documentation, please see http://libcloud.apache.org Home-page: http://libcloud.apache.org/ Author: Apache Software Foundation Author-email: dev@libcloud.apache.org License: Apache License (2.0) Description: UNKNOWN Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: Console Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy apache-libcloud-2.2.1/LICENSE0000664000175000017500000002613612701023453015430 0ustar kamikami00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
apache-libcloud-2.2.1/tox.ini0000664000175000017500000000730413160534721015737 0ustar kamikami00000000000000[tox] envlist = py{2.6,2.7,pypy,pypy3,3.3,3.4,3.5,3.6},checks,lint,pylint,integration,coverage [testenv] passenv = TRAVIS TRAVIS_JOB_ID TRAVIS_BRANCH deps = -r{toxinidir}/requirements-tests.txt lockfile py{2.6,2.7}: paramiko py{2.6}: unittest2 commands = cp libcloud/test/secrets.py-dist libcloud/test/secrets.py python setup.py test basepython = py2.6: python2.6 {py2.7,checks,lint,pylint,coverage}: python2.7 docs: python3.5 pypypy: pypy pypypy3: pypy3 py3.3: python3.3 py3.4: python3.4 py3.5: python3.5 py3.6: python3.6 whitelist_externals = cp bash scripts/*.sh [testenv:pypypy3] commands = cp libcloud/test/secrets.py-dist libcloud/test/secrets.py python -m unittest discover libcloud/test [testenv:docs] deps = sphinx pysphere backports.ssl_match_hostname lockfile rstcheck changedir = docs commands = rstcheck --report warning ../CHANGES.rst python ../contrib/generate_provider_feature_matrix_table.py sphinx-apidoc -d 4 ../libcloud/ -o apidocs/ /bin/bash -c "ls apidocs/modules.rst && (grep orphan apidocs/modules.rst || sed -i '1i :orphan:\n' apidocs/modules.rst) || (exit 0)" sphinx-build -W -b html -d {envtmpdir}/doctrees . _build/html [testenv:docs-travis] # Note: We don't build API docs on Travis since it causes build failures because # those API docs files are not included anywhere. deps = sphinx pysphere backports.ssl_match_hostname lockfile rstcheck changedir = docs commands = rstcheck --report warning ../CHANGES.rst python ../contrib/generate_provider_feature_matrix_table.py /bin/bash -c "ls apidocs/modules.rst && (grep orphan apidocs/modules.rst || sed -i '1i :orphan:\n' apidocs/modules.rst) || (exit 0)" sphinx-build -W -b html -d {envtmpdir}/doctrees . 
_build/html [testenv:scrape-ec2-prices] deps = requests demjson commands = python contrib/scrape-ec2-prices.py [testenv:pylint] deps = -r{toxinidir}/requirements-tests.txt backports.ssl_match_hostname bottle lockfile commands = pylint -E --rcfile=./.pylintrc libcloud/common/ pylint -E --rcfile=./.pylintrc libcloud/container/ pylint -E --rcfile=./.pylintrc libcloud/backup/ pylint -E --rcfile=./.pylintrc libcloud/dns/ pylint -E --rcfile=./.pylintrc libcloud/storage/ pylint -E --rcfile=./.pylintrc libcloud/utils/ pylint -E --rcfile=./.pylintrc demos/ pylint -E --rcfile=./.pylintrc contrib/ [testenv:lint] deps = -r{toxinidir}/requirements-tests.txt backports.ssl_match_hostname lockfile commands = flake8 --ignore=E402 --exclude="test" libcloud/ flake8 --ignore=E402 --max-line-length=160 libcloud/test/ flake8 --ignore=E402 demos/ flake8 --ignore=E402 integration/ flake8 --ignore=E402,E902 docs/examples/ flake8 --ignore=E402,E902 --max-line-length=160 contrib/ python -mjson.tool libcloud/data/pricing.json [testenv:checks] commands = bash ./scripts/check_file_names.sh [testenv:integration] deps = -r{toxinidir}/integration/requirements.txt commands = python -m integration [testenv:coverage] deps = -r{toxinidir}/requirements-tests.txt set-env = commands = cp libcloud/test/secrets.py-dist libcloud/test/secrets.py coverage run --source=libcloud setup.py test [testenv:coverage-travis] passenv = TOXENV CI TRAVIS TRAVIS_* deps = -r{toxinidir}/requirements-tests.txt set-env = commands = cp libcloud/test/secrets.py-dist libcloud/test/secrets.py coverage run --source=libcloud setup.py test codecov apache-libcloud-2.2.1/demos/0000775000175000017500000000000013160535107015526 5ustar kamikami00000000000000apache-libcloud-2.2.1/demos/__init__.py0000664000175000017500000000000013153541406017626 0ustar kamikami00000000000000apache-libcloud-2.2.1/demos/gce_demo.py0000775000175000017500000011034613153541406017653 0ustar kamikami00000000000000#!/usr/bin/env python # Licensed to the Apache 
Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This example performs several tasks on Google Compute Platform. It can be # run directly or can be imported into an interactive python session. This # can also serve as live integration tests. # # To run directly, use python 2.7 or greater: # - $ python gce_demo.py --help # to see the help screen # - $ python gce_demo.py # to run all demos / tests # # To run interactively: # - Make sure you have valid values in secrets.py # (For more information about setting up your credentials, see the # libcloud/common/google.py docstring) # - Run 'python' in this directory, then: # import gce_demo # gce = gce_demo.get_gce_driver() # gce.list_nodes() # etc. # - Or, to run the full demo from the interactive python shell: # import gce_demo # gce_demo.CLEANUP = False # optional # gce_demo.MAX_NODES = 4 # optional # gce_demo.DATACENTER = 'us-central1-a' # optional # gce_demo.main_compute() # 'compute' only demo # gce_demo.main_load_balancer() # 'load_balancer' only demo # gce_demo.main_dns() # 'dns only demo # gce_demo.main() # all demos / tests import os.path import sys import datetime import time try: import argparse except: print('This script uses the python "argparse" module. 
Please use Python ' '2.7 or greater.') raise try: import secrets except ImportError: print('"demos/secrets.py" not found.\n\n' 'Please copy secrets.py-dist to secrets.py and update the GCE* ' 'values with appropriate authentication information.\n' 'Additional information about setting these values can be found ' 'in the docstring for:\n' 'libcloud/common/google.py\n') sys.exit(1) # Add parent dir of this file's dir to sys.path (OS-agnostically) sys.path.append( os.path.normpath(os.path.join(os.path.dirname(__file__), os.path.pardir))) from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.common.google import ResourceNotFoundError from libcloud.loadbalancer.types import Provider as Provider_lb from libcloud.loadbalancer.providers import get_driver as get_driver_lb from libcloud.dns.types import Provider as Provider_dns from libcloud.dns.providers import get_driver as get_driver_dns from libcloud.dns.base import Record, Zone from libcloud.utils.py3 import PY3 if PY3: import urllib.request as url_req # pylint: disable=no-name-in-module else: import urllib2 as url_req # Maximum number of 1-CPU nodes to allow to run simultaneously MAX_NODES = 5 # String that all resource names created by the demo will start with # WARNING: Any resource that has a matching name will be destroyed. DEMO_BASE_NAME = 'lct' # Datacenter to create resources in DATACENTER = 'us-central1-f' BACKUP_DATACENTER = 'us-east1-c' # Clean up resources at the end (can be set to false in order to # inspect resources at the end of the run). Resources will be cleaned # at the beginning regardless. 
CLEANUP = True

args = getattr(secrets, 'GCE_PARAMS', ())
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})

# Add datacenter to kwargs for Python 2.5 compatibility
kwargs = kwargs.copy()
kwargs['datacenter'] = DATACENTER


# ==== HELPER FUNCTIONS ====
def get_gce_driver():
    """Return a GCE compute driver built from the secrets.py credentials."""
    driver = get_driver(Provider.GCE)(*args, **kwargs)
    return driver


def get_gcelb_driver(gce_driver=None):
    """
    Return a GCE load-balancer driver.

    The GCE Load Balancer driver uses the GCE Compute driver for all of its
    API calls. You can either provide the driver directly, or provide the
    same authentication information so the LB driver can get its own
    Compute driver.
    """
    if gce_driver:
        driver = get_driver_lb(Provider_lb.GCE)(gce_driver=gce_driver)
    else:
        driver = get_driver_lb(Provider_lb.GCE)(*args, **kwargs)
    return driver


def get_dns_driver(gce_driver=None):
    """
    Return a Google DNS driver.

    The Google DNS driver uses the GCE Compute driver for all of its
    API calls. You can either provide the driver directly, or provide the
    same authentication information so the DNS driver can get its own
    Compute driver.
    """
    if gce_driver:
        driver = get_driver_dns(Provider_dns.GOOGLE)(gce_driver=gce_driver)
    else:
        driver = get_driver_dns(Provider_dns.GOOGLE)(*args, **kwargs)
    return driver


def create_mig(gce, mig_base_name, zone, template, postfix, num_instances=2):
    """
    Creates MIG, sets named ports, modifies various text with 'postfix'.

    :param gce: An initalized GCE driver.
    :type gce: :class`GCENodeDriver`

    :param zone: Zone to create Managed Instance Group in.
    :type zone: :class:`GCEZone` or ``str``

    :param template: Instance Template to use in creating MIG.
    :type template: :class:`GCEInstanceTemplate`

    :param postfix: string to append to mig name, etc. Example: 'east',
                    'central'
    :type postfix: ``str``

    :param num_instances: number of instances to create in MIG. Default is 2.
    :type num_instances: ``int``

    :returns: initialized Managed Instance Group.
    :rtype: :class:`GCEInstanceGroupManager`
    """
    mig_name = '%s-%s' % (mig_base_name, postfix)
    mig = gce.ex_create_instancegroupmanager(
        mig_name, zone, template, num_instances, base_instance_name=mig_name,
        description='Demo for %s' % postfix)
    display(' Managed Instance Group [%s] "%s" created' % (postfix.upper(),
                                                           mig.name))
    display(' ... MIG instances created: %s' %
            ','.join([x['name'] for x in mig.list_managed_instances()]))
    # set the named_ports on the Instance Group.
    named_ports = [{'name': '%s-http' % DEMO_BASE_NAME, 'port': 80}]
    mig.set_named_ports(named_ports=named_ports)
    display(' ... MIG ports set: %s' % named_ports)
    return mig


def display(title, resource_list=None):
    """
    Display a list of resources.

    :param title: String to be printed at the heading of the list.
    :type title: ``str``

    :param resource_list: List of resources to display
    :type resource_list: Any ``object`` with a C{name} attribute
    """
    # NOTE: default changed from the mutable-default anti-pattern
    # (resource_list=[]) to None; behavior for all callers is unchanged.
    if resource_list is None:
        resource_list = []
    print('=> %s' % title)
    for item in resource_list:
        if isinstance(item, Record):
            if item.name.startswith(DEMO_BASE_NAME):
                print('=> name=%s, type=%s' % (item.name, item.type))
            else:
                print(' name=%s, type=%s' % (item.name, item.type))
        elif isinstance(item, Zone):
            if item.domain.startswith(DEMO_BASE_NAME):
                print('=> name=%s, dnsname=%s' % (item.id, item.domain))
            else:
                print(' name=%s, dnsname=%s' % (item.id, item.domain))
        elif hasattr(item, 'name'):
            if item.name.startswith(DEMO_BASE_NAME):
                print('=> %s' % item.name)
            else:
                print(' %s' % item.name)
        else:
            if item.startswith(DEMO_BASE_NAME):
                print('=> %s' % item)
            else:
                print(' %s' % item)


def cleanup_only():
    start_time = datetime.datetime.now()
    display('Clean-up start time: %s' % str(start_time))
    gce = get_gce_driver()
    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)
    # == Get Lists of Everything and Display the lists (up to 10) ==
    # These can either just return values for the current datacenter (zone)
    # or for everything.
all_nodes = gce.list_nodes(ex_zone='all') display('Nodes:', all_nodes) all_addresses = gce.ex_list_addresses(region='all') display('Addresses:', all_addresses) all_volumes = gce.list_volumes(ex_zone='all') display('Volumes:', all_volumes) # This can return everything, but there is a large amount of overlap, # so we'll just get the sizes from the current zone. sizes = gce.list_sizes() display('Sizes:', sizes) # These are global firewalls = gce.ex_list_firewalls() display('Firewalls:', firewalls) networks = gce.ex_list_networks() display('Networks:', networks) images = gce.list_images() display('Images:', images) locations = gce.list_locations() display('Locations:', locations) zones = gce.ex_list_zones() display('Zones:', zones) snapshots = gce.ex_list_snapshots() display('Snapshots:', snapshots) gfrs = gce.ex_list_forwarding_rules(global_rules=True) display("Global Forwarding Rules", gfrs) targetproxies = gce.ex_list_targethttpproxies() display("Target HTTP Proxies", targetproxies) urlmaps = gce.ex_list_urlmaps() display("URLMaps", urlmaps) bes = gce.ex_list_backendservices() display("Backend Services", bes) migs = gce.ex_list_instancegroupmanagers(zone='all') display("Instance Group Managers", migs) its = gce.ex_list_instancetemplates() display("Instance Templates", its) hcs = gce.ex_list_healthchecks() display("Health Checks", hcs) # == Clean up any old demo resources == display('Cleaning up any "%s" resources' % DEMO_BASE_NAME) clean_up(gce, DEMO_BASE_NAME, None, gfrs + targetproxies + urlmaps + bes + hcs + migs + its) # == Pause to let cleanup occur and repopulate volume and node lists == if len(migs): time.sleep(10) all_volumes = gce.list_volumes(ex_zone='all') all_nodes = gce.list_nodes(ex_zone='all') clean_up(gce, DEMO_BASE_NAME, all_nodes, all_addresses + all_volumes + firewalls + networks + snapshots) volumes = gce.list_volumes() clean_up(gce, DEMO_BASE_NAME, None, volumes) end_time = datetime.datetime.now() display('Total runtime: %s' % str(end_time - 
start_time)) def clean_up(gce, base_name, node_list=None, resource_list=None): """ Destroy all resources that have a name beginning with 'base_name'. :param base_name: String with the first part of the name of resources to destroy :type base_name: ``str`` :keyword node_list: List of nodes to consider for deletion :type node_list: ``list`` of :class:`Node` :keyword resource_list: List of resources to consider for deletion :type resource_list: ``list`` of I{Resource Objects} """ if node_list is None: node_list = [] if resource_list is None: resource_list = [] # Use ex_destroy_multiple_nodes to destroy nodes del_nodes = [] for node in node_list: if node.name.startswith(base_name): del_nodes.append(node) result = gce.ex_destroy_multiple_nodes(del_nodes) for i, success in enumerate(result): if success: display(' Deleted %s' % del_nodes[i].name) else: display(' Failed to delete %s' % del_nodes[i].name) # Destroy everything else with just the destroy method for resrc in resource_list: if resrc.name.startswith(base_name): try: resrc.destroy() class_name = resrc.__class__.__name__ display(' Deleted %s (%s)' % (resrc.name, class_name)) except ResourceNotFoundError: display(' Not found: %s (%s)' % (resrc.name, resrc.__class__.__name__)) except: class_name = resrc.__class__.__name__ display(' Failed to Delete %s (%s)' % (resrc.name, class_name)) raise def main_compute(): start_time = datetime.datetime.now() display('Compute demo/test start time: %s' % str(start_time)) gce = get_gce_driver() # Get project info and print name project = gce.ex_get_project() display('Project: %s' % project.name) # == Get Lists of Everything and Display the lists (up to 10) == # These can either just return values for the current datacenter (zone) # or for everything. 
all_nodes = gce.list_nodes(ex_zone='all') display('Nodes:', all_nodes) all_addresses = gce.ex_list_addresses(region='all') display('Addresses:', all_addresses) all_volumes = gce.list_volumes(ex_zone='all') display('Volumes:', all_volumes) # This can return everything, but there is a large amount of overlap, # so we'll just get the sizes from the current zone. sizes = gce.list_sizes() display('Sizes:', sizes) # These are global firewalls = gce.ex_list_firewalls() display('Firewalls:', firewalls) subnetworks = gce.ex_list_subnetworks() display('Subnetworks:', subnetworks) networks = gce.ex_list_networks() display('Networks:', networks) images = gce.list_images() display('Images:', images) locations = gce.list_locations() display('Locations:', locations) zones = gce.ex_list_zones() display('Zones:', zones) snapshots = gce.ex_list_snapshots() display('Snapshots:', snapshots) # == Clean up any old demo resources == display('Cleaning up any "%s" resources' % DEMO_BASE_NAME) # Delete subnetworks first, networks last clean_up(gce, DEMO_BASE_NAME, None, subnetworks) clean_up(gce, DEMO_BASE_NAME, all_nodes, all_addresses + all_volumes + firewalls + snapshots + networks) # == Create a Legacy Network == display('Creating Legacy Network:') name = '%s-legacy-network' % DEMO_BASE_NAME cidr = '10.10.0.0/16' network_legacy = gce.ex_create_network(name, cidr) display(' Network %s created' % name) # == Delete the Legacy Network == display('Delete Legacy Network:') network_legacy.destroy() display(' Network %s delete' % name) # == Create an auto network == display('Creating Auto Network:') name = '%s-auto-network' % DEMO_BASE_NAME network_auto = gce.ex_create_network(name, cidr=None, mode='auto') display(' AutoNetwork %s created' % network_auto.name) # == Display subnetworks from the auto network == subnets = [] for sn in network_auto.subnetworks: subnets.append(gce.ex_get_subnetwork(sn)) display('Display subnetworks:', subnets) # == Delete the auto network == display('Delete Auto 
Network:') network_auto.destroy() display(' AutoNetwork %s deleted' % name) # == Create an custom network == display('Creating Custom Network:') name = '%s-custom-network' % DEMO_BASE_NAME network_custom = gce.ex_create_network(name, cidr=None, mode='custom') display(' Custom Network %s created' % network_custom.name) # == Create a subnetwork == display('Creating Subnetwork:') sname = '%s-subnetwork' % DEMO_BASE_NAME region = 'us-central1' cidr = '192.168.17.0/24' subnet = gce.ex_create_subnetwork(sname, cidr, network_custom, region) display(' Subnetwork %s created' % subnet.name) # Refresh object, now that it has a subnet network_custom = gce.ex_get_network(name) # == Display subnetworks from the auto network == subnets = [] for sn in network_custom.subnetworks: subnets.append(gce.ex_get_subnetwork(sn)) display('Display custom subnetworks:', subnets) # == Launch instance in custom subnetwork == display('Creating Node in custom subnetwork:') name = '%s-subnet-node' % DEMO_BASE_NAME node_1 = gce.create_node(name, 'g1-small', 'debian-8', ex_disk_auto_delete=True, ex_network=network_custom, ex_subnetwork=subnet) display(' Node %s created' % name) # == Destroy instance in custom subnetwork == display('Destroying Node in custom subnetwork:') node_1.destroy() display(' Node %s destroyed' % name) # == Delete an subnetwork == display('Delete Custom Subnetwork:') subnet.destroy() display(' Custom Subnetwork %s deleted' % sname) is_deleted = False while not is_deleted: time.sleep(3) try: subnet = gce.ex_get_subnetwork(sname, region) except ResourceNotFoundError: is_deleted = True # == Delete the auto network == display('Delete Custom Network:') network_custom.destroy() display(' Custom Network %s deleted' % name) # == Create Node with disk auto-created == if MAX_NODES > 1: display('Creating a node with boot/local-ssd using GCE structure:') name = '%s-gstruct' % DEMO_BASE_NAME img_url = "projects/debian-cloud/global/images/" img_url += "backports-debian-7-wheezy-v20141205" 
disk_type_url = "projects/%s/zones/us-central1-f/" % project.name disk_type_url += "diskTypes/local-ssd" gce_disk_struct = [ { "type": "PERSISTENT", "deviceName": '%s-gstruct' % DEMO_BASE_NAME, "initializeParams": { "diskName": '%s-gstruct' % DEMO_BASE_NAME, "sourceImage": img_url }, "boot": True, "autoDelete": True }, { "type": "SCRATCH", "deviceName": '%s-gstruct-lssd' % DEMO_BASE_NAME, "initializeParams": { "diskType": disk_type_url }, "autoDelete": True } ] node_gstruct = gce.create_node(name, 'n1-standard-1', None, 'us-central1-f', ex_disks_gce_struct=gce_disk_struct) num_disks = len(node_gstruct.extra['disks']) display(' Node %s created with %d disks' % (node_gstruct.name, num_disks)) display('Creating Node with auto-created SSD:') name = '%s-np-node' % DEMO_BASE_NAME node_1 = gce.create_node(name, 'n1-standard-1', 'debian-7', ex_tags=['libcloud'], ex_disk_type='pd-ssd', ex_disk_auto_delete=False) display(' Node %s created' % name) # Stop the node and change to a custom machine type (e.g. 
size) display('Stopping node, setting custom size, starting node:') name = '%s-np-node' % DEMO_BASE_NAME gce.ex_stop_node(node_1) gce.ex_set_machine_type(node_1, 'custom-2-4096') # 2 vCPU, 4GB RAM gce.ex_start_node(node_1) node_1 = gce.ex_get_node(name) display(' %s: state=%s, size=%s' % (name, node_1.extra['status'], node_1.size)) # == Create, and attach a disk == display('Creating a new disk:') disk_name = '%s-attach-disk' % DEMO_BASE_NAME volume = gce.create_volume(10, disk_name) if gce.attach_volume(node_1, volume, ex_auto_delete=True): display(' Attached %s to %s' % (volume.name, node_1.name)) display(' Disabled auto-delete for %s on %s' % (volume.name, node_1.name)) gce.ex_set_volume_auto_delete(volume, node_1, auto_delete=False) if CLEANUP: # == Detach the disk == if gce.detach_volume(volume, ex_node=node_1): display(' Detached %s from %s' % (volume.name, node_1.name)) # == Create Snapshot == display('Creating a snapshot from existing disk:') # Create a disk to snapshot vol_name = '%s-snap-template' % DEMO_BASE_NAME image = gce.ex_get_image('debian-7') vol = gce.create_volume(None, vol_name, image=image) display('Created disk %s to shapshot:' % DEMO_BASE_NAME) # Snapshot volume snapshot = vol.snapshot('%s-snapshot' % DEMO_BASE_NAME) display(' Snapshot %s created' % snapshot.name) # == Create Node with existing disk == display('Creating Node with existing disk:') name = '%s-persist-node' % DEMO_BASE_NAME # Use objects this time instead of names # Get latest Debian 7 image image = gce.ex_get_image('debian-7') # Get Machine Size size = gce.ex_get_size('n1-standard-1') # Create Disk from Snapshot created above volume_name = '%s-boot-disk' % DEMO_BASE_NAME volume = gce.create_volume(None, volume_name, snapshot=snapshot) display(' Created %s from snapshot' % volume.name) # Create Node with Disk node_2 = gce.create_node(name, size, image, ex_tags=['libcloud'], ex_boot_disk=volume, ex_disk_auto_delete=False) display(' Node %s created with attached disk %s' % 
(node_2.name, volume.name)) # == Update Tags for Node == display('Updating Tags for %s:' % node_2.name) tags = node_2.extra['tags'] tags.append('newtag') if gce.ex_set_node_tags(node_2, tags): display(' Tags updated for %s' % node_2.name) check_node = gce.ex_get_node(node_2.name) display(' New tags: %s' % check_node.extra['tags']) # == Setting Metadata for Node == display('Setting Metadata for %s:' % node_2.name) if gce.ex_set_node_metadata(node_2, {'foo': 'bar', 'baz': 'foobarbaz'}): display(' Metadata updated for %s' % node_2.name) check_node = gce.ex_get_node(node_2.name) display(' New Metadata: %s' % check_node.extra['metadata']) # == Create Multiple nodes at once == base_name = '%s-multiple-nodes' % DEMO_BASE_NAME number = MAX_NODES - 2 if number > 0: display('Creating Multiple Nodes (%s):' % number) multi_nodes = gce.ex_create_multiple_nodes( base_name, size, image, number, ex_tags=['libcloud'], ex_disk_auto_delete=True) for node in multi_nodes: display(' Node %s created' % node.name) # == Create a Network == display('Creating Network:') name = '%s-network' % DEMO_BASE_NAME cidr = '10.10.0.0/16' network_1 = gce.ex_create_network(name, cidr) display(' Network %s created' % network_1.name) # == Create a Firewall == display('Creating a Firewall:') name = '%s-firewall' % DEMO_BASE_NAME allowed = [{'IPProtocol': 'tcp', 'ports': ['3141']}] firewall_1 = gce.ex_create_firewall(name, allowed, network=network_1, source_tags=['libcloud']) display(' Firewall %s created' % firewall_1.name) # == Create a Static Address == display('Creating an Address:') name = '%s-address' % DEMO_BASE_NAME address_1 = gce.ex_create_address(name) display(' Address %s created with IP %s' % (address_1.name, address_1.address)) # == List Updated Resources in current zone/region == display('Updated Resources in current zone/region') nodes = gce.list_nodes() display('Nodes:', nodes) addresses = gce.ex_list_addresses() display('Addresses:', addresses) firewalls = gce.ex_list_firewalls() 
display('Firewalls:', firewalls) subnetworks = gce.ex_list_subnetworks() display('Subnetworks:', subnetworks) networks = gce.ex_list_networks() display('Networks:', networks) snapshots = gce.ex_list_snapshots() display('Snapshots:', snapshots) if CLEANUP: display('Cleaning up %s resources created' % DEMO_BASE_NAME) clean_up(gce, DEMO_BASE_NAME, None, subnetworks) clean_up(gce, DEMO_BASE_NAME, nodes, addresses + firewalls + snapshots + networks) volumes = gce.list_volumes() clean_up(gce, DEMO_BASE_NAME, None, volumes) end_time = datetime.datetime.now() display('Total runtime: %s' % str(end_time - start_time)) # ==== LOAD BALANCER CODE STARTS HERE ==== def main_load_balancer(): start_time = datetime.datetime.now() display('Load-balancer demo/test start time: %s' % str(start_time)) gce = get_gce_driver() gcelb = get_gcelb_driver(gce) # Get project info and print name project = gce.ex_get_project() display('Project: %s' % project.name) # Existing Balancers balancers = gcelb.list_balancers() display('Load Balancers', balancers) # Protocols protocols = gcelb.list_protocols() display('Protocols', protocols) # Healthchecks healthchecks = gcelb.ex_list_healthchecks() display('Health Checks', healthchecks) # This demo is based on the GCE Load Balancing Quickstart described here: # https://developers.google.com/compute/docs/load-balancing/lb-quickstart # == Clean-up and existing demo resources == all_nodes = gce.list_nodes(ex_zone='all') firewalls = gce.ex_list_firewalls() display('Cleaning up any "%s" resources' % DEMO_BASE_NAME) clean_up(gce, DEMO_BASE_NAME, all_nodes, balancers + healthchecks + firewalls) # == Create 3 nodes to balance between == startup_script = ('apt-get -y update && ' 'apt-get -y install apache2 && ' 'hostname > /var/www/index.html') tag = '%s-www' % DEMO_BASE_NAME base_name = '%s-www' % DEMO_BASE_NAME image = gce.ex_get_image('debian-7') size = gce.ex_get_size('n1-standard-1') number = 3 display('Creating %d nodes' % number) metadata = {'items': 
[{'key': 'startup-script', 'value': startup_script}]} lb_nodes = gce.ex_create_multiple_nodes( base_name, size, image, number, ex_tags=[tag], ex_metadata=metadata, ex_disk_auto_delete=True, ignore_errors=False) display('Created Nodes', lb_nodes) # == Create a Firewall for instances == display('Creating a Firewall') name = '%s-firewall' % DEMO_BASE_NAME allowed = [{'IPProtocol': 'tcp', 'ports': ['80']}] firewall = gce.ex_create_firewall(name, allowed, target_tags=[tag]) display(' Firewall %s created' % firewall.name) # == Create a Health Check == display('Creating a HealthCheck') name = '%s-healthcheck' % DEMO_BASE_NAME # These are all the default values, but listed here as an example. To # create a healthcheck with the defaults, only name is required. hc = gcelb.ex_create_healthcheck( name, host=None, path='/', port='80', interval=5, timeout=5, unhealthy_threshold=2, healthy_threshold=2) display('Healthcheck %s created' % hc.name) # == Create Load Balancer == display('Creating Load Balancer') name = '%s-lb' % DEMO_BASE_NAME port = 80 protocol = 'tcp' algorithm = None members = lb_nodes[:2] # Only attach the first two initially healthchecks = [hc] balancer = gcelb.create_balancer(name, port, protocol, algorithm, members, ex_healthchecks=healthchecks) display(' Load Balancer %s created' % balancer.name) # == Attach third Node == display('Attaching additional node to Load Balancer') member = balancer.attach_compute_node(lb_nodes[2]) display(' Attached %s to %s' % (member.id, balancer.name)) # == Show Balancer Members == members = balancer.list_members() display('Load Balancer Members') for member in members: display(' ID: %s IP: %s' % (member.id, member.ip)) # == Remove a Member == display('Removing a Member') detached = members[0] detach = balancer.detach_member(detached) if detach: display(' Member %s detached from %s' % (detached.id, balancer.name)) # == Show Updated Balancer Members == members = balancer.list_members() display('Updated Load Balancer Members') for 
member in members: display(' ID: %s IP: %s' % (member.id, member.ip)) # == Reattach Member == display('Reattaching Member') member = balancer.attach_member(detached) display(' Member %s attached to %s' % (member.id, balancer.name)) # == Test Load Balancer by connecting to it multiple times == PAUSE = 60 display('Sleeping for %d seconds for LB members to serve...' % PAUSE) time.sleep(PAUSE) rounds = 200 url = 'http://%s/' % balancer.ip line_length = 75 display('Connecting to %s %s times' % (url, rounds)) for x in range(rounds): response = url_req.urlopen(url) if PY3: output = str(response.read(), encoding='utf-8').strip() else: output = response.read().strip() if 'www-001' in output: padded_output = output.center(line_length) elif 'www-002' in output: padded_output = output.rjust(line_length) else: padded_output = output.ljust(line_length) sys.stdout.write('\r%s' % padded_output) sys.stdout.flush() time.sleep(.25) print('') if CLEANUP: balancers = gcelb.list_balancers() healthchecks = gcelb.ex_list_healthchecks() nodes = gce.list_nodes(ex_zone='all') firewalls = gce.ex_list_firewalls() display('Cleaning up %s resources created' % DEMO_BASE_NAME) clean_up(gce, DEMO_BASE_NAME, nodes, balancers + healthchecks + firewalls) end_time = datetime.datetime.now() display('Total runtime: %s' % str(end_time - start_time)) # ==== BACKEND SERVICE LOAD BALANCER CODE STARTS HERE ==== def main_backend_service(): start_time = datetime.datetime.now() display('Backend Service w/Global Forwarding Rule demo/test start time: %s' % str(start_time)) gce = get_gce_driver() # Get project info and print name project = gce.ex_get_project() display('Project: %s' % project.name) # Based on the instructions at: # https://cloud.google.com/compute/docs/load-balancing/http/#overview zone_central = DATACENTER zone_east = BACKUP_DATACENTER it_name = '%s-instancetemplate' % DEMO_BASE_NAME mig_name = '%s-mig' % DEMO_BASE_NAME hc_name = '%s-healthcheck' % DEMO_BASE_NAME bes_name = '%s-bes' % 
DEMO_BASE_NAME urlmap_name = '%s-urlmap' % DEMO_BASE_NAME targethttpproxy_name = '%s-httptargetproxy' % DEMO_BASE_NAME address_name = '%s-address' % DEMO_BASE_NAME gfr_name = '%s-gfr' % DEMO_BASE_NAME firewall_name = '%s-firewall' % DEMO_BASE_NAME startup_script = ('apt-get -y update && ' 'apt-get -y install apache2 && ' 'echo "$(hostname)" > /var/www/html/index.html') tag = '%s-mig-www' % DEMO_BASE_NAME metadata = {'items': [{'key': 'startup-script', 'value': startup_script}]} mig_central = None mig_east = None bes = None urlmap = None tp = None address = None gfr = None firewall = None display('Create a BackendService') # == Create an Instance Template == it = gce.ex_create_instancetemplate(it_name, size='n1-standard-1', image='debian-8', network='default', metadata=metadata, tags=[tag]) display(' InstanceTemplate "%s" created' % it.name) # == Create a MIG == mig_central = create_mig(gce, mig_name, zone_central, it, 'central') mig_east = create_mig(gce, mig_name, zone_east, it, 'east') # == Create a Health Check == hc = gce.ex_create_healthcheck(hc_name, host=None, path='/', port='80', interval=30, timeout=10, unhealthy_threshold=10, healthy_threshold=1) display(' Healthcheck %s created' % hc.name) # == Create a Backend Service == be_central = gce.ex_create_backend( instance_group=mig_central.instance_group) be_east = gce.ex_create_backend(instance_group=mig_east.instance_group) bes = gce.ex_create_backendservice( bes_name, [hc], backends=[be_central, be_east], port_name='%s-http' % DEMO_BASE_NAME, protocol='HTTP', description='%s bes desc' % DEMO_BASE_NAME, timeout_sec=60, enable_cdn=False) display(' Backend Service "%s" created' % bes.name) # == Create a URLMap == urlmap = gce.ex_create_urlmap(urlmap_name, default_service=bes) display(' URLMap "%s" created' % urlmap.name) # == Create a Target (HTTP) Proxy == tp = gce.ex_create_targethttpproxy(targethttpproxy_name, urlmap) display(' TargetProxy "%s" created' % tp.name) # == Create a Static Address == address = 
gce.ex_create_address(address_name, region='global') display(' Address "%s" created with IP "%s"' % (address.name, address.address)) # == Create a Global Forwarding Rule == gfr = gce.ex_create_forwarding_rule( gfr_name, target=tp, address=address, port_range='80', description='%s libcloud forwarding rule http test' % DEMO_BASE_NAME, global_rule=True) display(' Global Forwarding Rule "%s" created' % (gfr.name)) # == Create a Firewall for instances == allowed = [{'IPProtocol': 'tcp', 'ports': ['80']}] firewall = gce.ex_create_firewall(firewall_name, allowed, target_tags=[tag]) display(' Firewall %s created' % firewall.name) # TODO(supertom): launch instances to demostrate that it works # take backends out of service. Adding in this functionality # will also add 10-15 minutes to the demo. # display("Sleeping for 10 minutes, starting at %s" % # str(datetime.datetime.now())) # time.sleep(600) if CLEANUP: display('Cleaning up %s resources created' % DEMO_BASE_NAME) clean_up(gce, DEMO_BASE_NAME, None, resource_list=[firewall, gfr, address, tp, urlmap, bes, hc, mig_central, mig_east, it]) end_time = datetime.datetime.now() display('Total runtime: %s' % str(end_time - start_time)) # ==== GOOGLE DNS CODE STARTS HERE ==== def main_dns(): start_time = datetime.datetime.now() display('DNS demo/test start time: %s' % str(start_time)) gce = get_gce_driver() gdns = get_dns_driver() # Get project info and print name project = gce.ex_get_project() display('Project: %s' % project.name) # Get list of managed zones zones = gdns.iterate_zones() display('Zones', zones) # Get list of records zones = gdns.iterate_zones() for z in zones: records = gdns.iterate_records(z) display('Records for managed zone "%s"' % z.id, records) # TODO(erjohnso): Finish this DNS section. Challenging in that you need to # own a domain, so testing will require user customization. Perhaps a new # command-line required flag unless --skip-dns is supplied. 
Also, real # e2e testing should try to do DNS lookups on new records, but DNS TTL # and propagation delays will introduce limits on what can be tested. end_time = datetime.datetime.now() display('Total runtime: %s' % str(end_time - start_time)) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Google Cloud Platform Demo / Live Test Script') parser.add_argument("--compute", help="perform compute demo / live tests", dest="compute", action="store_true") parser.add_argument("--load-balancer", help="perform load-balancer demo / live tests", dest="lb", action="store_true") parser.add_argument("--backend-service", help="perform backend-service demo / live tests", dest="bes", action="store_true") parser.add_argument("--dns", help="perform DNS demo / live tests", dest="dns", action="store_true") parser.add_argument("--cleanup-only", help="perform clean-up (skips all tests)", dest="cleanup", action="store_true") cl_args = parser.parse_args() if cl_args.cleanup: cleanup_only() else: if cl_args.compute: main_compute() if cl_args.lb: main_load_balancer() if cl_args.dns: main_dns() if cl_args.bes: main_backend_service() apache-libcloud-2.2.1/demos/example_aliyun_slb.py0000664000175000017500000000434613153541406021764 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.compute.types import Provider as NodeProvider
from libcloud.compute.providers import get_driver as get_node_driver
from libcloud.loadbalancer.providers import get_driver
from libcloud.loadbalancer.base import Algorithm, Member
from libcloud.loadbalancer.types import Provider

# Driver classes for the Aliyun Server Load Balancer and ECS services.
SLBDriver = get_driver(Provider.ALIYUN_SLB)
ECSDriver = get_node_driver(NodeProvider.ALIYUN_ECS)

region = 'cn-hangzhou'
your_access_key_id = ''
your_access_key_secret = ''

slb = SLBDriver(your_access_key_id, your_access_key_secret, region=region)
ecs = ECSDriver(your_access_key_id, your_access_key_secret, region=region)

protos = slb.list_protocols()
print('Found %d protocols: %s' % (len(protos), protos))

balancers = slb.list_balancers()
print('Found %d load balancers' % len(balancers))
print(balancers)

if len(balancers) > 0:
    # A balancer already exists -- demonstrate deleting it instead.
    b1 = balancers[0]
    print('Delete %s' % b1)
    slb.destroy_balancer(b1)
else:
    # No balancer yet: create one backed by every visible ECS node,
    # with weights increasing 50, 100, 150, ... per node.
    extra = {
        'AddressType': 'internet',
        'Bandwidth': 1,
        'StickySession': 'off',
        'HealthCheck': 'off',
    }
    nodes = ecs.list_nodes()
    print('Found %d nodes' % len(nodes))
    members = []
    for i, node in enumerate(nodes):
        members.append(Member(node.id, node.public_ips[0], 80,
                              extra={'Weight': 50 * (i + 1)}))
    new_b = slb.create_balancer('test-balancer', 80, 'http',
                                Algorithm.WEIGHTED_ROUND_ROBIN, members,
                                **extra)
    print('Created balancer %s' % new_b)
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Make a copy of this file named 'secrets.py' and add your credentials there.
# Note you can run unit tests without setting your credentials.

# Each *_PARAMS tuple below holds the positional arguments that will be
# passed to the matching provider driver's constructor (see
# compute_demo.get_demo_driver, which looks these up by provider name).

BLUEBOX_PARAMS = ('customer_id', 'api_key')
BRIGHTBOX_PARAMS = ('client_id', 'client_secret')
EC2_PARAMS = ('access_id', 'secret')
ECP_PARAMS = ('user_name', 'password')
GANDI_PARAMS = ('user',)
# Service Account Authentication
GCE_PARAMS = ('email@developer.gserviceaccount.com', 'key')
# GCE_PARAMS = ('client_id', 'client_secret')  # Installed App Authentication
# Keyword (not positional) constructor arguments for GCE.
GCE_KEYWORD_PARAMS = {'project': 'project_name'}
HOSTINGCOM_PARAMS = ('user', 'secret')
IBM_PARAMS = ('user', 'secret')
# OPENSTACK_PARAMS = ('user_name', 'api_key', secure_bool, 'host', port_int)
OPENSTACK_PARAMS = ('user_name', 'api_key', False, 'host', 8774)
OPENNEBULA_PARAMS = ('user', 'key')
OPSOURCE_PARAMS = ('user', 'password')
RACKSPACE_PARAMS = ('user', 'key')
SLICEHOST_PARAMS = ('key',)
SOFTLAYER_PARAMS = ('user', 'api_key')
VCLOUD_PARAMS = ('user', 'secret')
VOXEL_PARAMS = ('key', 'secret')
VPSNET_PARAMS = ('user', 'key')
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Demo: provision an Aliyun ECS node.  NOTE: running this creates real
# (billable) cloud resources.

import sys

from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.base import NodeAuthPassword

ECSDriver = get_driver(Provider.ALIYUN_ECS)

# Target region and API credentials (fill these in before running).
region = 'cn-hangzhou'
your_access_key_id = ''
your_access_key_secret = ''

ecs = ECSDriver(your_access_key_id, your_access_key_secret, region=region)

sizes = ecs.list_sizes()
small = sizes[1]

# Find the location object matching the configured region.
locations = ecs.list_locations()
location = None
for each in locations:
    if each.id == region:
        location = each
        break
if location is None:
    # BUG FIX: the message used to hard-code 'cn-qingdao' even though the
    # script searches for `region` ('cn-hangzhou'); report the real region.
    print('could not find %s location' % region)
    sys.exit(-1)
print(location.name)

# Prefer an Ubuntu image; otherwise fall back to the first image listed.
images = ecs.list_images()
print('Found %d images' % len(images))
for each in images:
    if 'ubuntu' in each.id.lower():
        image = each
        break
else:
    image = images[0]
print('Use image %s' % image)

# Reuse an existing security group when possible, else create one.
sgs = ecs.ex_list_security_groups()
print('Found %d security groups' % len(sgs))
if len(sgs) == 0:
    sg = ecs.ex_create_security_group(description='test')
    print('Create security group %s' % sg)
else:
    sg = sgs[0].id
    print('Use security group %s' % sg)

nodes = ecs.list_nodes()
print('Found %d nodes' % len(nodes))
if len(nodes) == 0:
    print('Starting create a new node')
    data_disk = {
        'size': 5,
        'category': ecs.disk_categories.CLOUD,
        'disk_name': 'data_disk1',
        'delete_with_instance': True,
    }
    auth = NodeAuthPassword('P@$$w0rd')
    ex_internet_charge_type = ecs.internet_charge_types.BY_TRAFFIC
    node = ecs.create_node(image=image, size=small, name='test',
                           ex_security_group_id=sg,
                           ex_internet_charge_type=ex_internet_charge_type,
                           ex_internet_max_bandwidth_out=1,
                           ex_data_disk=data_disk, auth=auth)
    print('Created node %s' % node)
    nodes = ecs.list_nodes()
for each in nodes:
    print('Found node %s' % each)
from libcloud.storage.types import Provider
from libcloud.storage.providers import get_driver

# Demo of the Aliyun OSS storage driver.  NOTE: running this reads, uploads
# and deletes real objects in the configured account.
OSSDriver = get_driver(Provider.ALIYUN_OSS)

your_access_key_id = ''
your_access_key_secret = ''
oss = OSSDriver(your_access_key_id, your_access_key_secret)

container_name = 'CONTAINER_NAME_FOR_TEST'
object_name = 'OBJECT_NAME_FOR_TEST'
local_file_path = 'LOCAL_FILE_FULL_PATH_TO_UPLOAD'
upload_object_name = 'OBJECT_NAME_FOR_UPLOAD_FILE'

for container in oss.iterate_containers():
    print('container: %s' % container)

c1 = oss.get_container(container_name)
print('Got container %s:' % c1)

objects = c1.list_objects()
count = len(objects)
print('Has %d objects' % count)

objects = oss.list_container_objects(c1, ex_prefix='en')
print('Has %d objects with prefix "en"' % len(objects))
for each in objects:
    print(each)

obj = oss.get_object(container_name, object_name)
print('Got object %s:' % obj)

# Download object
oss.download_object(obj, object_name, overwrite_existing=True)
for trunk in oss.download_object_as_stream(obj):
    print(trunk)

# Upload object
obj = oss.upload_object(local_file_path, c1, upload_object_name)

# Upload multipart
uploads = list(oss.ex_iterate_multipart_uploads(c1))
print('Found %d incompleted uploads' % len(uploads))
if len(uploads) > 0:
    oss.ex_abort_all_multipart_uploads(c1)
    print('Abort them all')


def data_iter(limit):
    """Yield ``limit`` items (0 .. limit-1) as the upload stream.

    BUG FIX: the original manual while-loop (yield, increment, then test)
    emitted one item even when ``limit`` <= 0; a range() loop handles that
    edge case and is clearer.

    NOTE(review): this yields ints, not bytes -- presumably the driver
    coerces each chunk; confirm against upload_object_via_stream docs.
    """
    for i in range(limit):
        yield i


print('Starting to upload 1MB using multipart api')
one_mb = 1024 * 1024
obj = oss.upload_object_via_stream(data_iter(one_mb), c1,
                                   upload_object_name)
print('Finish uploading')

# Delete objects
print('Delete object %s' % obj)
oss.delete_object(obj)

# Create container
# c2 = oss.create_container(container_name='20160117')
# c2 = oss.create_container(container_name='20160117',
#                           ex_location='oss-cn-beijing')
# c2_got = oss.get_container('20160117')
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example provides both a running script (invoke from command line)
# and an importable module one can play with in Interactive Mode.
#
# See docstrings for usage examples.

try:
    import secrets
except ImportError:
    secrets = None

import os.path
import sys

# Add parent dir of this file's dir to sys.path (OS-agnostically)
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__),
                                              os.path.pardir)))

from libcloud.common.types import InvalidCredsError
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

from pprint import pprint


def get_demo_driver(provider_name='RACKSPACE', *args, **kwargs):
    """An easy way to play with a driver interactively.

    # Load credentials from secrets.py:
    >>> from compute_demo import get_demo_driver
    >>> driver = get_demo_driver('RACKSPACE')

    # Or, provide credentials:
    >>> from compute_demo import get_demo_driver
    >>> driver = get_demo_driver('RACKSPACE', 'username', 'api_key')
    # Note that these parameters vary by driver ^^^

    # Do things like the demo:
    >>> driver.list_nodes()
    >>> images = driver.list_images()
    >>> sizes = driver.list_sizes()

    # And maybe do more than that:
    >>> node = driver.create_node(
    ...     name='my_first_node',
    ...     image=images[0],
    ...     size=sizes[0],
    ... )
    >>> node.destroy()
    """
    provider_name = provider_name.upper()
    DriverClass = get_driver(getattr(Provider, provider_name))

    # Fall back to credentials from secrets.py when none were passed in.
    if not args:
        args = getattr(secrets, provider_name + '_PARAMS', ())
    if not kwargs:
        kwargs = getattr(secrets, provider_name + '_KEYWORD_PARAMS', {})

    try:
        return DriverClass(*args, **kwargs)
    except InvalidCredsError:
        raise InvalidCredsError(
            'valid values should be put in secrets.py')


def main(argv):
    """Main Compute Demo

    When invoked from the command line, it will connect using secrets.py
    (see secrets.py-dist for instructions and examples), and perform the
    following tasks:

    - List current nodes
    - List available images (up to 10)
    - List available sizes (up to 10)
    """
    try:
        driver = get_demo_driver()
    except InvalidCredsError:
        e = sys.exc_info()[1]
        # BUG FIX: `e.value` is not guaranteed to be a plain string;
        # str(e) is always safe to concatenate.
        print("Invalid Credentials: " + str(e))
        return 1

    try:
        print(">> Loading nodes...")
        pprint(driver.list_nodes())
        print(">> Loading images... (showing up to 10)")
        pprint(driver.list_images()[:10])
        print(">> Loading sizes... (showing up to 10)")
        pprint(driver.list_sizes()[:10])
    except Exception:
        e = sys.exc_info()[1]
        # BUG FIX: the original did `"..." + e`, which raises TypeError on
        # Python 3 (cannot concatenate str and Exception); use str(e).
        print("A fatal error occurred: " + str(e))
        return 1

    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
generated-members=async_request,objects [VARIABLES] # Tells whether we should check for unused import in __init__ files. init-import=no # A regular expression matching names used for dummy variables (i.e. not used). dummy-variables-rgx=_|dummy # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. additional-builtins= apache-libcloud-2.2.1/example_dns.py0000664000175000017500000000204412701023453017264 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from pprint import pprint

from libcloud.dns.types import Provider
from libcloud.dns.providers import get_driver

# Look up the Zerigo DNS driver class and instantiate it with credentials.
Zerigo = get_driver(Provider.ZERIGO)
driver = Zerigo('email', 'key')

# Dump every managed zone, then the records of the first one.
zones = driver.list_zones()
pprint(zones)

first_zone = zones[0]
records = first_zone.list_records()
pprint(records)
libcloud_data = {} with open(PRICING_FILE_PATH, 'r') as libcloud_in: libcloud_data = json.loads(libcloud_in.read()) # Download the current Google Cloud Platform pricing. req = urllib2.Request(GOOGLE_CLOUD_PRICES, '') google_ext_prices = json.loads(urllib2.urlopen(req).read()) if 'gcp_price_list' not in google_ext_prices: sys.stderr.write('Google Cloud pricing data missing "gcp_price_list" node\n') sys.exit(1) # This is a map from regions used in the pricing JSON file to the regions as # reflected in the Google Cloud Platform documentation and APIs. pricing_to_region = { 'us': 'us', 'eu': 'europe', # alias for 'europe' 'europe': 'europe', 'apac': 'asia', # alias for 'asia' 'asia': 'asia', 'au': 'australia', # alias for 'australia' 'australia': 'australia' } # Initialize Google Cloud Platform regions. for _, region in pricing_to_region.iteritems(): libcloud_data['compute']['google_%s' % region] = {} # Update Google Compute Engine pricing. gcp_price_list = google_ext_prices['gcp_price_list'] gce_vm_prefix = 'CP-COMPUTEENGINE-VMIMAGE-' for name, prices in gcp_price_list.iteritems(): if not name.startswith(gce_vm_prefix): continue short_name = name[len(gce_vm_prefix):] machine_type = short_name.lower() for key, price in prices.iteritems(): if key in pricing_to_region: region = pricing_to_region[key] libcloud_data['compute']['google_%s' % region][machine_type] = price # Update last-modified timestamp. libcloud_data['updated'] = int(time.time()) # Write updated price list. with open(PRICING_FILE_PATH, 'w') as libcloud_out: json_str = simplejson.dumps(libcloud_data, indent=4 * ' ', item_sort_key=utils.sortKeysNumerically) libcloud_out.write(json_str) if __name__ == '__main__': sys.exit(main(sys.argv)) apache-libcloud-2.2.1/contrib/migrate_paths.sh0000775000175000017500000000430512701023453021243 0ustar kamikami00000000000000#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
# See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Script for migrating from the old style libcloud paths (pre 0.5) to the new
# ones.
# THIS SCRIPT WILL MODIFY FILES IN PLACE. BE SURE TO BACKUP THEM BEFORE RUNNING
# IT. LIBCLOUD TEAM CANNOT BE RESPONSIBLE FOR ANY DAMAGE CAUSED BY THIS SCRIPT.

# Note: If you are on OS X / FreeBSD, you need to install GNU sed.

# Directory whose *.py files will be rewritten in place.
DIRECTORY=$1

# Pick the first available sed variant (gsed / gnused are the GNU builds
# installed on OS X / FreeBSD; plain sed is used as the fallback).
SED=$(which gsed gnused sed)

for value in $SED
do
    SED=${value}
    break
done

# BUG FIX: the unquoted `[ ! $DIRECTORY ]` test breaks for values with
# spaces; -z with quoting is the correct emptiness test.  The usage line
# also lost its <directory> placeholder.
if [ -z "$DIRECTORY" ]; then
    echo "Usage: ./migrate_paths.sh <directory>"
    exit 1
fi

OLD_PATHS[0]="libcloud.base"
OLD_PATHS[1]="libcloud.deployment"
OLD_PATHS[2]="libcloud.drivers"
OLD_PATHS[3]="libcloud.ssh"
OLD_PATHS[4]="libcloud.types"
OLD_PATHS[5]="libcloud.providers"

UPDATED_PATHS[0]="libcloud.compute.base"
UPDATED_PATHS[1]="libcloud.compute.deployment"
UPDATED_PATHS[2]="libcloud.compute.drivers"
UPDATED_PATHS[3]="libcloud.compute.ssh"
UPDATED_PATHS[4]="libcloud.compute.types"
UPDATED_PATHS[5]="libcloud.compute.providers"

# Rewrite both `from X import ...` and `import X` forms for each old path.
for (( i = 0 ; i < ${#OLD_PATHS[@]} ; i++ ))
do
    old_path=${OLD_PATHS[$i]}
    new_path=${UPDATED_PATHS[$i]}

    cmd1="find ${DIRECTORY} -name '*.py' -type f -print0 | xargs -0 ${SED} -i -e 's/^from ${old_path} import/from ${new_path} import/g'"
    cmd2="find ${DIRECTORY} -name '*.py' -type f -print0 | xargs -0 ${SED} -i -e 's/^import ${old_path}/import ${new_path}/g'"

    echo "Migrating: ${old_path} -> ${new_path}"
    eval "$cmd1"
    eval "$cmd2"
done
# # It works in two steps: # # 1. Resize all the provider logo files (reduce the dimensions) # 2. Assemble a final image from the resized images import os import sys import argparse import subprocess import random from os.path import join as pjoin DIMENSIONS = '150x150' # Dimensions of the resized image (x) GEOMETRY = '+4+4' # How to arrange images (++) TO_CREATE_DIRS = ['resized/', 'final/'] def setup(output_path): """ Create missing directories. """ for directory in TO_CREATE_DIRS: final_path = pjoin(output_path, directory) if not os.path.exists(final_path): os.makedirs(final_path) def get_logo_files(input_path): logo_files = os.listdir(input_path) logo_files = [name for name in logo_files if 'resized' not in name and name.endswith('png')] logo_files = [pjoin(input_path, name) for name in logo_files] return logo_files def resize_images(logo_files, output_path): resized_images = [] for logo_file in logo_files: name, ext = os.path.splitext(os.path.basename(logo_file)) new_name = '%s%s' % (name, ext) out_name = pjoin(output_path, 'resized/', new_name) print 'Resizing image: %(name)s' % {'name': logo_file} values = {'name': logo_file, 'out_name': out_name, 'dimensions': DIMENSIONS} cmd = 'convert %(name)s -resize %(dimensions)s %(out_name)s' cmd = cmd % values subprocess.call(cmd, shell=True) resized_images.append(out_name) return resized_images def assemble_final_image(resized_images, output_path): final_name = pjoin(output_path, 'final/logos.png') random.shuffle(resized_images) values = {'images': ' '.join(resized_images), 'geometry': GEOMETRY, 'out_name': final_name} cmd = 'montage %(images)s -geometry %(geometry)s %(out_name)s' cmd = cmd % values print 'Generating final image: %(name)s' % {'name': final_name} subprocess.call(cmd, shell=True) def main(input_path, output_path): if not os.path.exists(input_path): print('Path doesn\'t exist: %s' % (input_path)) sys.exit(2) if not os.path.exists(output_path): print('Path doesn\'t exist: %s' % (output_path)) 
sys.exit(2) logo_files = get_logo_files(input_path=input_path) setup(output_path=output_path) resized_images = resize_images(logo_files=logo_files, output_path=output_path) assemble_final_image(resized_images=resized_images, output_path=output_path) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Assemble provider logos ' ' in a single image') parser.add_argument('--input-path', action='store', help='Path to directory which contains provider ' 'logo files') parser.add_argument('--output-path', action='store', help='Path where the new files will be written') args = parser.parse_args() input_path = os.path.abspath(args.input_path) output_path = os.path.abspath(args.output_path) main(input_path=input_path, output_path=output_path) apache-libcloud-2.2.1/contrib/utils.py0000664000175000017500000000267312701023453017575 0ustar kamikami00000000000000#!/usr/bin/python # # Copyright 2015 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ################################################################################ import re def splitStringWithNumbers(string): """Splits input string into a list of items, numeric and otherwise. Returns a list of values, each either an interpreted number, or a substring of the original input. E.g., 'abc-123-def' => ['abc-', 123, '-def'] """ rawParts = re.split(r'(\d+)', string) # Filter out empty strings. nonEmptyParts = filter(None, rawParts) # Convert any numeric strings to numbers. 
def splitHelper(nonEmptyParts): for part in nonEmptyParts: if re.match(r'\d+', part): yield int(part) else: yield part return list(splitHelper(nonEmptyParts)) def sortKeysNumerically(key_value): key, value = key_value return splitStringWithNumbers(key) apache-libcloud-2.2.1/contrib/generate_contributor_list.py0000775000175000017500000001403012701023453023705 0ustar kamikami00000000000000#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # Script which generates markdown formatted list of contributors. It generates # this list by parsing the "CHANGES" file. # # Usage: # # 1. Generate a list of contributors with tickets for all versions: # # ./contrib/generate_contributor_list.py --changes-path=CHANGES.rst \ # --include-tickets # # 2. Generate a list of contributors for a release without tickets # # ./contrib/generate_contributor_list.py --changes-path=CHANGES.rst \ # --versions=0.13.0 # 3. 
Generate a list of contributors with tickets for multiple versions # # ./contrib/generate_contributor_list.py --changes-path=CHANGES.rst \ # --include-tickets # --versions 0.11.0 0.12.0 from __future__ import with_statement import re import argparse from collections import defaultdict JIRA_URL = 'https://issues.apache.org/jira/browse/LIBCLOUD-%s' GITHUB_URL = 'https://github.com/apache/libcloud/pull/%s' def parse_changes_file(file_path, versions=None): """ Parse CHANGES file and return a dictionary with contributors. Dictionary maps contributor name to the JIRA tickets or Github pull requests the user has worked on. """ # Maps contributor name to a list of JIRA tickets contributors_map = defaultdict(set) in_entry = False active_version = None active_tickets = [] with open(file_path, 'r') as fp: for line in fp: line = line.strip() match = re.search(r'Changes with Apache Libcloud ' '(\d+\.\d+\.\d+(-\w+)?).*?$', line) if match: active_version = match.groups()[0] if versions and active_version not in versions: continue if line.startswith('-') or line.startswith('*)'): in_entry = True active_tickets = [] if in_entry and line == '': in_entry = False if in_entry: match = re.search(r'\((.+?)\)$', line) if match: active_tickets = match.groups()[0] active_tickets = active_tickets.split(', ') active_tickets = [ticket for ticket in active_tickets if ticket.startswith('LIBCLOUD-') or ticket.startswith('GITHUB-')] match = re.search(r'^\[(.+?)\]$', line) if match: contributors = match.groups()[0] contributors = contributors.split(',') contributors = [name.strip() for name in contributors] for name in contributors: name = name.title() contributors_map[name].update(set(active_tickets)) return contributors_map def convert_to_markdown(contributors_map, include_tickets=False): # Contributors are sorted in ascending lexiographical order based on their # last name def compare(item1, item2): lastname1 = item1.split(' ')[-1].lower() lastname2 = item2.split(' ')[-1].lower() return 
cmp(lastname1, lastname2) names = contributors_map.keys() names = sorted(names, cmp=compare) result = [] for name in names: tickets = contributors_map[name] tickets_string = [] for ticket in tickets: if '-' not in ticket: # Invalid ticket number continue number = ticket.split('-')[1] if ticket.startswith('LIBCLOUD-'): url = JIRA_URL % (number) elif ticket.startswith('GITHUB-') or ticket.startswith('GH-'): url = GITHUB_URL % (number) values = {'ticket': ticket, 'url': url} tickets_string.append('[%(ticket)s](%(url)s)' % values) tickets_string = ', '.join(tickets_string) if include_tickets: line = '* %(name)s: %(tickets)s' % {'name': name, 'tickets': tickets_string} else: line = '* %(name)s' % {'name': name} result.append(line.strip()) result = '\n'.join(result) return result if __name__ == '__main__': parser = argparse.ArgumentParser(description='Assemble provider logos ' ' in a single image') parser.add_argument('--changes-path', action='store', required=True, help='Path to the changes file') parser.add_argument('--versions', action='store', nargs='+', type=str, help='Only return contributors for the provided ' 'versions') parser.add_argument('--include-tickets', action='store_true', default=False, help='Include ticket numbers') args = parser.parse_args() contributors_map = parse_changes_file(file_path=args.changes_path, versions=args.versions) markdown = convert_to_markdown(contributors_map=contributors_map, include_tickets=args.include_tickets) print(markdown) apache-libcloud-2.2.1/contrib/pre-commit.sh0000775000175000017500000000211612701023453020466 0ustar kamikami00000000000000#!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. 
The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. files=$(git diff --cached --name-status | grep -v ^D | awk '$1 $2 { print $2}' | grep -e .py$) array=(${files/// }) for file in "${array[@]}" do if [[ ${file} =~ "libcloud/test/" ]]; then flake8 --max-line-length=160 ${file} else flake8 ${file} fi done apache-libcloud-2.2.1/contrib/utils_test.py0000664000175000017500000000344012701023453020625 0ustar kamikami00000000000000#!/usr/bin/python # # Copyright 2015 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ################################################################################ import simplejson import unittest import utils class SplitStringToAlphaNumTest(unittest.TestCase): def testInitial(self): self.assertEqual(utils.splitStringWithNumbers('12-abc'), [12, '-abc']) def testMiddle(self): self.assertEqual(utils.splitStringWithNumbers('abc-345-def'), ['abc-', 345, '-def']) def testFinal(self): self.assertEqual(utils.splitStringWithNumbers('xyz-42'), ['xyz-', 42]) def testMultiple(self): self.assertEqual(utils.splitStringWithNumbers('Aaa-123-Bbb-456-Ccc'), ['Aaa-', 123, '-Bbb-', 456, '-Ccc']) class SortKeysNumericallyTest(unittest.TestCase): def testSimple(self): input = { 'a-1': 1, 'a-12': 12, 'a-2': 2, } output = """\ { "a-1": 1, "a-2": 2, "a-12": 12 }\ """ self.assertEqual( simplejson.dumps(input, indent=4 * ' ', item_sort_key=utils.sortKeysNumerically), output) if __name__ == '__main__': unittest.main() apache-libcloud-2.2.1/contrib/run_tests.sh0000775000175000017500000000240412701023453020440 0ustar kamikami00000000000000#!/usr/bin/env bash # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # The script needs docker to be installed. 
# # Script which builds a testing docker image and container, # install the necessary packages: pypy, pip, python versions # 2.6, 2.7, 3.2, 3.3, 3.4 (corresponding dev packages as well) # and run the tox tests within the container. # The script has to be run from libcloud root. set -e docker build -f contrib/Dockerfile -t libcloud_runtest_img . docker run --rm --name libcloud_runtest_instance libcloud_runtest_img apache-libcloud-2.2.1/contrib/trigger_rtd_build.py0000775000175000017500000000171212701023453022124 0ustar kamikami00000000000000#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import urllib2 key = sys.argv[1] url = 'https://readthedocs.org/build/%s' % (key) req = urllib2.Request(url, '') f = urllib2.urlopen(req) print f.read() apache-libcloud-2.2.1/contrib/generate_provider_feature_matrix_table.py0000775000175000017500000004450213154002631026405 0ustar kamikami00000000000000#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement import os import sys import inspect from collections import OrderedDict from os.path import join as pjoin this_dir = os.path.abspath(os.path.split(__file__)[0]) sys.path.insert(0, os.path.join(this_dir, '../')) from libcloud.compute .base import NodeDriver from libcloud.compute.providers import get_driver as get_compute_driver from libcloud.compute.providers import DRIVERS as COMPUTE_DRIVERS from libcloud.compute.types import Provider as ComputeProvider from libcloud.loadbalancer.base import Driver as LBDriver from libcloud.loadbalancer.providers import get_driver as get_lb_driver from libcloud.loadbalancer.providers import DRIVERS as LB_DRIVERS from libcloud.loadbalancer.types import Provider as LBProvider from libcloud.storage.base import StorageDriver from libcloud.storage.providers import get_driver as get_storage_driver from libcloud.storage.providers import DRIVERS as STORAGE_DRIVERS from libcloud.storage.types import Provider as StorageProvider from libcloud.dns.base import DNSDriver from libcloud.dns.providers import get_driver as get_dns_driver from libcloud.dns.providers import DRIVERS as DNS_DRIVERS from libcloud.dns.types import Provider as DNSProvider from libcloud.container.base import ContainerDriver from libcloud.container.providers import get_driver as get_container_driver from libcloud.container.providers import DRIVERS as CONTAINER_DRIVERS from 
libcloud.container.types import Provider as ContainerProvider from libcloud.backup.base import BackupDriver from libcloud.backup.providers import get_driver as get_backup_driver from libcloud.backup.providers import DRIVERS as BACKUP_DRIVERS from libcloud.backup.types import Provider as BackupProvider HEADER = ('.. NOTE: This file has been generated automatically using ' 'generate_provider_feature_matrix_table.py script, don\'t manually ' 'edit it') BASE_API_METHODS = { 'compute_main': ['list_nodes', 'create_node', 'reboot_node', 'destroy_node', 'list_images', 'list_sizes', 'deploy_node'], 'compute_image_management': ['list_images', 'get_image', 'create_image', 'delete_image', 'copy_image'], 'compute_block_storage': ['list_volumes', 'create_volume', 'destroy_volume', 'attach_volume', 'detach_volume', 'list_volume_snapshots', 'create_volume_snapshot'], 'compute_key_pair_management': ['list_key_pairs', 'get_key_pair', 'create_key_pair', 'import_key_pair_from_string', 'import_key_pair_from_file', 'delete_key_pair'], 'loadbalancer': ['create_balancer', 'list_balancers', 'balancer_list_members', 'balancer_attach_member', 'balancer_detach_member', 'balancer_attach_compute_node'], 'storage_main': ['list_containers', 'list_container_objects', 'iterate_containers', 'iterate_container_objects', 'create_container', 'delete_container', 'upload_object', 'upload_object_via_stream', 'download_object', 'download_object_as_stream', 'delete_object'], 'storage_cdn': ['enable_container_cdn', 'enable_object_cdn', 'get_container_cdn_url', 'get_object_cdn_url'], 'dns': ['list_zones', 'list_records', 'iterate_zones', 'iterate_records', 'create_zone', 'update_zone', 'create_record', 'update_record', 'delete_zone', 'delete_record'], 'container': ['install_image', 'list_images', 'deploy_container', 'get_container', 'start_container', 'stop_container', 'restart_container', 'destroy_container', 'list_containers', 'list_locations', 'create_cluster', 'destroy_cluster', 'list_clusters'], 
'backup': ['get_supported_target_types', 'list_targets', 'create_target', 'create_target_from_node', 'create_target_from_storage_container', 'update_target', 'delete_target', 'list_recovery_points', 'recover_target', 'recover_target_out_of_place', 'list_target_jobs', 'create_target_job', 'resume_target_job', 'suspend_target_job', 'cancel_target_job'] } FRIENDLY_METHODS_NAMES = { 'compute_main': { 'list_nodes': 'list nodes', 'create_node': 'create node', 'reboot_node': 'reboot node', 'destroy_node': 'destroy node', 'list_images': 'list images', 'list_sizes': 'list sizes', 'deploy_node': 'deploy node' }, 'compute_image_management': { 'list_images': 'list images', 'get_image': 'get image', 'create_image': 'create image', 'copy_image': 'copy image', 'delete_image': 'delete image' }, 'compute_block_storage': { 'list_volumes': 'list volumes', 'create_volume': 'create volume', 'destroy_volume': 'destroy volume', 'attach_volume': 'attach volume', 'detach_volume': 'detach volume', 'list_volume_snapshots': 'list snapshots', 'create_volume_snapshot': 'create snapshot' }, 'compute_key_pair_management': { 'list_key_pairs': 'list key pairs', 'get_key_pair': 'get key pair', 'create_key_pair': 'create key pair', 'import_key_pair_from_string': 'import public key from string', 'import_key_pair_from_file': 'import public key from file', 'delete_key_pair': 'delete key pair' }, 'loadbalancer': { 'create_balancer': 'create balancer', 'list_balancers': 'list balancers', 'balancer_list_members': 'list members', 'balancer_attach_member': 'attach member', 'balancer_detach_member': 'detach member', 'balancer_attach_compute_node': 'attach compute node' }, 'storage_main': { 'list_containers': 'list containers', 'list_container_objects': 'list objects', 'create_container': 'create container', 'delete_container': 'delete container', 'upload_object': 'upload object', 'upload_object_via_stream': 'streaming object upload', 'download_object': 'download object', 'download_object_as_stream': 
'streaming object download', 'delete_object': 'delete object' }, 'storage_cdn': { 'enable_container_cdn': 'enable container cdn', 'enable_object_cdn': 'enable object cdn', 'get_container_cdn_url': 'get container cdn URL', 'get_object_cdn_url': 'get object cdn URL', }, 'dns': { 'list_zones': 'list zones', 'list_records': 'list records', 'create_zone': 'create zone', 'update_zone': 'update zone', 'create_record': 'create record', 'update_record': 'update record', 'delete_zone': 'delete zone', 'delete_record': 'delete record' }, 'container': { 'install_image': 'install image', 'list_images': 'list images', 'deploy_container': 'deploy container', 'get_container': 'get container', 'list_containers': 'list containers', 'start_container': 'start container', 'stop_container': 'stop container', 'restart_container': 'restart container', 'destroy_container': 'destroy container', 'list_locations': 'list locations', 'create_cluster': 'create cluster', 'destroy_cluster': 'destroy cluster', 'list_clusters': 'list clusters' }, 'backup': { 'get_supported_target_types': 'get supported target types', 'list_targets': 'list targets', 'create_target': 'create target', 'create_target_from_node': 'create target from node', 'create_target_from_storage_container': 'create target from storage container', 'update_target': 'update target', 'delete_target': 'delete target', 'list_recovery_points': 'list recovery points', 'recover_target': 'recover target', 'recover_target_out_of_place': 'recover target out of place', 'list_target_jobs': 'list target jobs', 'create_target_job': 'create target job', 'resume_target_job': 'resume target job', 'suspend_target_job': 'suspend target job', 'cancel_target_job': 'cancel target job' } } IGNORED_PROVIDERS = [ 'dummy', # Deprecated constants 'cloudsigma_us', 'cloudfiles_swift' ] def get_provider_api_names(Provider): names = [key for key, value in Provider.__dict__.items() if not key.startswith('__')] return names def generate_providers_table(api): result = 
{} if api in ['compute_main', 'compute_image_management', 'compute_block_storage', 'compute_key_pair_management']: driver = NodeDriver drivers = COMPUTE_DRIVERS provider = ComputeProvider get_driver_method = get_compute_driver elif api == 'loadbalancer': driver = LBDriver drivers = LB_DRIVERS provider = LBProvider get_driver_method = get_lb_driver elif api in ['storage_main', 'storage_cdn']: driver = StorageDriver drivers = STORAGE_DRIVERS provider = StorageProvider get_driver_method = get_storage_driver elif api == 'dns': driver = DNSDriver drivers = DNS_DRIVERS provider = DNSProvider get_driver_method = get_dns_driver elif api == 'container': driver = ContainerDriver drivers = CONTAINER_DRIVERS provider = ContainerProvider get_driver_method = get_container_driver elif api == 'backup': driver = BackupDriver drivers = BACKUP_DRIVERS provider = BackupProvider get_driver_method = get_backup_driver else: raise Exception('Invalid api: %s' % (api)) names = get_provider_api_names(provider) result = OrderedDict() for name in names: enum = getattr(provider, name) try: cls = get_driver_method(enum) except Exception as e: # Deprecated providers throw an exception print('Ignoring deprecated constant "%s": %s' % (enum, str(e))) continue # Hack for providers which expose multiple classes and support multiple # API versions # TODO: Make entry per version if name.lower() == 'cloudsigma': from libcloud.compute.drivers.cloudsigma import \ CloudSigma_2_0_NodeDriver cls = CloudSigma_2_0_NodeDriver elif name.lower() == 'opennebula': from libcloud.compute.drivers.opennebula import \ OpenNebula_3_8_NodeDriver cls = OpenNebula_3_8_NodeDriver elif name.lower() == 'digital_ocean' and api.startswith('compute'): from libcloud.compute.drivers.digitalocean import \ DigitalOcean_v2_NodeDriver cls = DigitalOcean_v2_NodeDriver if name.lower() in IGNORED_PROVIDERS: continue driver_methods = dict(inspect.getmembers(cls, predicate=inspect.isfunction)) base_methods = dict(inspect.getmembers(driver, 
predicate=inspect.isfunction)) base_api_methods = BASE_API_METHODS[api] result[name] = {'name': cls.name, 'website': cls.website, 'constant': name, 'module': drivers[enum][0], 'class': drivers[enum][1], 'cls': cls, 'methods': {}} for method_name in base_api_methods: base_method = base_methods[method_name] driver_method = driver_methods[method_name] if method_name == 'deploy_node': features = getattr(cls, 'features', {}).get('create_node', []) is_implemented = len(features) >= 1 else: is_implemented = (id(driver_method) != id(base_method)) result[name]['methods'][method_name] = is_implemented return result def generate_rst_table(data): cols = len(data[0]) col_len = [max(len(r[i]) for r in data) for i in range(cols)] formatter = ' '.join('{:<%d}' % c for c in col_len) header = formatter.format(*['=' * c for c in col_len]) rows = [formatter.format(*row) for row in data] result = header + '\n' + rows[0] + '\n' + header + '\n' +\ '\n'.join(rows[1:]) + '\n' + header return result def generate_supported_methods_table(api, provider_matrix): base_api_methods = BASE_API_METHODS[api] data = [] header = [FRIENDLY_METHODS_NAMES[api][method_name] for method_name in base_api_methods if not method_name.startswith('iterate_')] data.append(['Provider'] + header) for provider, values in sorted(provider_matrix.items()): provider_name = '`%s`_' % (values['name']) row = [provider_name] # TODO: Make it nicer # list_* methods don't need to be implemented if iterate_* methods are # implemented if api == 'storage_main': if values['methods']['iterate_containers']: values['methods']['list_containers'] = True if values['methods']['iterate_container_objects']: values['methods']['list_container_objects'] = True elif api == 'dns': # list_zones and list_records don't need to be implemented if if values['methods']['iterate_zones']: values['methods']['list_zones'] = True if values['methods']['iterate_records']: values['methods']['list_records'] = True for method in base_api_methods: # TODO: ghetto 
if method.startswith('iterate_'): continue supported = values['methods'][method] if supported: row.append('yes') else: row.append('no') data.append(row) result = generate_rst_table(data) result += '\n\n' for provider, values in sorted(provider_matrix.items()): result += '.. _`%s`: %s\n' % (values['name'], values['website']) return result def generate_supported_providers_table(api, provider_matrix): data = [] header = ['Provider', 'Documentation', 'Provider Constant', 'Supported Regions', 'Module', 'Class Name'] data.append(header) for provider, values in sorted(provider_matrix.items()): name_str = '`%s`_' % (values['name']) module_str = ':mod:`%s`' % (values['module']) class_str = ':class:`%s`' % (values['class']) params = {'api': api, 'provider': provider.lower()} driver_docs_path = pjoin(this_dir, '../docs/%(api)s/drivers/%(provider)s.rst' % params) if os.path.exists(driver_docs_path): docs_link = ':doc:`Click `' % params else: docs_link = '' cls = values['cls'] supported_regions = cls.list_regions() if hasattr(cls, 'list_regions') \ else None if supported_regions: # Sort the regions to achieve stable output supported_regions = sorted(supported_regions) supported_regions = ', '.join(supported_regions) else: supported_regions = 'single region driver' row = [name_str, docs_link, values['constant'], supported_regions, module_str, class_str] data.append(row) result = generate_rst_table(data) result += '\n\n' for provider, values in sorted(provider_matrix.items()): result += '.. 
_`%s`: %s\n' % (values['name'], values['website']) return result def generate_tables(): apis = BASE_API_METHODS.keys() for api in apis: result = generate_providers_table(api) docs_dir = api if api.startswith('compute'): docs_dir = 'compute' elif api.startswith('storage'): docs_dir = 'storage' supported_providers = generate_supported_providers_table(docs_dir, result) supported_methods = generate_supported_methods_table(api, result) current_path = os.path.dirname(__file__) target_dir = os.path.abspath(pjoin(current_path, '../docs/%s/' % (docs_dir))) file_name_1 = '_supported_providers.rst' file_name_2 = '_supported_methods.rst' if api == 'compute_main': file_name_2 = '_supported_methods_main.rst' elif api == 'compute_image_management': file_name_2 = '_supported_methods_image_management.rst' elif api == 'compute_block_storage': file_name_2 = '_supported_methods_block_storage.rst' elif api == 'compute_key_pair_management': file_name_2 = '_supported_methods_key_pair_management.rst' elif api == 'storage_main': file_name_2 = '_supported_methods_main.rst' elif api == 'storage_cdn': file_name_2 = '_supported_methods_cdn.rst' supported_providers_path = pjoin(target_dir, file_name_1) supported_methods_path = pjoin(target_dir, file_name_2) with open(supported_providers_path, 'w') as fp: fp.write(HEADER + '\n\n') fp.write(supported_providers) with open(supported_methods_path, 'w') as fp: fp.write(HEADER + '\n\n') fp.write(supported_methods) generate_tables() apache-libcloud-2.2.1/contrib/apply-patch.ps10000664000175000017500000000063213153541406020730 0ustar kamikami00000000000000param ($pull_request) Write-Host "Applying patch ${pull_request}" git checkout trunk Invoke-WebRequest https://patch-diff.githubusercontent.com/raw/apache/libcloud/pull/${pull_request}.patch -OutFile ${env:temp}/${pull_request}.patch git am ${env:temp}/${pull_request}.patch $last_message = git log -1 --pretty=%B $new_message = $last_message+" Closes #${pull_request}" git commit --amend -m 
"${new_message}" apache-libcloud-2.2.1/contrib/scrape-ec2-prices.py0000775000175000017500000001510513153541406021646 0ustar kamikami00000000000000#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import re import json import time from collections import defaultdict, OrderedDict import requests import demjson LINUX_PRICING_URLS = [ # Deprecated instances (JSON format) 'https://aws.amazon.com/ec2/pricing/json/linux-od.json', # Previous generation instances (JavaScript file) 'https://a0.awsstatic.com/pricing/1/ec2/previous-generation/linux-od.min.js', # New generation instances (JavaScript file) 'https://a0.awsstatic.com/pricing/1/ec2/linux-od.min.js' ] EC2_REGIONS = [ 'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'us-gov-west-1', 'eu-west-1', 'eu-west-2', 'eu-central-1', 'ca-central-1', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1', 'ap-northeast-2', 'ap-south-1', 'sa-east-1', 'cn-north-1', ] EC2_INSTANCE_TYPES = [ 't1.micro', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'c1.medium', 'c1.xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'd2.xlarge', 
'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'cg1.4xlarge', 'g2.2xlarge', 'g2.8xlarge', 'cr1.8xlarge', 'hs1.4xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16large', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 't2.micro', 't2.small', 't2.medium', 't2.large', 'x1.32xlarge' ] # Maps EC2 region name to region name used in the pricing file REGION_NAME_MAP = { 'us-east': 'ec2_us_east', 'us-east-1': 'ec2_us_east', 'us-east-2': 'ec2_us_east_ohio', 'us-west': 'ec2_us_west', 'us-west-1': 'ec2_us_west', 'us-west-2': 'ec2_us_west_oregon', 'eu-west-1': 'ec2_eu_west', 'eu-west-2': 'ec2_eu_west_london', 'eu-ireland': 'ec2_eu_west', 'eu-central-1': 'ec2_eu_central', 'ca-central-1': 'ec2_ca_central_1', 'apac-sin': 'ec2_ap_southeast', 'ap-southeast-1': 'ec2_ap_southeast', 'apac-syd': 'ec2_ap_southeast_2', 'ap-southeast-2': 'ec2_ap_southeast_2', 'apac-tokyo': 'ec2_ap_northeast', 'ap-northeast-1': 'ec2_ap_northeast', 'ap-northeast-2': 'ec2_ap_northeast', 'ap-south-1': 'ec2_ap_south_1', 'sa-east-1': 'ec2_sa_east', 'us-gov-west-1': 'ec2_us_govwest', 'cn-north-1': 'ec2_cn_north', } INSTANCE_SIZES = [ 'micro', 'small', 'medium', 'large', 'xlarge', 'x-large', 'extra-large' ] RE_NUMERIC_OTHER = re.compile(r'(?:([0-9]+)|([-A-Z_a-z]+)|([^-0-9A-Z_a-z]+))') BASE_PATH = os.path.dirname(os.path.abspath(__file__)) PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json') PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH) def scrape_ec2_pricing(): result = defaultdict(OrderedDict) for url in LINUX_PRICING_URLS: response = requests.get(url) if re.match('.*?\.json$', url): data = response.json() elif re.match('.*?\.js$', url): data = response.content match = re.match('^.*callback\((.*?)\);?$', data, re.MULTILINE | re.DOTALL) data = match.group(1) # demjson supports non-strict 
mode and can parse unquoted objects data = demjson.decode(data) regions = data['config']['regions'] for region_data in regions: region_name = region_data['region'] libcloud_region_name = REGION_NAME_MAP[region_name] instance_types = region_data['instanceTypes'] for instance_type in instance_types: sizes = instance_type['sizes'] for size in sizes: price = size['valueColumns'][0]['prices']['USD'] if str(price).lower() == 'n/a': # Price not available continue result[libcloud_region_name][size['size']] = float(price) return result def update_pricing_file(pricing_file_path, pricing_data): with open(pricing_file_path, 'r') as fp: content = fp.read() data = json.loads(content) data['updated'] = int(time.time()) data['compute'].update(pricing_data) # Always sort the pricing info data = sort_nested_dict(data) content = json.dumps(data, indent=4) lines = content.splitlines() lines = [line.rstrip() for line in lines] content = '\n'.join(lines) with open(pricing_file_path, 'w') as fp: fp.write(content) def sort_nested_dict(value): """ Recursively sort a nested dict. """ result = OrderedDict() for key, value in sorted(value.items(), key=sort_key_by_numeric_other): if isinstance(value, (dict, OrderedDict)): result[key] = sort_nested_dict(value) else: result[key] = value return result def sort_key_by_numeric_other(key_value): """ Split key into numeric, alpha and other part and sort accordingly. 
""" return tuple(( int(numeric) if numeric else None, INSTANCE_SIZES.index(alpha) if alpha in INSTANCE_SIZES else alpha, other ) for (numeric, alpha, other) in RE_NUMERIC_OTHER.findall(key_value[0])) def main(): print('Scraping EC2 pricing data') pricing_data = scrape_ec2_pricing() update_pricing_file(pricing_file_path=PRICING_FILE_PATH, pricing_data=pricing_data) print('Pricing data updated') if __name__ == '__main__': main() apache-libcloud-2.2.1/contrib/Dockerfile0000664000175000017500000000153312701023453020047 0ustar kamikami00000000000000FROM ubuntu:14.04 RUN set -e && \ apt-get update && \ apt-get install -y \ software-properties-common \ wget \ ssh && \ add-apt-repository ppa:fkrull/deadsnakes && \ apt-get update && \ apt-get -y install \ python2.6 \ python2.7 \ python3.2 \ python3.3 \ python3.4 \ python3.5 \ python-dev \ python2.6-dev \ python2.7-dev \ python3.2-dev \ python3.3-dev \ python3.4-dev \ python3.5-dev \ python-pip RUN set -e && \ wget https://bitbucket.org/pypy/pypy/downloads/pypy-2.5.0-linux64.tar.bz2 && \ tar xf ./pypy-2.5.0-linux64.tar.bz2 -C /opt && \ ln -s /opt/pypy-2.5.0-linux64/bin/pypy /usr/local/bin/pypy RUN set -e && \ pip install tox \ mock \ lockfile \ coverage COPY . /libcloud WORKDIR /libcloud CMD tox -e py2.6,py2.7,pypypy,py3.2,py3.3,py3.4,py3.5,lint apache-libcloud-2.2.1/README.rst0000664000175000017500000000566513155440576016133 0ustar kamikami00000000000000Apache Libcloud - a unified interface for the cloud ==================================================== .. image:: https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat :target: https://libcloud.readthedocs.org .. image:: https://img.shields.io/pypi/v/apache-libcloud.svg :target: https://pypi.python.org/pypi/apache-libcloud/ .. image:: https://img.shields.io/pypi/dm/apache-libcloud.svg :target: https://pypi.python.org/pypi/apache-libcloud/ .. image:: https://img.shields.io/travis/apache/libcloud/trunk.svg :target: http://travis-ci.org/apache/libcloud .. 
image:: https://img.shields.io/pypi/pyversions/apache-libcloud.svg :target: https://pypi.python.org/pypi/apache-libcloud/ .. image:: https://img.shields.io/pypi/wheel/apache-libcloud.svg :target: https://pypi.python.org/pypi/apache-libcloud/ .. image:: https://img.shields.io/github/license/apache/libcloud.svg :target: https://github.com/apache/libcloud/blob/trunk/LICENSE .. image:: https://img.shields.io/irc/%23libcloud.png :target: http://webchat.freenode.net/?channels=libcloud .. image:: https://bestpractices.coreinfrastructure.org/projects/152/badge :target: https://bestpractices.coreinfrastructure.org/projects/152 .. image:: https://img.shields.io/codecov/c/github/apache/libcloud/trunk.svg :target: https://codecov.io/github/apache/libcloud?branch=trunk Apache Libcloud is a Python library which hides differences between different cloud provider APIs and allows you to manage different cloud resources through a unified and easy to use API. Resources you can manage with Libcloud are divided into the following categories: * **Compute** - Cloud Servers and Block Storage - services such as Amazon EC2 and Rackspace Cloud Servers (``libcloud.compute.*``) * **Storage** - Cloud Object Storage and CDN - services such as Amazon S3 and Rackspace CloudFiles (``libcloud.storage.*``) * **Load Balancers** - Load Balancers as a Service, LBaaS (``libcloud.loadbalancer.*``) * **DNS** - DNS as a Service, DNSaaS (``libcloud.dns.*``) * **Container** - Container virtualization services (``libcloud.container.*``) Apache Libcloud is an Apache project, see for more information. Documentation ============= Documentation can be found at . Feedback ======== Please send feedback to the mailing list at , or the JIRA at . Contributing ============ For information on how to contribute, please see the Contributing chapter in our documentation License ======= Apache Libcloud is licensed under the Apache 2.0 license. For more information, please see LICENSE_ and NOTICE_ file. .. 
_LICENSE: https://github.com/apache/libcloud/blob/trunk/LICENSE .. _NOTICE: https://github.com/apache/libcloud/blob/trunk/NOTICE apache-libcloud-2.2.1/requirements-tests.txt0000664000175000017500000000021713155437715021055 0ustar kamikami00000000000000pep8>=1.7.0,<1.8 flake8>=2.5.1,<2.6 astroid>=1.4.5,<1.5 pylint>=1.5.5,<1.6 mock>=1.0.1,<1.1 codecov coverage<4.0 requests requests_mock pytest apache-libcloud-2.2.1/MANIFEST.in0000664000175000017500000000145613160513663016166 0ustar kamikami00000000000000include LICENSE include NOTICE include example_*.py include CHANGES.rst include README.rst include tox.ini include .pylintrc include requirements-tests.txt include libcloud/data/pricing.json prune libcloud/test/secrets.py include demos/* include scripts/check_file_names.sh include libcloud/test/*.py include libcloud/test/pricing_test.json include libcloud/test/secrets.py-dist include libcloud/test/common/*.py include libcloud/test/compute/*.py include libcloud/test/storage/*.py include libcloud/test/loadbalancer/*.py include libcloud/test/dns/*.py include libcloud/test/common/fixtures/*/* include libcloud/test/compute/fixtures/*/* include libcloud/test/compute/fixtures/*/*/* include libcloud/test/storage/fixtures/*/* include libcloud/test/loadbalancer/fixtures/*/* include libcloud/test/dns/fixtures/*/* apache-libcloud-2.2.1/example_storage.py0000664000175000017500000000221512701023453020144 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pprint import pprint from libcloud.storage.types import Provider from libcloud.storage.providers import get_driver CloudFiles = get_driver(Provider.CLOUDFILES) driver = CloudFiles('access key id', 'secret key', region='ord') containers = driver.list_containers() container_objects = driver.list_container_objects(containers[0]) pprint(containers) pprint(container_objects) apache-libcloud-2.2.1/example_loadbalancer.py0000664000175000017500000000466312701023453021120 0ustar kamikami00000000000000#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import time from libcloud.loadbalancer.base import Member, Algorithm from libcloud.loadbalancer.types import Provider, State from libcloud.loadbalancer.providers import get_driver def main(): cls = get_driver(Provider.RACKSPACE) driver = cls('username', 'api key', region='ord') balancers = driver.list_balancers() print(balancers) # creating a balancer which balances traffic across two # nodes: 192.168.86.1:80 and 192.168.86.2:8080. Balancer # itself listens on port 80/tcp new_balancer_name = 'testlb' + os.urandom(4).encode('hex') members = (Member(None, '192.168.86.1', 80), Member(None, '192.168.86.2', 8080)) new_balancer = driver.create_balancer(name=new_balancer_name, algorithm=Algorithm.ROUND_ROBIN, port=80, protocol='http', members=members) print(new_balancer) # wait for balancer to become ready # NOTE: in real life code add timeout to not end up in # endless loop when things go wrong on provider side while True: balancer = driver.get_balancer(balancer_id=new_balancer.id) if balancer.state == State.RUNNING: break print('sleeping for 30 seconds for balancers to become ready') time.sleep(30) # fetch list of members members = balancer.list_members() print(members) # remove first member balancer.detach_member(members[0]) # remove the balancer driver.destroy_balancer(new_balancer) if __name__ == '__main__': main() apache-libcloud-2.2.1/scripts/0000775000175000017500000000000013160535110016100 5ustar kamikami00000000000000apache-libcloud-2.2.1/scripts/check_file_names.sh0000775000175000017500000000242613160270245021707 0ustar kamikami00000000000000#!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # Script which checks there are no files which name is longer # than the allowed limit # ext4 support file name up to 255 characters long, but layering # ecrypt on top of it drops the limit to 143 characters FILE_NAME_LENGTH_LIMIT=143 FILES=$(find libcloud/ -regextype posix-egrep -regex ".*[^/]{${FILE_NAME_LENGTH_LIMIT},}") if [ "${FILES}" ]; then echo "Found files which name is longer than ${FILE_NAME_LENGTH_LIMIT} characters" echo "${FILES}" exit 1 fi exit 0 apache-libcloud-2.2.1/PKG-INFO0000664000175000017500000000245613160535110015515 0ustar kamikami00000000000000Metadata-Version: 1.1 Name: apache-libcloud Version: 2.2.1 Summary: A standard Python library that abstracts away differences among multiple cloud provider APIs. 
For more information and documentation, please see http://libcloud.apache.org Home-page: http://libcloud.apache.org/ Author: Apache Software Foundation Author-email: dev@libcloud.apache.org License: Apache License (2.0) Description: UNKNOWN Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: Console Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy apache-libcloud-2.2.1/setup.py0000664000175000017500000001350213153541406016133 0ustar kamikami00000000000000# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import os import sys from setuptools import setup from distutils.core import Command from os.path import join as pjoin try: import epydoc # NOQA has_epydoc = True except ImportError: has_epydoc = False import libcloud.utils # NOQA from libcloud.utils.dist import get_packages, get_data_files # NOQA libcloud.utils.SHOW_DEPRECATION_WARNING = False # Different versions of python have different requirements. We can't use # libcloud.utils.py3 here because it relies on backports dependency being # installed / available PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY2_pre_25 = PY2 and sys.version_info < (2, 5) PY2_pre_26 = PY2 and sys.version_info < (2, 6) PY2_pre_27 = PY2 and sys.version_info < (2, 7) PY2_pre_279 = PY2 and sys.version_info < (2, 7, 9) PY3_pre_32 = PY3 and sys.version_info < (3, 2) HTML_VIEWSOURCE_BASE = 'https://svn.apache.org/viewvc/libcloud/trunk' PROJECT_BASE_DIR = 'http://libcloud.apache.org' TEST_PATHS = ['libcloud/test', 'libcloud/test/common', 'libcloud/test/compute', 'libcloud/test/storage', 'libcloud/test/loadbalancer', 'libcloud/test/dns', 'libcloud/test/container', 'libcloud/test/backup'] DOC_TEST_MODULES = ['libcloud.compute.drivers.dummy', 'libcloud.storage.drivers.dummy', 'libcloud.dns.drivers.dummy', 'libcloud.container.drivers.dummy', 'libcloud.backup.drivers.dummy'] SUPPORTED_VERSIONS = ['2.6', '2.7', 'PyPy', '3.x'] TEST_REQUIREMENTS = [ 'mock', 'requests', 'requests_mock', 'pytest', 'pytest-runner' ] if PY2_pre_279 or PY3_pre_32: TEST_REQUIREMENTS.append('backports.ssl_match_hostname') if PY2_pre_27: unittest2_required = True else: unittest2_required = False if PY2_pre_25: version = '.'.join([str(x) for x in sys.version_info[:3]]) print('Version ' + version + ' is not supported. 
Supported versions are ' + ', '.join(SUPPORTED_VERSIONS)) sys.exit(1) def read_version_string(): version = None sys.path.insert(0, pjoin(os.getcwd())) from libcloud import __version__ version = __version__ sys.path.pop(0) return version def forbid_publish(): argv = sys.argv if 'upload'in argv: print('You shouldn\'t use upload command to upload a release to PyPi. ' 'You need to manually upload files generated using release.sh ' 'script.\n' 'For more information, see "Making a release section" in the ' 'documentation') sys.exit(1) class ApiDocsCommand(Command): description = "generate API documentation" user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): if not has_epydoc: raise RuntimeError('Missing "epydoc" package!') os.system( 'pydoctor' ' --add-package=libcloud' ' --project-name=libcloud' ' --make-html' ' --html-viewsource-base="%s"' ' --project-base-dir=`pwd`' ' --project-url="%s"' % (HTML_VIEWSOURCE_BASE, PROJECT_BASE_DIR)) forbid_publish() install_requires = ['requests'] if PY2_pre_26: install_requires.extend(['ssl', 'simplejson']) if PY2_pre_279 or PY3_pre_32: install_requires.append('backports.ssl_match_hostname') setup( name='apache-libcloud', version=read_version_string(), description='A standard Python library that abstracts away differences' + ' among multiple cloud provider APIs. 
For more information' + ' and documentation, please see http://libcloud.apache.org', author='Apache Software Foundation', author_email='dev@libcloud.apache.org', install_requires=install_requires, packages=get_packages('libcloud'), package_dir={ 'libcloud': 'libcloud', }, package_data={'libcloud': get_data_files('libcloud', parent='libcloud')}, license='Apache License (2.0)', url='http://libcloud.apache.org/', setup_requires=['pytest-runner'], tests_require=TEST_REQUIREMENTS, cmdclass={ 'apidocs': ApiDocsCommand, }, zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy'] ) apache-libcloud-2.2.1/setup.cfg0000664000175000017500000000032013160535110016225 0ustar kamikami00000000000000[wheel] universal = 1 [nosetests] exclude = TestCaseMixin [aliases] test = pytest [tool:pytest] python_classes = *Test testpaths = libcloud/test [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 apache-libcloud-2.2.1/CHANGES.rst0000664000175000017500000044151313160510051016220 0ustar kamikami00000000000000Changelog ========= Changes in Apache Libcloud 2.2.1 -------------------------------- Common ~~~~~~ - Fix an issue with installation failing on some operating system and file systems combinations (e.g. 
ecryptfs layered on top of ext4) which don't support file names longer than 143 characters. (LIBCLOUD-946, GITHUB-1112) Reported by Cyrille Verrier. [Tomaz Muraus] Compute ~~~~~~~ - [EC2] add g3 instance types [GITHUB-1101] (@zulupro) - [EC2] add 'end' to ec2 reserved_node [GITHUB-1099] (@xofer) - Decrease sleep delay (from 1.5 to 0.2 seconds) inside paramiko client which is used to prevent busy waiting while waiting for data on the channel. This should cause deploy scripts which produce a lot of output in incremental manner to finish faster. [Tomaz Muraus] - Fix a regression in the Azure ARM driver which didn't allow custom storage URI suffix to be used with create_node. (GITHUB-1110) [Lucas Di Pentima] Tests ~~~~~ - Make sure we normalize header values and cast all the numbers to strings in base connection classes used by tests. (LIBCLOUD-945, GITHUB-1111) Reported by Erich Eckner. [Tomaz Muraus] Changes in Apache Libcloud 2.2.0 -------------------------------- Common ~~~~~~ - [GCE] Scrape prices for GCE Australia Region [GITHUB-1085] (Francisco Ros) Compute ~~~~~~~ - [ARM] Add option to create static public IP [GITHUB-1091, LIBCLOUD-918] (Aki Ketolainen) - [SOFTLAYER] Add `get_image` method to class [GITHUB-1066] (Francois Regnoult) - [ARM] Add Storage support, volumes, snapshots [GITHUB-1087] (Sergey Babak) Container ~~~~~~~~~ - [DOCKER] Fixes to support TLS connection [GITHUB-1067] (johnnyWalnut) DNS ~~~ - [ROUTE53] Fix for TXT and SPF records, when user didn't escapsulate data in quotes, the API would fire error. 
As reported by @glyph [LIBCLOUD-875, GITHUB-1093] (Anthony Shaw) - [LINODE] Add priority to the extra dictionary in record instances [GITHUB-1088] (@mete0r) Load Balancer ~~~~~~~~~~~~~ - Fixed AWS ALB/ELB driver init method to instantiate nested connection object properly [LIBCLOUD-936, GITHUB-1089] (Anton Kozyrev) Storage ~~~~~~~ - [CLOUDFILES] Update OpenStackSwiftConnection to work with auth version 3.0 [GITHUB-1068] (Hakan Carlsson) - [CLOUDFILES] Add SSL URI support [GITHUB-1076, LIBCLOUD-458] (@ayleph) Changes in Apache Libcloud 2.1.0 -------------------------------- Common ~~~~~~ - [AWS] Update prices and fix some region names [GITHUB-1056] (Francisco Ros) - Fix bug in utils.decorators wrap exception method, used by vsphere driver [GITHUB-1054] (Anthony Shaw) - Use PyTest as the unit testing runner (Anthony Shaw) - Use of LXML is now disabled by defalt, use libcloud.utils.py3.DEFAULT_LXML = True to reenable. LXML has compatibility issues with a number of drivers and etree is a standard package [GITHUB-1038] (Anthony Shaw) - Switch RawResponse class to use content body instead of text body, up to 10x performance improvement for methods like StorageDriver.download_object [GITHUB-1053] (Quentin Pradet) Compute ~~~~~~~ - [OPENSTACK] Add support for Nova 2.x and Keystone 3 [GITHUB-1052] (Anthony Shaw) - [GCE] Add loadBalancingScheme parameter for ex_create_forwarding_rule method in GCE driver. [GITHUB-1079] (@sT331h0rs3) - [GCE] Fix error codes not being parsed in certain scenarios [GITHUB-1074, LIBCLOUD-925] (micafer) - [EC2] Fix node's Block Device Mapping was parsed from incorrect mapping. EbsInstanceBlockDevice is different from EbsBlockDevice. [GITHUB-1075] (Gennadiy Stas) - [GANDI] Fixes the location name in image and instance type classes [GITHUB-1065] (Sayoun) - [GCE] Fix method for create instance properties, it previously ignored the disk type parameter and defaulted to pd-standard. 
[GITHUB-1064] (Evan Carter) - Fix missing return data from EC2 billing product methods [GITHUB-1062] (Alex Misstear) - Handle [VULTR] API rate limiting [GITHUB-1058] (Francisco Ros) - Fix Kili driver not correctly fixing the auth version for openstack to 2.0_password [GITHUB-1054] (Anthony Shaw) - [EC2] Add i3 instance types for AWS [GITHUB-1038] (Stephen Mullins) - [VULTR] Extend extra dict of Vultr sizes to include additional fields (plan_type and available_locations) [GITHUB-1044] (Francisco Ros) Container ~~~~~~~~~ - New driver for Google Container Engine [GITHUB-1059] (Andy Maheshwari) - [KUBERNETES] Fix get_container method responding with None [GITHUB-1054] (Anthony Shaw) - [DOCKER] Fix for start_container method [GITHUB-1049] (@johnnyWalnut) - [DOCKER] fix add an extra check otherwise list_containers breaks with AttributeError when fromImages is specified [GITHUB-1043] (@johnnyWalnut) Storage ~~~~~~~ - [S3] Fix raise in s3.upload_object_via_stream [LIBCLOUD-914, GITHUB-1055] (Quentin Pradet) Changes in Apache Libcloud 2.0.0 -------------------------------- Common ~~~~~~ - Fix OpenStack drivers not correctly setting URLs when used with identity API, would default to 127.0.0.1 and service catalog URLs were not adhered to. [GITHUB-1037, LIBCLOUD-912, LIBCLOUD-904] (Anthony Shaw) - Fix Aliyun ECS, Load balancer and storage adapters when using unicode UTF-8 characters in the names of resources in 2.0.0rc2 < it would fail as a MalformedResponseError, Python 2.7 element tree was raising a unicode error [GITHUB-1032] [GITHUB-994] (Anthony Shaw) - Refactor the test classes to use the full libcloud.http and libcloud.common.base modules, with Connection, Response all used with requests_mock. 
This increases our test coverages and catches bugs in drivers' custom parse_body and auth modules [GITHUB-1031] (Anthony Shaw) - Rename libcloud.httplib_ssl to libcloud.http now that we don't use httplib [GITHUB-1028] (Anthony Shaw) Compute ~~~~~~~ - [GOOGLE] Add test to check that can create a GCE volume at a given location [GITHUB-1048] (Francisco Ros) - [GOOGLE] Fix GCENodeDriver.ex_get_volume() when zone param is of class GCEZone or NodeLocation [GITHUB-1047] (Francisco Ros) - [GOOGLE] Fix call to GCENodeDriver._ex_populate_volume_dict [GITHUB-1046] (Francisco Ros) - [ARM] Add support for Azure Cloud Environments as well as Locations [GITHUB-969] (Peter Amstutz) - [EC2] Add support for ModifyVolume and DescribeVolumesModifications [GITHUB-1036] (Hennadii Stas) - [ARM] Fix string representation of the VhdImage type and fix listing of Public IP addresses [GITHUB-1035] (Anthony Shaw) - [GOOGLE] Remove validation checks for guestOsFeatures [GITHUB-1034] (Max Illfelder) - [VSPHERE] Fix issue with authentication methods crashing [GITHUB-1031] (Anthony Shaw) - [ARM] Add network security groups to azure ARM [GITHUB-1033] (Joseph Hall) - [ARM] Add the ability to list resource groups [GITHUB-1032] (Joseph Hall) - Add 1&1 compute driver [LIBCLOUD-911] [GITHUB-1029] (Jasmin Gacic) - Fix Azure ARM driver condition for ex_list_publishers where location is specified [GITHUB-1030] (Joseph Hall) - Added Import Snapshot and Describe Import Snapshot to EC2 compute driver [GITHUB-1023] (Nirzari Iyer) - Add price_monthly extra param to digitalocean sizes [GITHUB-1021] (Francisco Ros) - Add aliyun ecs instance join leave security group [GITHUB-992] (Jie Ren) - Add keypair management to OnApp driver [GITHUB-1018] (Tinu Cleatus) - Add missing regions in AWS storage and compute drivers [GITHUB-1019] (Alex Misstear) - Add SR-IOV net support to images in EC2 compute driver [GITHUB-1020] (Alex Misstear) - Fix - update t2.small image size from 11 CPU to 1 [GITHUB-1022] (Francisco Ros) - 
Added Billing Product for image in EC2 compute driver [GITHUB-1024] (Nirzari Iyer) DNS ~~~ - Add OnApp driver [GITHUB-1017] [LIBCLOUD-907] (Tinu Cleatus) Changes in Apache Libcloud 2.0.0rc2 ----------------------------------- Common ~~~~~~ - Fix LIBCLOUD_DEBUG trying to decompress already decompressed responses [LIBCLOUD-910] (Anthony Shaw) - Added an integration test API and a test suite for validating functionality without mocking any libcloud subsystems [GITHUB-970] (Anthony Shaw) - Fix for Linode classes since 2.0x [GITHUB-1026] (Anthony Shaw) - Fix CertificateConnection not correctly signing requests in 2.0rc1, impacted Azure classic driver, OpenStack and Docker driver [GITHUB-1015] (Anthony Shaw) - Change Cloudscale to cloudscale.ch. [GITHUB-993] (David Halter) - Explicitly check if response is None in RawResponse class [GITHUB-1006] [LIBCLOUD-901] (Richard Xia) Compute ~~~~~~~ - Outscale SAS doc improvements and logo update [GITHUB-950] (Javier M Mellid) - [GCE] Allow preemptible instances to be created [GITHUB-954] (John Baublitz) - Add support for forcing detachment of EBS volumes to EC2 driver [GITHUB-1007] (Sergey Babak) - Fix Public IP not assigned when creating NIC on Azure ARM [GITHUB-1013] [LIBCLOUD-906] (Simone Ripamonti) - [ONAPP] Add list images support for OnApp driver [GITHUB-1011] (Tinu Cleatus) - [EC2] Add r4 instance types for AWS [GITHUB-997] (Jens Deppe) - [EC2] support for AWS eu-west-2 and ca-central-1 regions [GITHUB-1009] (Marat Komarov) - [EC2] Add P2 GPU instance types [GITHUB-996] (MJK) - [EC2] Add method to modify snapshot attribute for EC2 [GITHUB-990] (Sayan Chowdhury) - [Linode] Add start, stop instance methods and fix incorrect state TERMINATED to STOPPED [GITHUB-986] (Markos Gogoulos) - [EC2] Add ENA support for EC2 compute images [GITHUB-983] (Alex Misstear) - [Azure ARM] fix typeerror on ex_list_nics [GITHUB-979] (Choi Jongu) - [GCE] allow delete instances from managed group [GITHUB-975] (@zacharya19) Storage ~~~~~~~ - 
Reintroduce S3 multipart upload support with signature v4 [GITHUB-1005] [LIBCLOUD-834] (Alex Misstear) Changes Apache Libcloud 2.0.0rc1 -------------------------------- Common ~~~~~~ - Fix DEBUG mode, also add support for using io.StringIO as the file handle when calling libcloud.enable_debug. (GITHUB-978, LIBCLOUD-887) [Anthony Shaw] - Introduction of the requests package as the mechanism for making HTTP requests for all drivers. (GITHUB-928) [Anthony Shaw] - Fix bug where custom port and secure flag would not get propagated to connection class. (GITHUB-972) [Anthony Shaw] - Fix bug where custom port would not get propagated to connection. (GITHUB-971) [Anthony Shaw] - Fix bug where instantiating a connection from URL and then requesting an action with a leading / would lead to a malformed URL. (GITHUB-976) [Anthony Shaw] Compute ~~~~~~~ - Fix a bug in profitbricks driver where listing snapshots would request a malformed URL. [GITHUB-976] (Anthony Shaw) - Fix LIBCLOUD-806 bug where vsphere driver cannot be instantiated. (GITHUB-967) [Anthony Shaw] - [google compute] Improve performance of list nodes by caching volume information. (GITHUB-813, LIBCLOUD-826) [Tom Melendez] Changes in Apache Libcloud 1.5.0 -------------------------------- Common ~~~~~~ - Set Dimension Data compute, backup and load balancer to default to 2.4 API. (GITHUB-961) [Samuel Chong] Compute ~~~~~~~ - [azure] New method for accessing rate cards. (GITHUB-957) [Soren L. Hansen] - [gce] Allow multiple preemptible instances to be created. (GITHUB-954) [John Baublitz] - [openstack] Add new Connection class to support VOMS proxys to keystone servers. (GITHUB-959) [micafer] - [outscale] Added support for changed API for describing quotas. (GITHUB-960) [Javier M. Mellid] - [ec2] Added m4 instances to us-gov and brazil, added m4.16xlarge to all. 
(GITHUB-964) [Matthew Tyas] - Add new CloudScale.ch driver (GITHUB-951) [Dave Halter] - [google compute] Bug fix for ex_create_multiple_nodes Google Cloud disk auto delete. (GITHUB-955) [John Baublitz] - [google compute] Add "MULTI_IP_SUBNET" guestOsFeatures option. (GITHUB-956) [Max Illfelder] - [dimensiondata] Added support for 2.4 API, added support for image import, cloning. Add feature for changing NIC VLANs, add feature for changing NIC order for a server. (GITHUB-953) [Samuel Chong] - [ec2] Add US-EAST2 (Ohio). (GITHUB-946) [Matthew Harris] - [google compute] Fix to allow multiple node creation with subnets. (GITHUB-949) [John Baublitz] Container ~~~~~~~~~ - [rancher] The scheme (secure) and port no longer need to be explicitly specified, allowing a user to simply copy in the string provided to them from Rancher. (GITHUB-958) [Matthew Ellison] Changes in Apache Libcloud 1.4.0 -------------------------------- Compute ~~~~~~~ - Introduce new Azure ARM driver. [Peter Amstulz] - [ec2] Fix the bug that created the node at ecs driver and implement the method for creating public ip. (GITHUB-943) [watermelo] - [profitbricks] changes to the ProfitBricks compute driver to drop support for the old SOAP api (now end of life) and provide support for v3 of the REST api. (GITHUB-938) [Matt Finucane] - [cloudsigma] Added Warsaw (waw) region. (GITHUB-942) [Kamil Chmielewski] - [google compute] List images fix for projects > 500 images. (GITHUB-939) [Scott Crunkleton] - [ec2] Add st1 and sc1 volume types to valid types. (GITHUB-925) [Sean Goller] - [digital ocean] add ex_change_kernel in DigitalOcean_v2 driver. (GITHUB-922) [Rick van de Loo] - [digital ocean] add ex_hard_reboot in DigitalOcean_v2 driver. (GITHUB-920) [Rick van de Loo] - [openstack] add ex_start_node for the openstack driver. (GITHUB-919) [Rick van de Loo] - [vultr] Extra Attributes for Node Creation on Vultr. (GITHUB-917) [Fahri Cihan Demirci] - [vultr] Implement SSH Key Create/Delete Methods for Vultr. 
(GITHUB-914) [Fahri Cihan Demirci] - [dimension data] No longer throw error when powering off a node that is already stopped. (GITHUB-912) [Samuel Chong] - [dimension data] Refactor create_node for MCP2 to support CaaS API 2.3 feature. Can now specify Network Adapter Name for primary and additional NIC. Parameters in create_node function is tailored for MCP2. (GITHUB-902) [Samuel Chong] - Volume snapshot operations, i.e. creating, listing and deleting volume snapshots, for the Digital Ocean driver. (LIBCLOUD-861, GITHUB-909) [Fahri Cihan Demirci] - Added snapshot management to OVH compute. (GITHUB-897) [Anthony Monthe] - [GCE] Support for HTTP(S) proxies with BackendServices. (GITHUB-856) [Tom Melendez] Container ~~~~~~~~~ - [docker] As reported in the corresponding bug, the docker daemon will respond in an install_image call with all the messages produced during the procedure parsed as json docs. In that case the response headers also contain the value 'transfer-encoding':'chunked'. That kind of response can now be parsed properly by the DockerResponse parse_body method. Also, another small change is that previously the id of the new image was marked in the json document as id, but now it's marked as sha256, so the regex used to discover the id has been updated. (GITHUB-918) [Pavlos Tzianos] Load Balancing ~~~~~~~~~~~~~~ - Introduce AWS Application Load Balancer (ALB) driver. (LIBCLOUD-869, GITHUB-936) [Anton Kozyrev] - Fix bug where GCE Load balancer supposes that all VMs have public ips. (LIBCLOUD-879, GITHUB-952) [Chris Walker] Storage ~~~~~~~ - [s3] Add AP-Southeast2 as region. - [google] Prevent GCE auth to hide S3 auth. (GITHUB-921) [Quentin Pradet] - [GCS] Fixed some google_storage.py URL cleaning. (GITHUB-901) [Scott Crunkleton] Changes in Apache Libcloud 1.3.0 -------------------------------- General ~~~~~~~ - Introduced new base API for instantiating drivers. (GITHUB-822) [Anthony Shaw] - Added certificate path for SLES12/OpenSUSE12. 
(GITHUB-884) [Michael Calmer] - Deprecate DigitalOcean v1 API support in favour of v2 API. (GITHUB-889, GITHUB-892) [Andrew Starr-Bochicchio] - Deprecate RunAbove cloud drivers in favour of new OVH cloud driver. (GITHUB-891) [Anthony Monthe] Compute ~~~~~~~ - Fix reporting function for detailed admin logs in Dimension Data Driver. (GITHUB-898) [Anthony Shaw] - Added edit firewall functionality to Dimension Data driver. (GITHUB-893) [Samuel Chong] - Bugfix - Fixed listing nodes issue in Python 3. (LIBCLOUD-858, GITHUB-894) [Fahri Cihan Demirci] - Added FCU (Flexible Compute Unit) support to the Outscale driver. (GITHUB-890) [Javier M. Mellid] - [google compute] Add "WINDOWS" guestOsFeatures option. (GITHUB-861) [Max Illfelder] - When creating volumes on OpenStack with defaults for `location` or `volume_type`, newer OpenStack versions would throw errors. The OpenStack driver will now only post those arguments if non-`NoneType`. (GITHUB-857) [Allard Hoeve] - When fetching the node details of a non-existing node, OpenStack would raise a `BaseHTTPError` instead of returning `None`, as was intended. Fixed tests and code. (GITHUB-864) - Added `ex_stop_node` to the OpenStack driver. (GITHUB-865) [Allard Hoeve] - When creating volume snapshot, the arguments `name` and `description` are truely optional when working with newer OpenStack versions. The OpenStack driver will now only post thost arguments if they are non-`NoneType`. (GITHUB-866) [Allard Hoeve] - StorageVolumeSnapshot now has an attribute `name` that has the name of the snapshot if the provider supports it. This used to be `.extra['name']`, but that is inconsistent with `Node` and `StorageVolume`. The `extra` dict still holds `name` for backwards compatibility. (GITHUB-867) [Allard Hoeve] Container ~~~~~~~~~ - Introduced new Rancher driver (GITHUB-876) [Mario Loria] - Fixed bug in Docker util library for fetching images from the docker hub API. API was returning 301 and redirects were not being followed. 
(GITHUB-862) [Anthony Shaw] Load Balancer ~~~~~~~~~~~~~ - Added fetch tags support in elb driver. (GITHUB-848) [Anton Kozyrev] Storage ~~~~~~~ - Added storage permissions for Google Cloud Storage. (GITHUB-860) [Scott Crunkleton] Changes in Apache Libcloud 1.2.1 -------------------------------- Backup ~~~~~~ - Fix issue enabling backups on Dimension Data driver. (GITHUB-858) [Mark Maglana, Jeff Dunham, Anthony Shaw] Changes in Apache Libcloud 1.2.0 -------------------------------- General ~~~~~~~ - Fix caching of auth tokens in the Google Compute Engine drivers. Now we make sure that the file is truncated before writing a new token. Not truncating the file would cause issues if the new token is shorted then the existing one which is cached in the file. (GITHUB-844, LIBCLOUD-835) [Paul Tiplady] Compute ~~~~~~~ - [gce] Fix image undeprecation in GCE. (GITHUB-852) [Max Illfelder] - [gce] Added Managed Instance Groups. (GITHUB-842) [Tom Melendez] - [gce] Allow undeprecation of an image. (GITHUB-851) [Max Illfelder] - [cloudstack] BUGFIX Values with wildcards failed signature validation. (GITHUB-846) [Ronald van Zantvoot] - [cloudstack] Added StorageState-Migrating to the cloudstack driver. (GITHUB-847) [Marc-Aurèle Brothier] - [google compute] Update copy image logic to match create image. (GITHUB-828) [Max Illfelder] - Removed HD attribute from the Abiquo compute driver to support the 3.4 API. (GITHUB-840) [David Freedman] - Add image and size details to `list_nodes` response in Dimension Data driver. (GITHUB-832) [Anthony Shaw] - Add support for changing VM admin password in VMware driver. (GITHUB-833) [Juan Font Alonso] - Add Barcelona (Spain) region to the Aurora Compute driver. (GITHUB-835) [Wido den Hollander] - Various improvements in the libvirt driver. (GITHUB-838) [Rene Kjellerup] Load balancer ~~~~~~~~~~~~~ - Add support for temporary IAM role credentials (token) to the AWS ELB driver. 
(GITHUB-843) [Anton Kozyrev] DNS ~~~ - Updated the 'extra' parameter in `update_record()` to be optional in aurora driver. (GITHUB-830) [Wido den Hollander] - Support for iterating over records and zones in the Aurora DNS driver. (GITHUB-829) [Wido den Hollander] - Add support for DS, PTR, SSFHFP and TLSA record type to the Aurora DNS driver. (GITHUB-834) [Wido den Hollander] Container ~~~~~~~~~ - Add network mode and labels when creating containers within docker driver. (GITHUB-831) [Jamie Cressey] Storage ~~~~~~~ - Fix authentication issue in S3/China region, disabled multipart uploads as not supported by region. (GITHUB-839) [Luke Morfitt] Changes with Apache Libcloud 1.1.0 ---------------------------------- General ~~~~~~~ - Add support for automatic SNI (SSL extension) using the hostname supplied to connect to. Note: This functionality is only available in Python 2.7.9 and Python >= 3.2. (LIBCLOUD-827, GITHUB-808) [David Freedman] Compute ~~~~~~~ - Add support image guest OS features in GCE driver. (GITHUB-825) [Max Illfelder] - Added forceCustimization option for vcloud director driver. (GITHUB-824) [Juan Font] - Add node lookup by UUID for libvirt driver. (GITHUB-823) [Frank Wu] - Add block storage support to DigitalOcean node driver. (GITHUB-807) [Adam Wolfe Gordon] - Add SASL auth support to libvirt driver. (GITHUB-809) [Katana-Steel] - Allow VIPs in Dimension Data driver to bind to any port. (GITHUB-818) [Mark Maglana] - Add support for deleting a security group to the Aliyun ECS driver. (GITHUB-816) [Heng Wu] - Add ``ex_force_customization`` argument to the ``ex_deploy_node`` in vCloud driver. (GITHUB-824) [Juan Font] - Add support for listing attributes for a particular security group (``ex_list_security_group_attributes``) to the Aliyun ECS driver. (GITHUB-826) [Heng Wu] - Add new Mumbai, India region to the EC2 driver. [Tomaz Muraus] - Add driver for the new AWS cn-north-1 region. 
(GITHUB-827, LIBCLOUD-820) [Jamie Cressey] - Fix authentication with temporary IAM role credentials (token) in the EC2 driver. (GITHUB-820) [Alejandro González] Container ~~~~~~~~~ - Fixed API compatibility for Docker Container driver with API 1.24, set driver to use versioned URL for all communication. Backported changes to 1.21 API (GITHUB-821) [Anthony Shaw] Load Balancer ~~~~~~~~~~~~~ - Added additional parameters to the Rackspace driver in `list_balancers` for filtering and searching. (GITHUB-803) [João Paulo Raittes] Changes with Apache Libcloud 1.0.0 ---------------------------------- General ~~~~~~~ - Fix a regression with ``timeout`` argument provided via ``_ex_connection_class_kwargs`` method being overriden with ``None`` inside the ``BaseDriver`` constructor method. Reported by Jay Rolette. (GITHUB-755) [Tomaz Muraus, Jay Rolette] - Fix OpenStack v3 authentication and allow user to provide a custom value for the OpenStack ``domain`` parameter. Previously only possible value as a default value of ``Default``. (GITHUB-744) [Lionel Schaub] - Add support for authenticating against Keystone and OpenStack based clouds using OpenID Connect tokens. (GITHUB-789) [Miguel Caballer] Compute ~~~~~~~ - GCE nodes can be launched in a subnetwork (GITHUB-783) [Lars Larsson] - Add Subnetworks to GCE driver (GITHUB-780) [Eric Johnson] - Fix missing pricing data for GCE (LIBCLOUD-713, GITHUB-779) [Eric Johnson] - Add Image Family support for GCE (GITHUB-778) [Rick Wright] - Fix a race condition on GCE driver `list_nodes()`- Invoking GCE’s `list_nodes()` while some VMs are being shutdown can result in the following `libcloud.common.google.ResourceNotFoundError` exception to be raised. (GITHUB-727) [Lénaïc Huard] - Allow user to filter nodes by location by adding optional `location` argument to the `list_nodes()` method in the CloudStack driver. 
(GITHUB-737) [Lionel Schaub] - Fix OpenStack IP type resolution - make sure IP addresses are correctly categorized and assigned on `private_ips` and `public_ips` Node attribute. (GITHUB-738) [Lionel Schaub] - Add new `Perth, Australia` and `Manila, Philippines` region to the CloudSigma v2 driver. [Tomaz Muraus] - Update libvirt driver so it returns false if a non-local libvirt URL is used (right now only local instances are supported). (LIBCLOUD-820, GITHUB-788) [René Kjellerup] - Update libvirt driver to use `ip neigh` command instead of `arp` to retrieve node MAC address if `arp` command is not available or the current user doesn't have permission to use it. (LIBCLOUD-820, GITHUB-788) [René Kjellerup] - Update ``create_volume`` method in the CloudStack driver and add ``ex_volume_type`` argument to it. If this argument is provided, a volume whose name matches this argument value will be searched and selected among the available disk offerings. (GITHUB-785) [Greg Bishop] Storage ~~~~~~~ - Add support for AWS signature v4 to the Outscale storage driver. (GITHUB-736) [Javier M. Mellid] - Add new S3 RGW storage driver. (GITHUB-786, GITHUB-792) [Javier M. Mellid] Loadbalancer ~~~~~~~~~~~~ - Update AWS ELB driver to use signature version 4 for authentication. This way, the driver also works with the `eu-central-1` region. (GITHUB-796) [Tobias Paepke] DNS ~~~ - Add BuddyNS driver. (GITHUB-742) [Oltjano Terpollari] - Added DNSPod driver (https://www.dnspod.com). (GITHUB-787) [Oltjano Terpollari] Changes with Apache Libcloud 1.0.0-rc2 -------------------------------------- General ~~~~~~~ - Fix a bug with consuming stdout and stderr in the paramiko SSH client which would manifest itself under very rare condition when a consumed chunk only contained a single byte or part of a multi byte UTF-8 character. [Lakshmi Kannan, Tomaz Muraus] - Increase default chunk size from ``1024`` to ``4096`` bytes in the paramiko SSH client. 
This results in smaller number of receive calls on the average. [Tomaz Muraus] - Fix to Dimension Data API address for Middle-East and Africa (GITHUB-700) [Anthony Shaw] - Addition of Dimension Data Australia federal government region to dimension data drivers. (GITHUB-700) [Anthony Shaw] - Throw a more user-friendly exception on "No address associated with hostname". (GITHUB-711, GITHUB-714, LIBCLOUD-803) [Tomaz Muraus, Scott Crunkleton] * Remove deprecated provider constants with the region in the name and related driver classes (e.g. ``EC2_US_EAST``, etc.). Those drivers have moved to single provider constant + ``region`` constructor argument model. [Tomaz Muraus] * Introduce new `list_regions`` class method on the base driver class. This method is to be used with provider drivers which support multiple regions and ``region`` constructor argument. It allows users to enumerate available / supported regions. [Tomaz Muraus] Compute ~~~~~~~ - [dimension data] added support for VMWare tools VM information inside list_nodes responses (GITHUB-734) [Jeff Dunham] - [ec2] added ex_encrypted and ex_kms_key_id optional parameters to the create volume method (GITHUB-729) [Viktor Ognev] - [dimension data] added support for managing host anti-affinity rules, added paging support to all supported calls and added support for requesting priority ordering when creating ACL rules (GITHUB-726) [Jeff Dunham] - [openstack] when creating floating IPs, added pool_id as an optional argument (GITHUB-725) [marko-p] - [google compute] Added setMachineType method to allow for changing sizes of instances (GITHUB-721) [Eric Johnson] - [google compute] allow bypassing image search in standard project list (GITHUB-713) [Max Illfelder] - Add support for requesting a MKS token for accessing the remote console in VMware vCloud driver (GITHUB-706) [Juan Font Alonso] - Add support in VMware vCloud driver for v5.5 API, with snapshot support (GITHUB-658) [Juan Font Alonso] - Added support for adding a 
family to an image on Google Compute Driver (GITHUB-704) [Max Illfelder] - Deprecated IBM SCE, HP Helion, OpSource, Ninefold and CloudFrames drivers, removed driver code and tests. (GITHUB-701, LIBCLOUD-801) [Anthony Shaw] - Introduced error messages (`libcloud.compute.deprecated`) for deprecated drivers (GITHUB-701, LIBCLOUD-801) [Anthony Shaw] - New Compute drivers- BSNL, Indosat, Med-1, NTT-America, Internet Solutions (GITHUB-700) [Anthony Shaw] - Fix to set default signature version for AWS Seoul region to v4, removed non-supported size (hs1.xlarge) (GITHUB-684) [Geunwoo Shin] - Support filtering by location in list_nodes for dimension data compute driver fix lack of paging support (GITHUB-691) [Jeff Dunham] - Support for filtering by IPv4, IPv6, network, network domain, VLAN in Dimension data driver. (GITHUB-694) [Jeff Dunham] - Added `Node.created_at` which, on supported drivers, contains the datetime the node was first started. (GITHUB-698) [Allard Hoeve] [Rick van de Loo] - New driver for Aliyun Elastic Compute Service. (LIBCLOUD-802, GITHUB-712) [Sam Song, Heng Wu] Storage ~~~~~~~ - Added Outscale storage driver (GITHUB-730) [Javier M. Mellid] - Improvements to Google Auth for Storage and Compute and MIME bug fix (LIBCLOUD-800, GITHUB-689) [Scott Crunkleton] - Implement ``get_container``, ``get_object`` and ``upload_object_via_stream`` methods in the Backblaze B2 storage driver. Note: Backblaze API doesn't upload streaming uploads so when using ``upload_object_via_stream`` whole file is read and buffered in memory. (GITHUB-696) [Jay jshridha] - New driver for Aliyun OSS Storage Service. (LIBCLOUD-802, GITHUB-712) [Sam Song] Loadbalancer ~~~~~~~~~~~~ - New driver for Aliyun SLB Loadbalancer Service. 
(LIBCLOUD-802, GITHUB-712) [Sam Song] DNS ~~~~ - Added NearlyFreeSpeech.net (NSFN) driver [Ken Drayer] (GITHUB-733) - Added Lua DNS driver [Oltjano Terpollari] (GITHUB-732) - Added NSOne driver [Oltjano Terpollari] (GITHUB-710) - Fix a bug in the GoDaddy driver - make sure ``host`` attribute on the connection class is correctly set to the hostname. [Tomaz Muraus] - Fix handling of ``MX`` records in the Gandi driver. (GITHUB-718) [Ryan Lee] Backup ~~~~~~ - Dimension Data - added additional testing, fixed bug on client response naming, added support for adding backup clients to a backup enabled node. (GITHUB-692, GITHUB-693, GITHUB-695) [Jeff Dunham] Changes with Apache Libcloud 1.0.0-pre1 --------------------------------------- General ~~~~~~~ - Introduction of container based drivers for Docker, Rkt and Container-as-a-service providers (LIBCLOUD-781, GITHUB-666) [Anthony Shaw] - Introduce a new ``libcloud.backup`` API for Backup as a Service projects and products. (GITHUB-621) [Anthony Shaw] - Also retry failed HTTP(s) requests upon transient "read operation timed out" SSL error. (GITHUB-556, LIBCLOUD-728) [Scott Kruger] - Throw a more user-friendly exception if a client fails to establish SSL / TLS connection with a server because of an unsupported SSL / TLS version. (GITHUB-682) [Tomaz Muraus] Compute ~~~~~~~ - Add ap-northeast-2 region to EC2 driver (South Korea) (GITHUB-681) [Anthony Shaw] - Added Added volume type to EC2 volume extra to EC2 driver. (GITHUB-680) [Gennadiy Stas] - Add LazyObject class that provides lazy-loading, see `GCELicense` for usage (LIBCLOUD-786, GITHUB-665) [Scott Crunkleton] - Added t2.nano instance type to EC2 Compute driver (GITHUB-663) [Anthony Shaw] - Support for passing the image ID as a string instead of an instance of image when creating nodes in Dimension Data driver. 
(GITHUB-664) [Anthony Shaw] DNS ~~~ - Add support for 'health checks' in Aurora DNS driver (GITHUB-672) [Wido den Hollander] - Make sure ``ttl`` attribute is correctly parsed and added to the ``Record`` ``extra`` dictionary. (GITHUB-675) [Wido den Hollander] - Improve unit tests of Aurora DNS driver (GITHUB-679) [Wido den Hollander] Changes with Apache Libcloud 0.20.1 ----------------------------------- Compute ~~~~~~~ - [google] Allow for old and new style service account client email address (LIBCLOUD-785) [Hoang Phan] Changes with Apache Libcloud 0.20.0 ----------------------------------- General ~~~~~~~ - Added .editorconfig file for easier editing (GITHUB-625) [Misha Brukman] - Fix a bug with Libcloud accidentally setting paramiko root logger level to DEBUG (this should only happen if ``LIBCLOUD_DEBUG`` environment variable is provided). Reported by John Bresnahan. (LIBCLOUD-765) [Tomaz Muraus, John Bresnahan] - Simplify travis and tox config (.travis.yml, tox.ini). (GITHUB-608) [Anthony Monthe] - Fixed Python2.6 unit testing (and Google Cloud Storage tests) (GITHUB-648) [Scott Crunkleton] Compute ~~~~~~~ - [google] Allow for old and new style service account client email address (LIBCLOUD-785) [Hoang Phan] - Minor security improvement for storing cached GCE credentials (LIBCLOUD-718) [Siim Põder] - Removed DreamHosts Compute Driver, DreamHosts users will now use the OpenStack Node driver since DreamHosts are OpenStack API compliant (GITHUB-655) [Stephano Maffulli] - Added additional kwargs to the create_node method for Dimension Data driver, allowing the user to specify the RAM and CPU upfront. Added a ex_reconfigure_node method and ex_list_customer_images as well as updating the API to 2.1. (LIBCLOUD-783, GITHUB-656) [Anthony Shaw] - The EC2 Instance Type updated with correct disk sizes (especially the disk size for the m3 instances), conversion errors between GiB and M[i]B, disk count were the cause. Added instance types - g2.8xlarge and t2.large. 
(GITHUB-646) [Philipp Hahn] - Add update node, update VMware tools, add storage, change storage size or speed, remove storage to Dimension Data Driver. (LIBCLOUD-775, GITHUB-644) [Anthony Shaw] - Include 'service_name' support in _parse_service_catalog_auth_v3 for Openstack Drivers (GITHUB-647) [Steve Gregory] - Outscale inc & sas driver update (GITHUB-645) [@LordShion] - Add new `eu-west-2` & `us-east-2` regions to the OUTSCALE_INC & OUTSCALE_SAS drivers. [Filipe Silva /lordshion] - [google compute] add pricing data update script (GITHUB-464) [Misha Brukman] - Fix a bug in the ``list_volumes`` method in the CloudStack driver so it returns an empty list if no volumes are found. (GITHUB-617) [Wido den Hollander] - Return proper volume state for CloudStack volumes. (GITHUB-615, LIBCLOUD-764) [Wido den Hollander] - Add support for multiple regions in Aurora compute driver (GITHUB-623) [Wido den Hollander] - Fix value of ``node.extra['ip_addresses']`` node attribute in the CloudStack driver. (LIBCLOUD-767, GITHUB-627) [Atsushi Sasaki] - Make sure that ``node.public_ips`` attribute in the CloudStack driver doesn't contain duplicated values.. (LIBCLOUD-766, GITHUB-626) [Atsushi Sasaki] - Allow user to wait for a resource to reach a desired state in the Dimension Data driver by using new ``ex_wait_for_state`` method. 
(LIBCLOUD-707, GITHUB-631) [Anthony Shaw] - Added M4 pricing and instance information to EC2 driver (GITHUB-634) [Benjamin Zaitlen] - Added C4 instance information to EC2 driver (GITHUB-638) [amitofs] - Allow location of the datacenter to be supplied in ProfitBricks driver (LIBCLOUD-771, GITHUB-635) [Joel Reymont] - Reduce redundant API calls in CloudStack driver (LIBCLOUD-590, GITHUB-641) [Atsushi Sasaki] - Add an additional argument to libcloud.compute.drivers.GCENodeDriver.create_node to allow for creation of preemptible GCE instances (GITHUB-643) [@blawney] - GoogleStorageDriver can now use either our S3 authentication or other Google Cloud Platform OAuth2 authentication methods. (GITHUB-633) [Scott Crunkleton] - All NodeState, StorageVolumeState, VolumeSnapshotState and Provider attributes are now strings instead of integers. (GITHUB-624) [Allard Hoeve] Storage ~~~~~~~ Loadbalancer ~~~~~~~~~~~~ DNS ~~~ - RackSpace driver - New DNS driver methods: - ex_iterate_ptr_records - ex_get_ptr_record - ex_create_ptr_record - ex_update_ptr_record - ex_delete_ptr_record This should cover all of the functionality offered by the Rackspace DNS API in regards to RDNS. (LIBCLOUD-780, GITHUB-652) [Greg Hill] - Update ``create_record`` in the WorldWideDNS driver so it automatically selects a slot if one is not provided by the user via ``extra['entry']`` argument. (GITHUB-621) [Alejandro Pereira] - Introduce GoDaddy DNS Driver with examples and documentation. (LIBCLOUD-772, GITHUB-640, LIBCLOUD-778) [Anthony Shaw] - Add new driver for CloudFlare DNS (https://www.cloudflare.com/dns/). (GITHUB-637) [Tomaz Muraus] Changes with Apache Libcloud 0.19.0 ----------------------------------- General ~~~~~~~ - Update Rackspace AUTH_URL (LIBCLOUD-738) [Brian Curtin] - Fix ``LIBCLOUD_DEBUG`` mode so it works on Python 3.x. [Tomaz Muraus] - Fix Libcloud code so it doesn't throw an exception if simplejson < 2.1.0 is installed. 
(LIBCLOUD-714, GITHUB-577) [Erik Johnson] - Fix endpoint URL for DimensionData Asia Pacific region. (GITHUB-585) [Anthony Shaw] - Document potential time drift issue which could cause authentication in the GCE drivers to fail. (GITHUB-571) [Michal Tekel] - Update documentation for EC2 - make sure they reflect region changes from 0.14 release. (GITHUB-606) [James Guthrie] Compute ~~~~~~~ - Fixed malformed XML requests with Dimension Data driver. (LIBCLOUD-760, GITHUB-610) [Anthony Shaw] - Update list of scopes for Google Compute Engine driver. (GITHUB-607) [Otto Bretz] - Allow user to filter VPC by project in the CloudStack driver by passing ``project`` argument to the ``ex_list_vps`` method. (GITHUB-516) [Syed Mushtaq Ahmed] - Add volume management methods and other various improvements and fixes in the RunAbove driver. (GITHUB-561) [Anthony Monthe] - Add support and update Dimension Data driver to use API v2.0 by default. (LIBCLOUD-736, GITHUB-564) [Anthony Shaw] - Add new ``ex_virtual_network_name`` and ``ex_network_config`` argument to the `create_node`` method in the Azure driver. With those arguments user can now specify which virtual network to use. (GITHUB-569) [Jesaja Everling] - Fix ``create_node`` method in the GCE driver calling inexistent method (ex_get_disk instead of ex_get_volume). (GITHUB-574) [Alex Poms] - Allow user to pass ``proxy_url`` keyword argument to the VCloud driver constructor. (GITHUB-578) [Daniel Pool] - Various fixes and improvements in the DimensionData driver (support for creating servers in MCP 1 and 2 data center, performance improvements in the location fetching, etc.). (GITHUB-587, GITHUB-593, LIBCLOUD-750, LIBCLOUD-753) [Anthony Shaw] - Added ``ex_assign_public_ip`` argument to ``create_node`` in the EC2 driver. (GITHUB-590) [Kyle Long] - Added ``ex_terminate_on_shutdown`` argument to ``create_node`` in the EC2 driver. 
(GITHUB-595) [Kyle Long] - Various fixes and improvements in the ``ex_authorize_security_group_ingress`` in the CloudStack driver. (LIBCLOUD-749, GITHUB-580) [Lionel Schaub] - Add pricing information for Softlayer. (LIBCLOUD-759, GITHUB-603) [David Wilson] - Standardize VolumeSnapshot states into the ``state`` attribute. (LIBCLOUD-758, GITHUB-602) [Allard Hoeve] Storage ~~~~~~~ - Add support for ``sa-east-1`` region to the Amazon S3 driver. (GITHUB-562) [Iuri de Silvio] - Fix handling of binary data in Local storage driver on Python 3. Now the file which is to be written or read from is opened in the binary mode (``b`` flag). (LIBCLOUD-725, GITHUB-568) [Torf] Loadbalancer ~~~~~~~~~~~~ - Add a new driver for DimensionData load-balancing service (http://cloud.dimensiondata.com/). (LIBCLOUD-737, GITHUB-567) [Anthony Shaw] DNS ~~~ - Update Google Cloud DNS API from 'v1beta1' to 'v1' (GITHUB-583) [Misha Brukman] - Add new driver for AuroraDNS service. (GITHUB-562, LIBCLOUD-735) [Wido den Hollander] - Fix "_to_record" in the Route53 driver - make sure it doesn't throw if the record TTL is not available. [Tomaz Muraus] - Add new driver for WorldWideDNS service (http://www.worldwidedns.net/home.asp). (GITHUB-566, LIBCLOUD-732) [Alejandro Pereira] - Add new driver for DNSimple service (https://dnsimple.com/). (GITHUB-575, GITHUB-604, LIBCLOUD-739) [Alejandro Pereira, Patrick Humpal] - Add new driver for PointDNS service (https://pointhq.com). (GITHUB-576, GITHUB-591, LIBCLOUD-740) [Alejandro Pereira] - Add new driver for Vultr DNS service (https://www.vultr.com). (GITHUB-579, GITHUB-596, LIBCLOUD-745) [Alejandro Pereira, Janez Troha] - Add new driver for Liquidweb DNS service (http://www.liquidweb.com/). (GITHUB-581, LIBCLOUD-746) [Oltjano Terpollari, Alejandro Pereira] - Add new driver for Zonomi DNS hosting service (http://zonomi.com/). (GITHUB-582, LIBCLOUD-747) [Oltjano Terpollari, Alejandro Pereira] - Add new driver for Durable DNS service (https://durabledns.com/). 
(GITHUB-588, LIBCLOUD-748) [Oltjano Terpollari, Alejandro Pereira] Changes with Apache Libcloud 0.18.0 ----------------------------------- General ~~~~~~~ - Use native ``ssl.match_hostname`` functionality when running on Python >= 3.2 and only require ``backports.ssl_match_hostname`` dependency on Python versions < 3.2. [Tomaz Muraus] - Add support for AWS Signature version 4. Note: Currently only GET HTTP method is supported. (GITHUB-444) [Gertjan Oude Lohuis] - Fix a bug in the debug mode logging (LIBCLOUD_DEBUG). Logging to the debug file would throw an exception if the text contained non-ascii characters. [Tomaz Muraus] - Fix a bug with connection code throwing an exception if a port was a unicode type and not a str or int. (GITHUB-533, LIBCLOUD-716) [Avi Weit] - Update ``is_valid_ip_address`` function so it also works on Windows. (GITHUB-343, GITHUB-498, LIBCLOUD-601, LIBCLOUD-686) [Nicolas Fraison, Samuel Marks] - Add support for retrying failed HTTP requests. Retrying is off by default and can be enabled by setting ``LIBCLOUD_RETRY_FAILED_HTTP_REQUESTS`` environment variable. (GITHUB-515, LIBCLOUD-360, LIBCLOUD-709) - Fix a bug in consuming stdout and stderr strams in Paramiko SSH client. In some cases (like connecting to localhost via SSH), exit_status_ready gets set immediately even before the while loop to consume the streams kicks in. In those cases, we will not have consumed the streams at all. (GITHUB-558) [Lakshmi Kannan] Compute ~~~~~~~ - Google Compute now supports paginated lists including filtering. (GITHUB-491) [Lee Verberne] - OpenStackNodeSize objects now support optional, additional fields that are supported in OpenStack 2.1: `ephemeral_disk`, `swap`, `extra`. (GITHUB-488, LIBCLOUD-682) [Greg Hill] - StorageVolume objects now have an attribute `state` that holds a state variable that is standardized state across drivers. Drivers that currently support the `state` attribute are OpenStack and EC2. 
StorageVolume objects returned by drivers that do not support the attribute will have a `state` of `None`. When a provider returns a state that is unknown to the driver, the state will be `UNKNOWN`. Please report such states. A couple of drivers already put state fields in the `extra` fields of StorageVolumes. These fields were kept for backwards-compatibility and for reference. (GITHUB-476) [Allard Hoeve] - StorageVolume objects on EC2 and OpenStack now have a key called snapshot_id in their extra dicts containing the snapshot ID the volume was based on. (GITHUB-479) [Allard Hoeve] - OpenStack driver: deprecated ex_create_snapshot and ex_delete_snapshot in favor of create_volume_snapshot and destroy_volume_snapshot. Updated base driver method create_storage_volume argument name to be optional. (GITHUB-478) [Allard Hoeve] - Add support for creating volumes based on snapshots to EC2 and OS drivers. Also modify signature of base NodeDriver.create_volume to reflect the fact that all drivers expect a StorageSnapshot object as the snapshot argument. (GITHUB-467, LIBCLOUD-672) [Allard Hoeve] - VolumeSnapshots now have a `created` attribute that is a `datetime` field showing the creation datetime of the snapshot. The field in VolumeSnapshot.extra containing the original string is maintained, so this is a backwards-compatible change. (GITHUB-473) [Allard Hoeve] - Improve GCE create_node, make sure ex_get_disktype function (GITHUB-448) [Markos Gogoulos] - GCE driver fix to handle unknown image projects (GITHUB-447) [Markos Gogoulos] - Allow user to pass ``ex_blockdevicemappings`` argument to the create_node method in the OpenStack driver. (GITHUB-398, LIBCLOUD-637) [Allard Hoeve] - Fix ``list_volume_snapshots`` method in the EC2 driver so it conforms to the base API. (LIBCLOUD-664, GITHUB-451) [Allard Hoeve] - Add ``volumes_attached`` attribute to ``node.extra`` in the OpenStack driver. 
(LIBCLOUD-668, GITHUB-462) [Allard Hoeve] - Add the following new methods to the Linode driver: ``ex_list_volumes``, ``ex_create_volume``, ``ex_destroy_volume``. (LIBCLOUD-649, GITHUB-430) [Wojciech Wirkijowski] - Add ``list_volume_snapshots`` method to the OpenStack driver. (LIBCLOUD-663, GITHUB-450) [Allard Hoeve] - Add Site to Site VPN functionality to CloudStack driver. (GITHUB-465) [Avi Nanhkoesingh] - Add affinity group support to CloudStack driver (LIBCLOUD-671, GITHUB-468) [Mateusz Korszun] - Add a support for a new AWS Frankfurt, Germany region (``eu-central-1``) to the EC2 driver using AWS Signature v4. (GITHUB-444) [Gertjan Oude Lohuis, Tomaz Muraus] - Allow Filtering in EC2 list_images() driver (GITHUB-456, LIBCLOUD-667) [Katriel Traum] - Add ex_list_ip_forwarding_rules() to CloudStack driver (GITHUB-483) [Atsushi Sasaki] - Add AURORA compute driver (LIBCLOUD-641, GITHUB-477) [Wido den Hollander] - Update ``ex_describe_tags`` method in the EC2 driver and allow user to list tags for any supported resource. Previously you could only list tags for a node or a storage volume. (LIBCLOUD-676, GITHUB-482) [John Kinsella] - Various improvements in the HostVirtual driver (code refactoring, support for managing "packages"). (LIBCLOUD-670, GITHUB-472) [Dinesh Bhoopathy] - Add support for DigitalOcean API v2.0 while maintaining support for the old API v1.0. Note: API v2.0 is now used by default. To use the old API v1.0, pass ``api_version='1.0'`` argument to the driver constructor. (GITHUB-442) [Andrew Starr-Bochicchio] - Add new ``d4.`` instance types to the EC2 driver. Also update EC2 pricing data. (GITHUB-490) [Tomaz Muraus] - Add new driver for Microsoft Azure Virtual Machines service. 
(LIBCLOUD-556, GITHUB-305, GITHUB-499, GITHUB-538) [Michael Bennett, davidcrossland, Richard Conway, Matt Baldwin, Tomaz Muraus] - Fix VPC lookup method in CloudStack driver (GITHUB-506) [Avi Nanhkoesingh] - Add new driver for the Dimension Data provider based on the OpSource driver. (LIBCLOUD-698, GITHUB-507, LIBCLOUD-700, GITHUB-513) [Anthony Shaw] - Add "virtualmachine_id" attribute to the ``CloudStackAddress`` class in the CloudStack driver. (LIBCLOUD-679, GITHUB-485) [Atsushi Sasaki] - Allow user to pass filters via arguments to the ``ex_list_port_forwarding_rules`` in the CloudStack driver. (LIBCLOUD-678, GITHUB-484) [Atsushi Sasaki] - Fix an issue with ``list_nodes`` in the CloudSigma driver throwing an exception if a node in the list had a static IP. (LIBCLOUD-707, GITHUB-514) [Chris O'Brien] - Don't throw an exception if a limit for a particular CloudStack resource is "Unlimited" and not a number. (GITHUB-512) [Syed Mushtaq Ahmed] - Allow user to pass ``ex_config_drive`` argument to the ``create_node`` method in the OpenStack driver. (LIBCLOUD-356, GITHUB-330) [Ryan Parrish] - Add new driver for Cloudwatt (https://www.cloudwatt.com/en/) provider. (GITHUB-338) [Anthony Monthe] - Add new driver for Packet (https://www.packet.net/) provider. (LIBCLOUD-703, GITHUB-527) [Aaron Welch] - Update Azure VM pricing information and add information for new D instance types. (GITHUB-528) [Michael Bennett] - Add ``ex_get_node`` and ``ex_get_volume`` methods to CloudStack driver. (GITHUB-532) [Anthony Monthe] - Update CloudSigma driver so the "unavailable" and "paused" node state is correctly mapped to "error" and "paused" respectively. (GITHUB-517) [Chris O'Brien] - Add SSH key pair management methods to the Gandi driver. (GITHUB-534) [Anthony Monthe] - Add ``ex_get_node`` and ``ex_get_volume`` methods to Gandi driver. (GITHUB-534) [Anthony Monthe] - Add ``fault`` attribute to the ``extra`` dictionary of the ``Node`` instance returned by the OpenStack driver. 
(LIBCLOUD-730, GITHUB-557) [Nick Fox] - Add new driver for Onapp IaaS platform. (LIBCLOUD-691, GITHUB-502) [Matthias Wiesner] - Allow user to inject custom data / script into the Azure node by passing ``ex_custom_data`` argument to the ``create_node`` method. (LIBCLOUD-726, GITHUB-554) [David Wilson] - Add ``ex_create_cloud_service`` and ``ex_destroy_cloud_service`` method to the Azure driver. (LIBCLOUD-724, GITHUB-551) [David Wilson] - Add support for passing user data when creating a DigitalOcean node (``ex_user_data`` argument). (LIBCLOUD-731, GITHUB-559) [David Wilson] - Allow user to specify which arguments are passed to ``list_nodes`` method which is called inside ``wait_until_running`` by passing ``ex_list_nodes_kwargs`` argument to the ``wait_until_running`` method. (``ex_user_data`` argument). (LIBCLOUD-723, GITHUB-548) [David Wilson] - Allow user to pass ``ex_volume_type`` argument to the ``create_volume`` method in the OpennStack driver. (GITHUB-553) [Rico Echwald-Tijsen] - Add new driver for RunAbove (https://www.runabove.com) provider. (GITHUB-550) [Anthony Monthe] - Fix a bug with exception being throw inside the CloudStack driver when the provider returned no error message in the body. (GITHUB-555) [Konstantin Skaburskas] - Various improvements in the DigitalOcean driver: - Increase page size to API maximum. - Add ``ex_create_attr`` kwarg to ``create_node`` method. - Update all the ``list_*`` methods to use paginated requests - Allow user to specify page size by passing ``ex_per_page`` argument to the constructor. (LIBCLOUD-717, GITHUB-537) [Javier Castillo II] Storage ~~~~~~~ - Fix a bug with authentication in the OpenStack Swift driver. (GITHUB-492, LIBCLOUD-635) [Tom Fifield] - Add AuroraObjects Storage Driver. (GITHUB-540, LIBCLOUD-719) [Wido den Hollander] Loadbalancer ~~~~~~~~~~~~ - Add a new driver for Softlayer load-balancing service (https://www.softlayer.com/load-balancing). 
(GITHUB-500, LIBCLOUD-688) [Avi Weit] DNS ~~~ - Fix a bug when a ZoneDoesntExist exception was thrown when listing records for a zone which has no records in the HostVirtual driver. (GITHUB-460) [Vanč Levstik] - Correctly handle MX records priority in the Route53 driver. (GITHUB-469) [Vanč Levstik] - Allow user to create an A record which points directly to the domain zone name in the Route53 driver. (GITHUB-469) [Vanč Levstik] - Fix delete_zone method in the HostVirtual driver. (GITHUB-461) [Vanč Levstik] - Fix parsing of the record name in the HostVirtual driver. (GITHUB-461) [Vanč Levstik] - Add new driver for DigitalOcean DNS service. (GITHUB-505) [Javier Castillo II] Changes with Apache Libcloud 0.17.0 ----------------------------------- General ~~~~~~~ - Use ``match_hostname`` function from ``backports.ssl_match_hostname`` package to verify the SSL certificate hostname instead of relying on our own logic. (GITHUB-374) [Alex Gaynor] Compute ~~~~~~~ - Add new `eu-west-2` & `us-east-2` regions to the OUTSCALE_INC & OUTSCALE_SAS drivers. [Filipe Silva /lordshion] - GCE driver updated to include ex_stop_node() and ex_start_node() methods. (GITHUB-442) [Eric Johnson] - GCE driver now raises ResourceNotFoundError when the specified image is not found in any image project. Previously, this would return None but now raises the not-found exception instead. This fixes a bug where returning None caused ex_delete_image to raise an AttributeError. (GITHUB-441) [Eric Johnson] - GCE driver update to support JSON format Service Account files and a PY3 fix from Siim Põder for LIBCLOUD-627. (LIBCLOUD-627, LIBCLOUD-657, GITHUB-438) [Eric Johnson] - GCE driver fixed for missing param on ex_add_access_config. (GITHUB-435) [Peter Mooshammer] - GCE driver support for HTTP load-balancer resources. (LIBCLOUD-605, GITHUB-429) [Lee Verberne] - GCE driver updated to make better use of GCEDiskTypes. 
(GITHUB-428) [Eric Johnson] - GCE driver list_images() now returns all non-deprecated images by default. (LIBCLOUD-602, GITHUB-423) [Eric Johnson] - Improve GCE API coverage for create_node(). (GITHUB-419) [Eric Johnson] - GCE Licenses added to the GCE driver. (GITHUB-420) [Eric Johnson] - GCE Projects support common instance metadata and usage export buckets. (GITHUB-409) [Eric Johnson] - Improvements to TargetPool resource in GCE driver. (GITHUB-414) [Eric Johnson] - Adding TargetInstances resource to GCE driver. (GITHUB-393) [Eric Johnson] - Adding DiskTypes resource to GCE driver. (GITHUB-391) [Eric Johnson] - Fix boot disk auto_delete in GCE driver. (GITHUB-412) [Igor Bogomazov] - Add Routes to GCE driver. (GITHUB-410) [Eric Johnson] - Add missing ``ubuntu-os-cloud`` images to the GCE driver. (LIBCLOUD-632, GITHUB-385) [Borja Martin] - Add new `us-east-2` and `us-east-3` region to the Joyent driver. (GITHUB-386) [Anthony Monthe] - Add missing t2. instance types to the us-west-1 region in the EC2 driver. (GITHUB-388) [Matt Lehman] - Add option to expunge VM on destroy in CloudStack driver. (GITHUB-382) [Roeland Kuipers] - Add extra attribute in list_images for CloudStack driver. (GITHUB-389) [Loic Lambiel] - Add ``ex_security_group_ids`` argument to the create_node method in the EC2 driver. This way users can launch VPC nodes with security groups. (GITHUB-373) [Itxaka Serrano] - Add description argument to GCE Network. (GITHUB-397) [Eric Johnson] - GCE: Improve MachineType (size) coverage of GCE API. (GITHUB-396) [Eric Johnson] - GCE: Improved Images coverage. (GITHUB-395) [Eric Johnson] - GCE: Support for global IP addresses. (GITHUB-390, GITHUB-394) [Eric Johnson] - GCE: Add missing snapshot attributes. (GITHUB-401) [Eric Johnson] - AWS: Set proper disk size in c3.X instance types. (GITHUB-405) [Itxaka Serrano] - Fix a bug with handling of the ``ex_keyname`` argument in the Softlayer driver. 
(GITHUB-416, LIBCLOUD-647) [Dustin Oberloh] - Update CloudSigma region list (remove Las Vegas, NV region and add new San Jose, CA and Miami, FL region). (GITHUB-417) [Viktor Petersson] - Add ``ex_get_node`` method to the Joyent driver. (GITHUB-421) [Anthony Monthe] - Add support for placement group management to the EC2 driver. (GITHUB-418) [Mikhail Ovsyannikov] - Add new tok02 region to the Softlayer driver. (GITHUB-436, LIBCLOUD-656) [Dustin Oberloh] - Add new Honolulu, HI endpoint to the CloudSigma driver. (GITHUB-439) [Stephen D. Spencer] - Fix a bug with config_drive attribute in the OpenStack driver. New versions of OpenStack now return a boolean and not a string. (GITHUB-433) [quilombo] - Add support for Abiquo API v3.x, remove support for now obsolete API v2.x. (GITHUB-433, LIBCLOUD-652) [David Freedman] - Allow rootdisksize parameter in create_node CloudStack driver (GITHUB-440, LIBCLOUD-658) [Loic Lambiel] Storage ~~~~~~~ - Allow user to pass ``headers`` argument to the ``upload_object`` and ``upload_object_via_stream`` method. This way user can specify CORS headers with the drivers which support that. (GITHUB-403, GITHUB-404) [Peter Schmidt] - Fix upload_object_via_stream so it works correctly under Python 3.x if user manually passes an iterator to the method. Also improve how reading a file in chunks works with drivers which support chunked encoding - always try to fill a chunk with CHUNK_SIZE bytes instead of directly streaming the chunk which iterator returns. Previously, if iterator returned 1 byte in one iteration, we would directly send this as a single chunk to the API. (GITHUB-408, LIBCLOUD-639) [Peter Schmidt] Loadbalancer ~~~~~~~~~~~~ - Updates to CloudStack driver. (GITHUB-434) [Jeroen de Korte] DNS ~~~ - New driver for Softlayer DNS service. (GITHUB-413, LIBCLOUD-640) [Vanč Levstik] - Fix a bug with ``ex_create_multi_value_record`` method in the Route53 driver only returning a single record. 
(GITHUB-431, LIBCLOUD-650) [Itxaka Serrano] Changes with Apache Libcloud 0.16.0 ----------------------------------- General ~~~~~~~ - Add new ``OpenStackIdentity_3_0_Connection`` class for working with OpenStack Identity (Keystone) service API v3. [Tomaz Muraus] - Add support for prettifying JSON or XML response body which is printed to a file like object when using ``LIBCLOUD_DEBUG`` environment variable. This option can be enabled by setting ``LIBCLOUD_DEBUG_PRETTY_PRINT_RESPONSE`` environment variable. [Tomaz Muraus] - Add support for using an HTTP proxy for outgoing HTTP and HTTPS requests. [Tomaz Muraus, Philip Kershaw] Compute ~~~~~~~ - Fix an issue with ``LIBCLOUD_DEBUG`` not working correctly with the Linode driver. [Tomaz Muraus, Juan Carlos Moreno] (LIBCLOUD-598, GITHUB-342) - Add new driver for VMware vSphere (http://www.vmware.com/products/vsphere/) based clouds. [Tomaz Muraus] - Add two new default node states - ``NodeState.SUSPENDED`` and ``NodeState.ERROR``. [Tomaz Muraus] - Fix to join networks properly in ``deploy_node`` in the CloudStack driver. (LIBCLOUD-593, GITUHB-336) [Atsushi Sasaki] - Create ``CloudStackFirewallRule`` class and corresponding methods. (LIBCLOUD-594, GITHUB-337) [Atsushi Sasaki] - Add support for SSD disks to Google Compute driver. (GITHUB-339) [Eric Johnson] - Add utility ``get_regions`` and ``get_service_names`` methods to the ``OpenStackServiceCatalog`` class. [Andrew Mann, Tomaz Muraus] - Fix a bug in ``ex_get_console_output`` in the EC2 driver which would cause an exception to be thrown if there was no console output for a particular node. Reported by Chris DeRamus. [Tomaz Muraus] - Add ip_address parameter in CloudStack driver ``create_node`` method. (GITHUB-346) [Roeland Kuipers] - Fix ``ParamikoSSHClient.run`` and ``deploy_node`` method to work correctly under Python 3. (GITHUB-347) [Eddy Reyes] - Update OpenStack driver to map more node states to states recognized by Libcloud. 
[Chris DeRamus] - Fix a bug with ``ex_metadata`` argument handling in the Google Compute Engine driver ``create_node`` method. (LIBCLOUD-544, GITHUB-349, GITHUB-353) [Raphael Theberge] - Add SSH key pair management methods to the Softlayer driver. (GITHUB-321, GITHUB-354) [Itxaka Serrano] - Correctly categorize node IP addresses into public and private when dealing with OpenStack floating IPs. [Andrew Mann] - Add new t2 instance types to the EC2 driver. [Tomaz Muraus] - Add support for Amazon GovCloud to the EC2 driver (us-gov-west-1 region). [Chris DeRamus] - Allow user to pass "gp2" for "ex_volume_type" argument to the create_volume method in the EC2 driver. Reported by Xavier Barbosa. [Tomaz Muraus, Xavier Barbosa] - Add new driver for ProfitBricks provider. (LIBCLOUD-589, GITHUB-352) [Matt Baldwin] - Various improvements and bugs fixes in the GCE driver. For a list, see https://github.com/apache/libcloud/pull/360/commits (GITHUB-360) [Evgeny Egorochkin] - Allow user to specify virtualization type when registering an EC2 image by passing ``virtualization_type`` argument to the ``ex_register_image`` method. (GITHUB-361) [Andy Grimm] - Add ``ex_create_image`` method to the GCE driver. (GITHUB-358, LIBCLOUD-611) [Katriel Traum] - Add some methods to CloudStack driver: create_volume_snapshot, list_snapshots, destroy_volume_snapshot create_snapshot_template, ex_list_os_types) (GITHUB-363, LIBCLOUD-616) [Oleg Suharev] - Added VPC support and Egress Firewall rule support fo CloudStack (GITHUB-363) [Jeroen de Korte] - Add additional attributes to the ``extra`` dictionary of OpenStack StorageVolume object. (GITHUB-366) [Gertjan Oude Lohuis] - Fix ``create_volume`` method in the OpenStack driver to return a created volume object (instance of StorageVolume) on success, instead of a boolean indicating operation success. 
(GITHUB-365) [Gertjan Oude Lohuis] - Add optional project parameters for ex_list_networks() to CloudStack driver (GITHUB-367, LIBCLOUD-615) [Rene Moser] - CLOUDSTACK: option to start VM in a STOPPED state (GITHUB-368) [Roeland Kuipers] - Support "config_drive" in the OpenStack driver. Allow users to pass ``ex_config_drive`` argument to the ``create_node`` and ``ex_rebuild_node`` method. (GITHUB-370) [Nirmal Ranganathan] - Add support for service scopes to the ``create_node`` method in the GCE driver. (LIBCLOUD-578, GITHUB-373) [Eric Johnson] - Update GCE driver to allow for authentication with internal metadata service. (LIBCLOUD-625, LIBCLOUD-276, GITHUB-276) [Eric Johnson] - Fix a bug in Elasticstack node creation method where it would raise exceptions because of missing data in a response, and also fix pulling the IP from the proper data item. (GITHUB-325) [Michael Bennett] - Fix a bug which prevented user to connect and instantiate multiple EC2 driver instances for different regions at the same time. (GITHUB-325) [Michael Bennett] - Add methods in CloudStack driver to manage mutiple nics per vm. (GITHUB-369) [Roeland Kuipers] - Implements VPC network ACLs for CloudStack driver. (GITHUB-371) [Jeroen de Korte] Storage ~~~~~~~ - Fix a bug with CDN requests in the CloudFiles driver. [Tomaz Muraus] - Fix a bug with not being able to specify meta_data / tags when uploading an object using Google Storage driver. (LIBCLOUD-612, GITHUB-356) [Stefan Friesel] Loadbalancer ~~~~~~~~~~~~ - Allow user to specify session affinity algorithm in the GCE driver by passing ``ex_session_affinity`` argument to the ``create_balancer`` method. (LIBCLOUD-595, GITHUB-341) [Lee Verberne, Eric Johnson] DNS ~~~ - Various fixes in the Google DNS driver. 
(GITHUB-378) [Franck Cuny] Changes with Apache Libcloud 0.15.1 ----------------------------------- Compute ~~~~~~~ - Allow user to limit a list of subnets which are returned by passing ``subnet_ids`` and ``filters`` argument to the ``ex_list_subnets`` method in the EC2 driver. (LIBCLOUD-571, GITHUB-306) [Lior Goikhburg] - Allow user to limit a list of internet gateways which are returned by passing ``gateway_ids`` and ``filters`` argument to the ``ex_list_internet_gateways`` method in the EC2 driver. (LIBCLOUD-572, GITHUB-307) [Lior Goikhburg] - Allow user to filter which nodes are returned by passing ``ex_filters`` argument to the ``list_nodes`` method in the EC2 driver. (LIBCLOUD-580, GITHUB-320) [Lior Goikhburg] - Add network_association_id to ex_list_public_ips and CloudstackAddress object (GITHUB-327) [Roeland Kuipers] - Allow user to specify admin password by passing ``ex_admin_pass`` argument to the ``create_node`` method in the Openstack driver. (GITHUB-315) [Marcus Devich] - Fix a possible race condition in deploy_node which would occur if node is online and can be accessed via SSH, but the SSH key we want to use hasn't been installed yet. Previously, we would immediately throw if we can connect, but the SSH key hasn't been installed yet. (GITHUB-331) [David Gay] - Propagate an exception in ``deploy_node`` method if user specified an invalid path to the private key file. Previously this exception was silently swallowed and ignored. [Tomaz Muraus] DNS ~~~ - Include a better message in the exception which is thrown when a request in the Rackspace driver ends up in an ``ERROR`` state. [Tomaz Muraus] Changes with Apache Libcloud 0.15.0 ----------------------------------- General ~~~~~~~ - Use lxml library (if available) for parsing XML. This should substantially reduce parsing time and memory usage for large XML responses (e.g. retrieving all the available images in the EC2 driver). 
[Andrew Mann] - Use --head flag instead of -X HEAD when logging curl lines for HEAD requests in debug mode. Reported by Brian Metzler. (LIBCLOUD-552) [Tomaz Muraus] - Fix Python 3 compatibility bugs in the following functions: * import_key_pair_from_string in the EC2 driver * publickey._to_md5_fingerprint * publickey.get_pubkey_ssh2_fingerprint (GITHUB-301) [Csaba Hoch] - Update CA_CERTS_PATH to also look for CA cert bundle which comes with openssl Homebrew formula on OS x (/usr/local/etc/openssl/cert.pem). (GITHUB-309) [Pedro Romano] - Update Google drivers to allow simultaneous authornization for all the supported Google Services. (GITHUB-302) [Eric Johnson] Compute ~~~~~~~ - Fix create_key_pair method which was not returning private key. (LIBCLOUD-566) [Sebastien Goasguen] - Map "Stopped" node state in the CloudStack driver to NodeState.STOPPED instead of NodeState.TERMINATED, "Stopping" to NodeState.PENDING instead of NodeState.TERMINATED and "Expunging" to NodeState.PENDING instead of NodeState.TERMINATED. (GITHUB-246) [Chris DeRamus, Tomaz Muraus] - Add ex_create_tags and ex_delete_tags method to the CloudStack driver. (LIBCLOUD-514, GITHUB-248) [Chris DeRamus] - Add new G2 instances to the EC2 driver. [Tomaz Muraus] - Add support for multiple API versions to the Eucalyptus driver and allows user to pass "api_version" argument to the driver constructor. (LIBCLOUD-516, GITHUB-249) [Chris DeRamus] - Map "Powered Off" state in the vCloud driver from "TERMINATED" to "STOPPED". (GITHUB-251) [Ash Berlin] - Add ex_rename_node method to the DigitalOcean driver. (GITHUB-252) [Rahul Ranjan] - Improve error parsing in the DigitalOcean driver. Reported by Deni Bertovic. [Tomaz Muraus] - Add extension methods for the VPC internet gateway management to the EC2 driver. (LIBCLOUD-525, GITHUB-255) [Chris DeRamus] - Add CloudStackProject class to the CloudStack driver and add option to select project and disk offering on node creation. 
(LIBCLOUD-526, GITHUB-257) [Jim Divine] - Fix IP address handling in the OpenStack driver. (LIBCLOUD-503, GITHUB-235) [Markos Gogoulos] - Ad new ex_delete_image and ex_deprecate_image method to the GCE driver. (GITHUB-260) [Franck Cuny] - Ad new ex_copy_image method to the GCE driver. (GITHUB-258) [Franck Cuny] - Ad new ex_set_volume_auto_delete method to the GCE driver. (GITHUB-264) [Franck Cuny] - Add ex_revoke_security_group_ingress method to the CloudStack driver. [Chris DeRamus, Tomaz Muraus] - Allow user to pass ex_ebs_optimized argument to the create_node method in the EC2 driver. (GITHUB-272) [zerthimon] - Add "deprecated" attribute to the Node object in the Google Compute Engine driver. (GITHUB-276) [Chris / bassdread] - Update Softlayer driver to use "fullyQualifiedDomainName" instead of "hostname" attribute for the node name. (GITHUB-280) [RoelVanNyen] - Allow user to specify target tags using target_tags attribute when creating a firewall rule in the GCE driver. (GITHUB-278) [zerthimon] - Add new standard API for image management and initial implementation for the EC2 and Rackspace driver. (GITHUB-277) [Matt Lehman] - Allow user to specify "displayname" attribute when creating a CloudStack node by passing "ex_displayname" argument to the method. Also allow "name" argument to be empty (None). This way CloudStack automatically uses Node's UUID for the name. (GITHUB-289) [Jeff Moody] - Deprecate "key" argument in the SSHClient class in favor of new "key_files" argument. Also add a new "key_material" argument. This argument can contain raw string version of a private key. Note 1: "key_files" and "key_material" arguments are mutually exclusive. Note 2: "key_material" argument is not supported in the ShellOutSSHClient. - Use node id attribute instead of the name for the "lconfig" label in the Linode driver. This way the label is never longer than 48 characters. 
(GITHUB-287) [earthgecko] - Add a new driver for Outscale SAS and Outscale INC cloud (http://www.outscale.com). (GITHUB-285, GITHUB-293, LIBCLOUD-536, LIBCLOUD-553) [Benoit Canet] - Add new driver for HP Public Cloud (Helion) available via Provider.HPCLOUD constant. [Tomaz Muraus] - Allow user to specify availability zone when creating an OpenStack node by passing "ex_availability_zone" argument to the create_node method. Note: This will only work if the OpenStack installation is running availability zones extension. (GITHUB-295, LIBCLOUD-555) [syndicut] - Allow user to pass filters to ex_list_networks method in the EC2 driver. (GITHUB-294) [zerthimon] - Allow user to retrieve container images using ex_get_image method in the Google Compute Engine driver. (GITHUB-299, LIBCLOUD-562) [Magnus Andersson] - Add new driver for Kili public cloud (http://kili.io/) [Tomaz Muraus] - Add "timeout" argument to the ParamikoSSHClient.run method. If this argument is specified and the command passed to run method doesn't finish in the defined timeout, `SSHCommandTimeoutError` is throw and the connection to the remote server is closed. Note #1: If timed out happens, this functionality doesn't guarantee that the underlying command will be stopped / killed. The way it works it simply closes a connect to the remote server. [Tomaz Muraus] Note #2: "timeout" argument is only available in the Paramiko SSH client. - Make "cidrs_ips" argument in the ex_authorize_security_group_egress method in the EC2 driver mandatory. (GITHUB-301) [Csaba Hoch] - Add extension methods for managing floating IPs (ex_get_floating_ip, ex_create_floating_ip, ex_delete_floating_ip) to the Openstack 1.1 driver. (GITHUB-301) [Csaba Hoch] - Fix bug in RimuHosting driver which caused driver not to work when the provider returned compressed (gzip'ed) response. (LIBCLOUD-569, GITHUB-303) [amastracci] - Fix issue with overwriting the server memory values in the RimuHosting driver. 
(GUTHUB-308) [Dustin Oberloh] - Add ex_all_tenants argument to the list_nodes method in the OpenStack driver. (GITHUB-312) [LIBCLOUD-575, Zak Estrada] - Add support for network management for advanced zones (ex_list_network_offerings, ex_create_network, ex_delete_network) in the CloudStack driver. (GITHUB-316) [Roeland Kuipers] - Add extension methods for routes and route table management to the EC2 driver (ex_list_route_tables, ex_create_route_table, ex_delete_route_table, ex_associate_route_table, ex_dissociate_route_table, ex_replace_route_table_association, ex_create_route, ex_delete_route, ex_replace_route) (LIBCLOUD-574, GITHUB-313) [Lior Goikhburg] - Fix ex_list_snapshots for HP Helion OpenStack based driver. [Tomaz Muraus] - Allow user to specify volume type and number of IOPS when creating a new volume in the EC2 driver by passing ``ex_volume_type`` and ``ex_iops`` argument to the ``create_volume`` method. [Tomaz Muraus] - Fix ex_unpause_node method in the OpenStack driver. (GITHUB-317) [Pablo Orduña] - Allow user to launch EC2 node in a specific VPC subnet by passing ``ex_subnet`` argument to the create_node method. (GITHUB-318) [Lior Goikhburg] Storage ~~~~~~~ - Fix container name encoding in the iterate_container_objects and get_container_cdn_url method in the CloudFiles driver. Previously, those methods would throw an exception if user passed in a container name which contained a whitespace. Reported by Brian Metzler. (LIBCLOUD-552) [Tomaz MUraus] - Fix a bug in the OpenStack Swift driver which prevented the driver to work with installations where region names in the service catalog weren't upper case. (LIBCLOUD-576, GITHUB-311) [Zak Estrada] Load Balancer ~~~~~~~~~~~~~ - Add extension methods for policy managagement to the ELB driver. (LIBCLOUD-522, GITHUB-253) [Rahul Ranjan] DNS ~~~ - Fix update_record method in the Route56 driver so it works correctly for records with multiple values. 
[Tomaz Muraus] - Add ex_create_multi_value_record method to the Route53 driver which allows user to create a record with multiple values with a single call. [Tomaz Muraus] - Add new driver for Google DNS. (GITHUB-269) [Franck Cuny] Changes with Apache Libcloud 0.14.1 ----------------------------------- Compute ~~~~~~~ - Add new m3.medium and m3.large instance information to the EC2 driver. [Tomaz Muraus] - Add a new driver for CloudSigma API v2.0. [Tomaz Muraus] - Add "volume_id" attribute to the Node "extra" dictionary in the EC2 driver. Also fix the value of the "device" extra attribute in the StorageVolume object. (LIBCLOUD-501) [Oleg Suharev] - Add the following extension methods to the OpenStack driver: ex_pause_node, ex_unpause_node, ex_suspend_node, ex_resume_node. (LIBCLOUD-505, GITHUB-238) [Chris DeRamus] - Add ex_limits method to the CloudStack driver. (LIBCLOUD-507, GITHUB-240) [Chris DeRamus] - Add "extra" dictionary to the CloudStackNode object and include more attributes in the "extra" dictionary of the network and volume object. (LIBCLOUD-506, GITHUB-239) [Chris DeRamus] - Add ex_register_image method to the EC2 driver. (LIBCLOUD-508, GITHUB-241) [Chris DeRamus] - Add methods for managing volume snapshots to the OpenStack driver. (LIBCLOUD-512, GITHUB-245) [Chris DeRamus] Load Balancer ~~~~~~~~~~~~~ - Fix a bug in the ex_targetpool_add_node and ex_targetpool_remove_node method in the GCE driver. [Rick Wright] Storage ~~~~~~~ - Allow user to use an internal endpoint in the CloudFiles driver by passing "use_internal_url" argument to the driver constructor. (GITHUB-229, GITHUB-231) [John Obelenus] DNS ~~~ - Add PTR to the supported record types in the Rackspace driver. [Tomaz Muraus] - Fix Zerigo driver to set Record.name attribute for records which refer to the bare domain to "None" instead of an empty string. 
[Tomaz Muraus] - For consistency with other drivers, update Rackspace driver to set Record.name attribute for the records which refer to the bare domain to "None" instead of setting them to FQDN. [Tomaz Muraus] - Update Rackspace driver to support paginating through zones and records. (GITHUB-230) [Roy Wellington] - Update Route53 driver so it supports handling records with multiple values (e.g. MX). (LIBCLOUD-504, GITHUB-237) [Chris DeRamus] - Update Route53 driver to better handle SRV records. [Tomaz Muraus] - Update Route53 driver, make sure "ttl" attribute in the Record extra dictionary is always an int. [Tomaz Muraus] Changes with Apache Libcloud 0.14.0 ----------------------------------- General ~~~~~~~ - Update API endpoints which are used in the HostVirtual drivers. (LIBCLOUD-489) [Dinesh Bhoopathy] - Add support for Amazon security token to the Amazon drivers. (LIBCLOUD-498, GITHUB-223) [Noah Kantrowitz] Compute ~~~~~~~ - Remove Slicehost driver. SliceHost API has been shut down in 2012 so it makes no sense to keep this driver. [Tomaz Muraus] - Modify drivers for public cloud providers which use HTTP Basic authentication to not allow insecure connections (secure constructor kwarg being set to False) by default. This way credentials can't accidentally be sent in plain text over the write. Affected drivers: Bluebox, Joyent, NephoScale, OpSource, VPSNet [Tomaz Muraus] - Remove "public_ip" and "private_ip" property which has been deprecated in 0.7.0 from the Node object. [Tomaz Muraus] - Move "is_private_ip" and "is_valid_ip_address" function from libcloud.compute.base into libcloud.utils.networking module. [Tomaz Muraus] - Allow user to pass "url" argument to the CloudStack driver constructor. This argument can be provided instead of "host" and "path" arguments and can contain a full URL to the API endpoint. (LIBCLOUD-430) [Tomaz Muraus] - Allow user to pass None as a "location" argument to the create_node method. 
(LIBCLOUD-431) [Tomaz Muraus] - Refactor CloudStack Connection class so it looks more like other connection classes and user can specify which attributes to send as part of query parameters in the GET request and which inside the body of a POST request. [Tomaz Muraus, Philipp Strube] - Add a new driver for Exoscale (https://www.exoscale.ch/) provider. [Tomaz Muraus] - Fix a bug in Abiquo driver which caused the driver to fail if the endpoint URL didn't start with "/api". (LIBCLOUD-447) Reported by Igor Ajdisek. [Tomaz Muraus] - Modify CloudStack driver to correctly throw InvalidCredsError exception if invalid credentials are provided. [Tomaz Muraus] - Don't throw an exception if a node object is missing an "image" attribute in the list nodes / get node response. This could happen if node is in an error state. (LIBCLOUD-455) [Dustin Spicuzza, Tomaz Muraus] - Update CloudStack driver to better handle errors and throw ProviderError instead of a generic Exception. [Tomaz Muraus] - Modify ex_list_networks methods in CloudStack driver to not thrown if there are no networks available. [Tomaz Muraus] - Bump API version used in the EC2 driver from 2010-08-21 to 2013-10-15. (LIBCLOUD-454) [Tomaz Muraus] - Add ex_get_limits method for retrieving account resource limits to the EC2 driver. [Tomaz Muraus] - Update us-west-1 region in the EC2 driver to include c3 instance types. Also include pricing information. [Tomaz Muraus] - For consistency, rename "ex_add_ip_forwarding_rule" method to "ex_create_ip_forwarding_rule". (GITHUB-196) [Oleg Suharev] - Add support for new "i2" instance types to Amazon EC2 driver. Also update pricing file. (LIBCLOUD-465) [Chris DeRamus] - Allow user to specify VPC id when creating a security group in the EC2 driver by passing "vpc_id" argument to ex_create_security_group method. 
(LIBCLOUD-463, GITHUB-201) [Chris DeRamus] - Add extension methods for managing security group rules (ex_authorize_security_group_ingress, ex_authorize_security_group_egress, ex_revoke_security_group_ingress, ex_revoke_security_group_egress) to the EC2 driver. (LIBCLOUD-466, GITHUB-202) [Chris DeRamus] - Add extension methods for deleting security groups (ex_delete_security_group, ex_delete_security_group_by_id, ex_delete_security_group_by_name) to the EC2 driver. (LIBCLOUD-464, GITHUB-199) [Chris DeRamus] - Add extension method for listing reserved instances (ex_list_reserved_nodes) to the EC2 driver. (LIBCLOUD-469, GITHUB-205) [Chris DeRamus] - Add extension methods for VPC management (ex_list_networks, ex_create_network, ex_delete_network) to the EC2 driver. (LIBCLOUD-467, GITHUB-203) [Chris DeRamus] - Add extension methods for VPC subnet management (ex_list_subnets, ex_create_subnet, ex_delete_subnet) to the EC2 driver. (LIBCLOUD-468, GITHUB-207) [Chris DeRamus] - Add ex_get_console_output extension method to the EC2 driver. (LIBCLOUD-471, GITHUB-209) [Chris DeRamus] - Include additional provider-specific attributes in the 'extra' dictionary of the StorageVolume class in the EC2 driver. (LIBCLOUD-473, GITHUB-210) [Chris DeRamus] - Change attribute name in the 'extra' dictionary of EC2 and CloudStack Node object from "keyname" to "key_name". (LIBCLOUD-475) [Oleg Suharev] - Fix a deployment issue which would some times cause a process to hang if the executed deployment script printed a lot of output to stdout or stderr. [Tomaz Muraus] - Add additional attributes to the "extra" dictionary of the VolumeSnapshot object in the EC2 driver. Also modify create_volume_snapshot method to correctly handle "name" argument. Previous, "name" argument was used as a snapshot description, now it's used as a Tag with a key "Name". 
(LIBCLOUD-480, GITHUB-214) [Chris DeRamus] - Store additional attributes (iops, tags, block_device_mapping) in the "extra" dictionary of the NodeImage object in the EC2 driver. Also fix ex_image_ids filtering in the list_images method. (LIBCLOUD-481, GITHUB-215) [Chris DeRamus] - Add extension methods for network interface management (ex_list_network_interfaces, ex_create_network_interface, ex_attach_network_interface_to_node, ex_detach_network_interface, ex_delete_network_interface) to the EC2 driver. (LIBCLOUD-474) [Chris DeRamus] - Update Google Compute Engine driver to use and work with API v1. (LIBCLOUD-450) [Rick Wright] - Modify ParamikoSSHClient so that "password" and "key" arguments are not mutually exclusive and both can be provided. (LIBCLOUD-461, GITHUB-194) [Markos Gogoulos] - Add extension methods for the Elastic IP management to the EC2 driver. Also modify "ex_allocate_address" and "ex_release_address" method to take "domain" argument so it also works with VPC. (LIBCLOUD-470, GITHUB-208, GITHUB-220) [Chris DeRamus] - Add additional provider specific attributes to the "extra" dictionary of the Node object in the EC2 driver. (LIBCLOUD-493, GITHUB-221) [Chris DeRamus] - Add ex_copy_image and ex_create_image_from_node method to the EC2 driver. (LIBCLOUD-494, GITHUB-222) [Chris DeRamus] Storage ~~~~~~~ - Allow user to specify 'Content-Disposition' header in the CloudFiles driver by passing 'content_disposition' key in the extra dictionary of the upload object methods. (LIBCLOUD-430) [Michael Farrell] - Fix CloudFiles driver so it references a correct service catalog entry for the CDN endpoint. This was broken in the 0.14.0-beta3 release when we migrated all the Rackspace drivers to use auth 2.0 by default. (GITHUB-186) [John Obelenus] - Update storage drivers to default to "application/octet-stream" Content-Type if none is provided and none can be guessed. 
(LIBCLOUD-433) [Michael Farrell] - Fix a bug so you can now upload 0 bytes sized objects using multipart upload in the S3 driver. (LIBCLOUD-490) Reported by Noah Kantrowitz. [Tomaz Muraus] - Update OpenStack Swift driver constructor so it accepts "region", "ex_force_service_type" and "ex_force_service_name" argument. [Tomaz Muraus] - Deprecate "CLOUDFILES_SWIFT" provider constant in favor of new "OPENSTACK_SWIFT" one. [Tomaz Muraus] - Add support for setting an ACL when uploading and object. (LIBCLOUD-497, GITHUB-223) [Noah Kantrowitz] - Modify get_container method to use a more efficient "HEAD" approach instead of calling list_containers + doing late filterting. (LIBCLOUD-498, GITHUB-223) [Noah Kantrowitz] DNS ~~~ - Implement iterate_* methods in the Route53 driver and makes it work correctly if there are more results which can fit on a single page. Previously, only first 100 results were returned. (LIBCLOUD-434) [Chris Clarke] - Update HostVirtual driver constructor to only take "key" and other valid arguments. Previously it also took "secret" argument which it silently ignored. (LIBCLOUD-483) Reported by Andrew Udvare. [Tomaz Muraus] - Fix list_records method in the HostVirtual driver. (LIBCLOUD-484, GITHUB-218) Reported by Andrew Udvare. [Dinesh Bhoopathy] Changes with Apache Libcloud 0.14.0-beta3 ----------------------------------------- General ~~~~~~~ - If the file exists, read pricing data from ~/.libcloud/pricing.json by default. If the file doesn't exist, fall back to the old behavior and use pricing data which is bundled with the release. [Tomaz Muraus] - Add libcloud.pricing.download_pricing_file function for downloading and updating the pricing file. [Tomaz Muraus] - Fix libcloud.utils.py3.urlquote so it works with unicode strings under Python 2. (LIBCLOUD-429) [Michael Farrell] Compute ~~~~~~~ - Refactor Rackspace driver classes and make them easier to use. 
Now there are two Rackspace provider constants - Provider.RACKSPACE which represents new next-gen OpenStack servers and Provider.RACKSPACE_FIRST_GEN which represents old first-gen cloud servers. Note: This change is backward incompatible. For more information on those changes and how to update your code, please visit "Upgrade Notes" documentation page - http://s.apache.org/lc0140un [Tomaz Muraus] - Deprecate the following EC2 provider constants: EC2_US_EAST, EC2_EU, EC2_EU_WEST, EC2_AP_SOUTHEAST, EC2_AP_NORTHEAST, EC2_US_WEST_OREGON, EC2_SA_EAST, EC2_SA_EAST and replace it with a new EC2 constant. Driver referenced by this new constant now takes a "region" argument which dictates to which region to connect. Note: Deprecated constants will continue to work until the next major release. For more information on those changes and how to update your code, please visit "Upgrade Notes" documentation page - http://s.apache.org/lc0140un [Tomaz Muraus] - Add support for volume related functions to OpenNebula driver. (LIBCLOUD-354) [Emanuele Rocca] - Add methods for managing storage volumes to the OpenStack driver. (LIBCLOUD-353) [Bernard Kerckenaere] - Add new driver for Google Compute Engine (LIBCLOUD-266, LIBCLOUD-386) [Rick Wright] - Fix create_node "features" metadata and update affected drivers. (LIBCLOUD-367) [John Carr] - Update EC2 driver to accept the auth kwarg (it will accept NodeAuthSSH objects and automatically import a public key that is not already uploaded to the EC2 keyring). (Follow on from LIBCLOUD-367). [John Carr] - Unify extension argument names for assigning a node to security groups in EC2 and OpenStack driver. Argument in the EC2 driver has been renamed from ex_securitygroup to ex_security_groups. For backward compatibility reasons, old argument will continue to work until the next major release. (LIBCLOUD-375) [Tomaz Muraus] - Add ex_import_keypair_from_string and ex_import_keypair method to the CloudStack driver. 
(LIBCLOUD-380) [Sebastien Goasguen] - Add support for managing floating IP addresses to the OpenStack driver. (LIBCLOUD-382) [Ivan Kusalic] - Add extension methods for handling port forwarding to the CloudStack driver, rename CloudStackForwardingRule class to CloudStackIPForwardingRule. (LIBCLOUD-348, LIBCLOUD-381) [sebastien goasguen] - Hook up deploy_node functionality in the CloudStack driver and unify extension arguments for handling security groups. (LIBCLOUD-388) [sebastien goasguen] - Allow user to pass "args" argument to the ScriptDeployment and ScriptFileDeployment class. This argument tells which command line arguments get passed to the ScriptDeployment script. (LIBCLOUD-394) Note: This change is backward incompatible. For more information on how this affects your code and how to update it, visit "Upgrade Notes" documentation page - http://s.apache.org/lc0140un [Tomaz Muraus] - Allow user to specify IAM profile to use when creating an EC2 node. (LIBCLOUD-403) [Xavier Barbosa] - Add support for keypair management to the OpenStack driver. (LIBCLOUD-392) [L. Schaub] - Allow user to specify disk partitioning mode using ex_disk_config argument in the OpenStack based drivers. (LIBCLOUD-402) [Brian Curtin] - Add new driver for NephoScale provider (http://nephoscale.com/). (LIBCLOUD-404) [Markos Gogoulos] - Update network related extension methods so they work correctly with both, OpenStack and Rackspace driver. (LIBCLOUD-368) [Tomaz Muraus] - Add tests for networking functionality in the OpenStack and Rackspace driver. [Tomaz Muraus] - Allow user to pass all supported extension arguments to ex_rebuild_server method in the OpenStack driver. (LIBCLOUD-408) [Dave King] - Add pricing information for Rackspace Cloud Sydney region. [Tomaz Muraus] - Update EC2 instance type map and pricing data. High Storage instances are now also available in Sydney and Singapore region. 
[Tomaz Muraus] - Add new methods for managing storage volumes and snapshots to the EC2 driver (list_volumes, list_snapshots, destroy_volume_snapshot, create_volume_snapshot) (LIBCLOUD-409) [Oleg Suharev] - Add the following new extension methods to EC2 driver: ex_destroy_image, ex_modify_instance_attributes, ex_delete_keypair. (LIBCLOUD-409) [Oleg Suharev] - Allow user to specify a port range when creating a port forwarding rule. (LIBCLOUD-409) [Oleg Suharev] - Align Joyent driver with other drivers and deprecate "location" argument in the driver constructor in favor of "region" argument. Note: Deprecated argument will continue to work until the next major release. [Tomaz Muraus] - Deprecate the following ElasticHosts provider constants: ELASTICHOSTS_UK1, ELASTICHOSTS_UK2, ELASTICHOSTS_US1, ELASTICHOSTS_US2, ELASTICHOSTS_US3, ELASTICHOSTS_CA1, ELASTICHOSTS_AU1, ELASTICHOSTS_CN1 and replace it with a new ELASTICHOSTS constant. Driver referenced by this new constant now takes a "region" argument which dictates to which region to connect. Note: Deprecated constants will continue to work until the next major release. For more information on those changes and how to update your code, please visit "Upgrade Notes" documentation page - http://s.apache.org/lc0140un (LIBCLOUD-383) [Michael Bennett, Tomaz Muraus] - Add log statements to our ParamikoSSHClient wrapper. This should make debugging deployment issues easier. (LIBCLOUD-414) [Tomaz Muraus] - Add new "NodeState.STOPPED" node state. Update HostVirual and EC2 driver to also recognize this new state. (LIBCLOUD-296) [Jayy Vis] - Add new Hong Kong endpoint to Rackspace driver. [Brian Curtin] - Fix ex_delete_keypair method in the EC2 driver. (LIBCLOUD-415) [Oleg Suharev] - Add the following new extension methods for elastic IP management to the EC2 driver: ex_allocate_address, ex_disassociate_address, ex_release_address. 
(LIBCLOUD-417) [Patrick Armstrong] - For consistency and accuracy, rename "ex_associate_addresses" method in the EC2 driver to "ex_associate_address_with_node". Note: Old method will continue to work until the next major release. [Tomaz Muraus] - Add new driver for CloudFrames (http://www.cloudfounders.com/CloudFrames) provider. (LIBCLOUD-358) [Bernard Kerckenaere] - Update default kernel versions which are used when creating a Linode server. Old default kernel versions: - x86 - 2.6.18.8-x86_64-linode1 - x86_64 - 2.6.39.1-linode34 New default kernel versions: - x86 - 3.9.3-x86-linode52 - x86_64 - 3.9.3-x86_64-linode33 (LIBCLOUD-424) [Tomaz Muraus, Jon Chen] - Disable cache busting functionality in the OpenStack and Rackspace next-gen driver and enable it only for Rackspace first-gen driver. [Tomaz Muraus] - Update Google Compute Engine driver to v1beta16. [Rick Wright] - Modify auth_url variable in the OpenStack drivers so it works more like users would expect it to. Previously path specified in the auth_url was ignored and only protocol, hostname and port were used. Now user can provide a full url for the auth_url variable and the path provided in the url is also used. [DaeMyung Kang, Tomaz Muraus] - Allow user to associate arbitrary key/value pairs with a node by passing "ex_metadata" argument (dictionary) to create_node method in the EC2 driver. Those values are associated with a node using tags functionality. (LIBCLOUD-395) [Ivan Kusalic] - Add "ex_get_metadata" method to EC2 and OpenStack driver. This method reads metadata dictionary from the Node object. 
(LIBCLOUD-395) [Ivan Kusalic] - Multiple improvements in the Softlayer driver: - Map "INITIATING" node state to NodeState.PENDING - If node is launching remap "halted" state to "pending" - Add more node sizes - Add ex_stop_node and ex_start_node method - Update tests response fixtures (LIBCLOUD-416) [Markos Gogoulos] - Modify list_sizes method in the KT UCloud driver to work, even if the item doesn't have 'diskofferingid' attribute. (LIBCLOUD-435) [DaeMyung Kang] - Add new c3 instance types to the EC2 driver. [Tomaz Muraus] - Fix an issue with the ex_list_keypairs and ex_list_security_groups method in the CloudStack driver which caused an exception to be thrown if the API returned no keypairs / security groups. (LIBCLOUD-438) [Carlos Reategui, Tomaz Muraus] - Fix a bug in the OpenStack based drivers with not correctly checking if the auth token has expired before re-using it. (LIBCLOUD-428) Reported by Michael Farrell. [Tomaz Muraus, Michael Farrell] Storage ~~~~~~~ - Deprecate CLOUDFILES_US and CLOUDFILES_UK provider constant and replace it with a new CLOUDFILES constant. Driver referenced by this new constant takes a "region" keyword argument which can be one of 'ord', 'dfw', 'iad', 'syd', 'lon'. Note: Deprecated constants will continue to work until the next major release. For more information on this change, please visit "Upgrade Notes" documentation section - http://s.apache.org/lc0140un [Tomaz Muraus] - Allow users to filter objects starting with a prefix by passing ex_prefix argument to the list_container_objects method in the S3, Google Storage and CloudFiles driver. (LIBCLOUD-369) [Stefan Friesel] - Fix an issue with mutating connectionCls.host attribute in the Azure driver. This bug prevented user from having multiple Azure drivers with different keys instantiated at the same time. (LIBCLOUD-399) [Olivier Grisel] - Add a new driver for KT UCloud based on the OpenStack Swift driver. (LIBCLOUD-431). 
[DaeMyung Kang] Load Balancer ~~~~~~~~~~~~~ - Deprecate RACKSPACE_US and RACKSPACE_UK provider constant and replace it with a new RACKSPACE constant. Driver referenced by this new constant takes a "region" keyword argument which can be one of the following: 'ord', 'dfw', 'iad', 'syd', 'lon'. Note: Deprecated constants will continue to work until the next major release. For more information on this change, please visit "Upgrade Notes" documentation section - http://s.apache.org/lc0140un [Tomaz Muraus] - Add new driver for Google Compute Engine (LIBCLOUD-386) [Rick Wright] - Add new Hong Kong endpoint to Rackspace driver. [Brian Curtin] DNS ~~~ - Deprecate RACKSPACE_US and RACKSPACE_UK provider constant and replace it with a new RACKSPACE constant. Driver referenced by this new constant takes a "region" keyword argument which can be one of the following: 'us', 'uk'. Note: Deprecated constants will continue to work until the next major release. For more information on this change, please visit "Upgrade Notes" documentation section - http://s.apache.org/lc0140un [Tomaz Muraus] - Use string instead of integer for RecordType ENUM value. Note: If you directly use an integer instead of RecordType ENUM class you need to update your code to use the RecordType ENUM otherwise the code won't work. For more information on how to do that, see "Upgrade Notes" documentation section - http://s.apache.org/lc0140un [Tomaz Muraus] - Add "export_zone_to_bind_format" and export_zone_to_bind_zone_file method which allows users to export Libcloud Zone to BIND zone format. (LIBCLOUD-398) [Tomaz Muraus] - Update issue with inexistent zone / record handling in the get_zone and get_record method in the Linode driver. Those issues were related to changes in the Linode API. (LIBCLOUD-425) [Jon Chen] Changes with Apache Libcloud 0.13.3 ----------------------------------- Compute ~~~~~~~ - Send "scrub_data" query parameter when destroying a DigitalOcean node. 
This will cause disk to be scrubbed (overwritten with 0's) when destroying a node. (LIBCLOUD-487) Note: This fixes a security issue with a potential leak of data contained on the destroyed node which only affects users of the DigitalOcean driver. (CVE-2013-6480) [Tomaz Muraus] Changes with Apache Libcloud 0.13.2 ----------------------------------- General ~~~~~~~ - Don't sent Content-Length: 0 header with POST and PUT request if "raw" mode is used. This fixes a regression which could cause broken behavior in some storage driver when uploading a file from disk. (LIBCLOUD-396) [Ivan Kusalic] Compute ~~~~~~~ - Added Ubuntu Linux 12.04 image to ElasticHost driver image list. (LIBCLOUD-364) [Bob Thompson] - Update ElasticHosts driver to store drive UUID in the node 'extra' field. (LIBCLOUD-357) [Bob Thompson] Storage ~~~~~~~ - Store last_modified timestamp in the Object extra dictionary in the S3 driver. (LIBCLOUD-373) [Stefan Friesel] Load Balancer ~~~~~~~~~~~~~ - Expose CloudStack driver directly through the Provider.CLOUDSTACK constant. [Tomaz Muraus] DNS ~~~ - Modify Zerigo driver to include record TTL in the record 'extra' attribute if a record has a TTL set. [Tomaz Muraus] - Modify values in the Record 'extra' dictionary attribute in the Zerigo DNS driver to be set to None instead of an empty string ('') if a value for the provided key is not set. [Tomaz Muraus] Changes with Apache Libcloud 0.13.1 ----------------------------------- General ~~~~~~~ - Fix a regression introduced in 0.13.0 and make sure to include Content-Length 0 with PUT and POST requests. (LIBCLOUD-362, LIBCLOUD-390) [Tomaz Muraus] Compute ~~~~~~~ - Fix a bug in the ElasticHosts driver and check for right HTTP status code when determining drive imaging success. (LIBCLOUD-363) [Bob Thompson] - Update Opsource driver to include node public ip address (if available). 
(LIBCLOUD-384) [Michael Bennett] Storage ~~~~~~~ - Fix a regression with calling encode_container_name instead of encode_object_name on object name in get_object method. Reported by Ben Meng (LIBCLOUD-366) [Tomaz Muraus] - Ensure that AWS S3 multipart upload works for small iterators. (LIBCLOUD-378) [Mahendra M] Changes with Apache Libcloud 0.13.0 ----------------------------------- General ~~~~~~~ - Add homebrew curl-ca-bundle path to CA_CERTS_PATH. This will make Libcloud use homebrew curl ca bundle file (if available) for server certificate validation. (LIBCLOUD-324) [Robert Chiniquy] - Modify OpenStackAuthConnection and change auth_token_expires attribute to be a datetime object instead of a string. [Tomaz Muraus] - Modify OpenStackAuthConnection to support re-using of the existing auth token if it's still valid instead of re-authenticating on every authenticate() call. [Tomaz Muraus] - Modify base Connection class to not send Content-Length header if body is not provided. [Tomaz Muraus] - Add the new error class ProviderError and modify InvalidCredsError to inherit from it. (LIBCLOUD-331) [Jayy Vis] Misc ---- - Add unittest2 library dependency for tests and update some tests to use it. [Tomaz Muraus] Compute ~~~~~~~ - Fix destroy_node method in the experimental libvirt driver. [Aymen Fitati] - Add ex_start_node method to the Joyent driver. (LIBCLOUD-319) [rszabo50] - Fix Python 3 compatibility issue in the ScriptFileDeployment class. (LIBCLOUD-321) [Arfrever Frehtes Taifersar Arahesis] - Add ex_set_metadata_entry and ex_get_metadata method to the VCloud driver. (LIBCLOUD-318) [Michel Samia] - Various improvements and bug-fixes in the VCloud driver. (LIBCLOUD-323) [Michel Samia] - Various bug fixes and improvements in the HostVirtual driver. (LIBCLOUD-249) [Dinesh Bhoopathy] - Modify list_sizes method in the OpenStack driver to include OpenStackNodeSize object which includes 'vcpus' attribute which holds a number of virtual CPUs for this size. 
(LIBCLOUD-325) [Carlo] - For consistency rename "ex_describe_keypairs" method in the EC2 driver to "ex_describe_keypair". [Tomaz Muraus] - Modify "ex_describe_keypair" method to return key fingerprint in the return value. (LIBCLOUD-326) [Andre Merzky, Tomaz Muraus] - Populate private_ips attribute in the CloudStack drive when returning a Node object from the create_node method. (LIBCLOUD-329) [Sebastien Goasguen, Tomaz Muraus] - Allow user to pass extra arguments via "extra_args" argument which are then passed to the "deployVirtualMachine" call in the CloudStack driver create_node method. (LIBCLOUD-330) [Sebastien Goasguen, Tomaz Muraus] - Update Gandi driver to handle new billing model. (LIBCLOUD-317) [Aymeric Barantal] - Fix a bug in the Linode driver and remove extra newline which is added when generating a random root password in create_node. (LIBCLOUD-334) [Juan Carlos Moreno] - Add extension methods for managing keypairs to the CloudStack driver. (LIBCLOUD-333) [sebastien goasguen] - Add extension methods for managing security groups to the CloudStack driver. (LIBCLOUD-332) [sebastien goasguen] - Add extension methods for starting and stoping the node to the CloudStack driver. (LIBCLOUD-338) [sebastien goasguen] - Fix old _wait_until_running method. (LIBCLOUD-339) [Bob Thompson] - Allow user to override default async task completion timeout by specifying ex_clone_timeout argument. (LIBCLOUD-340) [Michal Galet] - Fix a bug in the GoGrid driver get_uuid method. (LIBCLOUD-341) [Bob Thompson] - Fix a bug with deploy_node not respecting 'timeout' kwarg. [Kevin Carter] - Modify create_node method in CloudStack driver to return an instance of CloudStackNode and add a new "expunging" node state. (LIBCLOUD-345) [sebastien goasguen] - Update API endpoint hostnames in the ElasticHost driver and use hostnames which return a valid SSL certificate. (LIBCLOUD-346) [Bob Thompson] - Add ex_list_networks method and missing tests for list_templates to the CloudStack driver. 
(LIBCLOUD-349) [Philipp Strube] - Correctly throw InvalidCredsError if user passes invalid credentials to the DigitalOcean driver. [Tomaz Muraus] Storage ~~~~~~~ - Fix an issue with double encoding the container name in the CloudFiles driver upload_object method. Also properly encode container and object name used in the HTTP request in the get_container and get_object method. (LIBCLOUD-328) [Tomaz Muraus] Load Balancer ~~~~~~~~~~~~~ - Add ex_list_current_usage method to the Rackspace driver. Changes with Apache Libcloud 0.12.4 ----------------------------------- Compute ~~~~~~~ - Fix a regression in Softlayer driver caused by the xmlrpclib changes. (LIBCLOUD-310) [Jason Johnson] - Allow user to pass alternate ssh usernames to deploy_node (ssh_alternate_usernames kwarg) which are used for authentication if the default one doesn't work. (LIBCLOUD-309) [Chris Psaltis, Tomaz Muraus] - Fix a bug in EC2 list_locations method - 'name' attribute didn't contain a the right value. [Tomaz Muraus] - Add new ScriptFileDeployment deployment class which reads deploy script from a file. [Rudolf J Streif] - Add support for API version 5.1 to the vCloud driver and accept any value which is a multiple of four for ex_vm_memory kwarg in create_node method. (LIBCLOUD-314) [Trevor Powell] Storage ~~~~~~~ - Fix a regression with removed ex_force_service_region constructor kwarg in the CloudFiles driver. (LIBCLOUD-260) Changes with Apache Libcloud 0.12.3 ----------------------------------- General ~~~~~~~ - Fix Python 3.x related regressions. (LIBCLOUD-245) Reported by Arfrever Frehtes Taifersar Arahesis. [Tomaz Muraus] - Fix a regression introduced with recent xmlrpiclib changes which broke all the Gandi.net drivers. (LIBCLOUD-288) Reported by Hutson Betts. [Tomaz Muraus] - Improve deploy code to work correctly if the ssh user doesn't have access to the /root directory. Previously the ScriptDeployment script was stored in /root folder by default. 
Now it's stored in users home directory under filename ~/libcloud_deploymeny_.sh. (LIBCLOUD-302) Reported by rotem on #libcloud. [Tomaz Muraus] Compute ~~~~~~~ - Improve public and private IP address handling in OpenStack 1.1 driver. Assume every IP address which doesn't have a label "public" or "internet" is private. (LIBCLOUD-297) [Grischa Meyer, Tomaz Muraus] - Add new driver for DigitalOcean provider - https://www.digitalocean.com/. (LIBCLOUD-304) [Tomaz Muraus] - Fix a regression in ParamikoSSHClient.run method which caused this methid to only work as expected if you passed an absolute or a relative path to the script to it. (LIBCLOUD-278) [Tomaz Muraus] DNS ~~~ - Allow user to specify 'priority' extra argument when creating a MX or SRV record. [Brian Jinwright, Tomaz Muraus] Changes with Apache Libcloud 0.12.1 ----------------------------------- General ~~~~~~~ - Deprecate LazyList method of iteration over large paginated collections and use a new, more efficient generator based approach which doesn't require the iterator to be pre-exhausted and buffering all of the values in memory. Existing list_* methods which previously used LazyList class are preserving the old behavior and new iterate_* methods which use a new generator based approach have been added. (LIBCLOUD-254) [Mahendra M] - Replace old ENUM style provider constants and replace them with a string version. This change allows users to dynamically register new drivers using a new set_driver method. (LIBCLOUD-255) [Mahendra M] - Allow user to explicitly specify which CA file is used for verifying the server certificate by setting 'SSL_CERT_FILE' environment variable. Note: When this variable is specified, the specified path is the only CA file which is used to verifying the server certificate. (LIBCLOUD-283) [Tomaz Muraus, Erinn Looney-Triggs] - Add a common module (libcloud.common.xmlrpc) for handling XML-RPC requests using Libcloud http layer. 
Also refactor existing drivers which use xmlrpclib directly (VCL, Gandi, Softlayer) to use this module. This change allows drivers to support LIBCLOUD_DEBUG and SSL certificate validation functionality. Previously they have bypassed Libcloud http layer so this functionality was not available. (LIBCLOUD-288) [John Carr] Compute ~~~~~~~ - Fix string interpolation bug in __repr__ methods in the IBM SCE driver. (LIBCLOUD-242) [Tomaz Muraus] - Fix test failures which happened in Python 3.3 due to: - hash randomization - changes in xml.etree module - changes in xmlrpc module (LIBCLOUD-245) [Tomaz Muraus] - Improvements and additions in vCloud driver: - Expose generic query method (ex_query) - Provide functionality to get and set control access for vApps. This way created vApps can be shared between users/groups or everyone. (LIBCLOUD-251) [Michal Galet] - Update EC2 pricing data to reflect new, lower prices - http://aws.typepad.com/aws/2012/10/new-ec2-second-generation-standard-instances-and-price-reductions-1.html [Tomaz Muraus] - Update EC2 instance size to reflect new m3 instance types. Also refactor the code to make it easier to maintain. [Tomaz Muraus] - Add a new driver for HostVirtual (http://www.vr.org) provider. (LIBCLOUD-249) [Dinesh Bhoopathy] - Fix a bug where a numeric instead of a string value was used for the content-length header in VCloud driver. (LIBCLOUD-256) [Brian DeGeeter, Tomaz Muraus] - Add a new driver for new Asia Pacific (Sydney) EC2 region. [Tomaz Muraus] - Add support for managing security groups to the OpenStack driver. This patch adds the following extension methods: - ex_list_security_groups, ex_get_node_security_groups methods - ex_create_security_group, ex_delete_security_group - ex_create_security_group_rule, ex_delete_security_group_rule (LIBCLOUD-253) [L. Schaub] - Modify ElasticStack driver class to pass 'vnc auto' instead of 'vnc:ip auto' argument to the API when creating a server. 
It looks like 'vnc:ip' has been replaced with 'vnc'. [Rick Copeland, Tomaz Muraus] - Add new EC2 instance type - High Storage Eight Extra Large Instance (hs1.8xlarge). [Tomaz Muraus] - Map 'shutting-down' node state in EC2 driver to UNKNOWN. Previously it was mapped to TERMINATED. (LIBCLOUD-280) Note: This change is backward incompatible which means you need to update your code if you rely on the old behavior. [Tomaz Muraus, Marcin Kuzminski] - Change _wait_until_running method so it supports waiting on multiple nodes and make it public (wait_until_running). (LIBCLOUD-274) [Nick Bailey] - Add new EC2 instance type - High Memory Cluster Eight Extra Large. (cr1.8xlarge). [Tomaz Muraus] - Add new driver for Abiquo provider - http://www.abiquo.com (LIBCLOUD-250). [Jaume Devesa] - Allow user to pass 'ex_blockdevicemappings' kwarg to the EC2 driver 'create_node' method. (LIBCLOUD-282) [Joe Miller, Tomaz Muraus] - Improve error handling in the Brightbox driver. [Tomaz Muraus] - Fix the ScriptDeployment step to work correctly if user provides a relative path for the script argument. (LIBCLOUD-278) [Jaume Devesa] - Fix Softlayer driver and make sure all the code is up to date and works with the latest version of the actual Softlayer deployment (v3). (LIBCLOUD-287) [Kevin McDonald] - Update EC2 driver, m3 instance types are now available in all the regions except Brazil. Also update pricing to reflect new (lower) prices. [Tomaz Muraus] - Minor improvements in the HostVirtual driver and add new ex_get_node and ex_build_node extension method. (LIBCLOUD-249) [Dinesh Bhoopathy] - Add ex_destroy_image method to IBM SCE driver. (LIBCLOUD-291) [Perry Zou] - Add the following new regions to the ElasticHosts driver: sjc-c, syd-v, hkg-e. (LIBCLOUD-293) [Tomaz Muraus] - Fix create_node in OpenStack driver to work correctly if 'adminPass' attribute is not present in the response. 
[Gavin McCance, Tomaz Muraus] - Allow users to filter images returned by the list_images method in the EC2 driver by providing ex_image_ids argument. (LIBCLOUD-294) [Chris Psaltis, Joseph Hall] - Add support for OpenNebula 3.8. (LIBCLOUD-295) [Guillaume ZITTA] - Add missing 'deletd' -> terminated mapping to OpenStack driver. (LIBCLOUD-276) [Jayy Vis] - Fix create_node in OpenStack driver to work correctly if 'adminPass' attribute is not present in the response. (LIBCLOUD-292) [Gavin McCance, Tomaz Muraus] Storage ~~~~~~~ - Add a new local storage driver. (LIBCLOUD-252, LIBCLOUD-258, LIBCLOUD-265, LIBCLOUD-273) [Mahendra M] - Fix a bug which caused the connection to not be closed when using Python 2.6 and calling get_object on an object which doesn't exist in the S3 driver. (LIBCLOUD-257) [John Carr] - Add a new generator based method for listing / iterating over the containers (iterate_containers). (LIBCLOUD-261) [Mahendra M] - Add ex_purge_object_from_cdn method to the CloudFiles driver. (LIBCLOUD-267) [Tomaz Muraus] - Support for multipart uploads and other improvements in the S3 driver so it can more easily be re-used with other implementations (e.g. Google Storage, etc.). Also default to a multipart upload when using upload_object_via_stream. This methods is more efficient compared to old approach because it only requires buffering a single multipart chunk (5 MB) in memory. (LIBCLOUD-269) [Mahendra M] - Add new driver for Windows Azure Storage with support for block and page blobs. (LIBCLOUD-80) [Mahendra M] DNS ~~~ - Update 'if type' checks in the update_record methods to behave correctly if users passes in RecordType.A with a value of 0 - if type is not None. (LIBCLOUD-247) [Tomaz Muraus] - New driver for HostVirtual provider (www.vr.org). (LIBCLOUD-249) [Dinesh Bhoopathy] - Finish Amazon Route53 driver. (LIBCLOUD-132) [John Carr] - Add new driver for Gandi provider (https://www.gandi.net). 
(LIBCLOUD-281) [John Carr] Load-Balancer ~~~~~~~~~~~~~ - Add new driver for AWS Elastic Load Balancing service. (LIBCLOUD-169) [John Carr] Changes with Apache Libcloud 0.11.4 ----------------------------------- General ~~~~~~~ - Fix some of tests failures which happened in Python 3.3 due to randomized dictionary ordering. (LIBCLOUD-245) [Tomaz Muraus] Compute ~~~~~~~ - Fix a bug where a numeric instead of a string value was used for the content-length header in VCloud driver. (LIBCLOUD-256) [Brian DeGeeter, Tomaz Muraus] Storage ~~~~~~~ - Don't ignore ex_force_service_region argument in the CloudFiles driver. (LIBCLOUD-260) [Dan Di Spaltro] - Fix a bug which caused the connection to not be closed when using Python 2.6 and calling get_object on an object which doesn't exist in the S3 driver. (LIBCLOUD-257) [John Carr] DNS ~~~ - Update 'if type' checks in the update_record methods to behave correctly if users passes in RecordType.A with a value of 0 - if type is not None. (LIBCLOUD-247) [Tomaz Muraus] Changes with Apache Libcloud 0.11.3 ----------------------------------- Storage ~~~~~~~ - Include 'last_modified' and 'content_type' attribute in the Object 'extra' dictionary when retrieving object using get_object in the S3 driver. Also modify 'meta_data' dictionary to include all the headers prefixed with 'x-amz-meta-'. [Tomaz Muraus] Changes with Apache Libcloud 0.11.2 ----------------------------------- General ~~~~~~~ - Fix a bug with the Libcloud SSL verification code. Code was too strict and didn't allow "-" character in the sub-domain when using a wildcard certificate. Note: This is NOT a security vulnerability. (LIBCLOUD-244) [Tomaz Muraus] Compute ~~~~~~~ - Add new Rackspace Nova driver for Chicago (ORD) location (LIBCLOUD-234) [Brian McDaniel] - Add capacity information to Vdc objects and implement power operations. (LIBCLOUD-239) [Michal Galet] - Allow user to pass 'timeout' argument to the 'deploy_node' method. 
[Tomaz Muraus] - Add ex_list_security_groups, ex_authorize_security_group and ex_describe_all_keypairs methods to the EC2 driver. (LIBCLOUD-241, LIBCLOUD-243) [Nick Bailey] - Add new methods for managing storage volumes and other extenstion methods to the IBM SCE driver. (LIBCLOUD-242) [Sengor Kusturica] Storage ~~~~~~~ - Add the following new methods to the CloudFiles driver: ex_set_account_metadata_temp_url_key, ex_get_object_temp_url. (GITHUB-72) [Shawn Smith] Load-balancer ~~~~~~~~~~~~~ - Add 'balancer' attribute to the Member instance. This attribute refers to the LoadBalancer instance this member belongs to. [Tomaz Muraus] Changes with Apache Libcloud 0.11.1 ----------------------------------- General ~~~~~~~ - Fix hostname validation in the SSL verification code (CVE-2012-3446). Reported by researchers from the University of Texas at Austin (Martin Georgiev, Suman Jana and Vitaly Shmatikov). Changes with Apache Libcloud 0.11.0 ----------------------------------- Compute ~~~~~~~ - Add a timeout of 10 seconds to OpenStackAuthConnection class. (LIBCLOUD-199) [Chris Gilmer] - Add time.sleep(wait_period) to _ssh_client_connect to prevent busy loops when we immediately can't connect to a server. (LIBCLOUD-197) [Jay Doane] - Fix a bug with Python 3 support in the following drivers - IBM SCE, - CloudStack - CloudSigma - OpenNebula - VpsNet - EC2 - ElasticStack - vCloud - OpSource - Slicehost - Joyent (LIBCLOUD-204) [Sengor Kusturica, Hutson Betts, Tomaz Muraus] - Make CloudStack driver more robust and make it work if list_images() call returns no images. (LIBCLOUD-202) [Gabriel Reid] - Add force_ipv4 argument to _wait_until_running and default it to True. This will make Libcloud ignore IPv6 addresses when using deploy_node. (LIBCLOUD-200) [Jay Doane, Tomaz Muraus] - Include error text if a CloudStack async job returns an error code. (LIBCLOUD-207) [Gabriel Reid] - Add extenstion methods for block storage volume management to the CloudStack driver. 
(LIBCLOUD-208) [Gabriel Reid] - New driver for KT UCloud (http://home.ucloud.olleh.com/main.kt) based on the CloudStack driver. [DaeMyung Kang] - Add a standard API and methods for managing storage volumes to the EC2 and CloudStack drivers. Base API consistent of the following methods: create_volume, destroy_volume, attach_volume, detach_volume. (LIBCLOUD-213) [Gabriel Reid] - Change ex_describe_tags, ex_create_tags and ex_delete_tags methods signature in the EC2 driver. Argument is now called resource (previously it was called node). This methods work with both Node and StorageVolume objects. (LIBCLOUD-213) [Gabriel Reid, Tomaz Muraus] - Add Rackspace Nova London driver. [Chris Gilmer] - Fix a bug - If user doesn't pass in 'network_id' argument to the create_node method in the CloudStack driver, don't explicitly define it. (LIBCLOUD-219) [Bruno Mahé, Tomaz Muraus] - Modify EC2 driver to also return cc2.8xlarge cluster compute instance in the eu-west-1 region. [Tomaz Muraus] - Add 'auth_user_variable' to the OpenStackAuthConnection class. [Mark Everett] - Fix a bug with repeated URLs in some requests the vCloud driver. (LIBCLOUD-222) [Michal Galet] - New Gridspot driver with basic list and destroy functionality. (LIBCLOUD-223) [Amir Elaguizy] - Add methods for managing storage volumes to the Gandi driver. (LIBCLOUD-225) [Aymeric Barantal] DNS ~~~ - Add support for GEO RecordType to Zerigo driver. (LIBCLOUD-203) [Gary Wilson] - Fix a bug with Python 3 support in the following drivers (LIBCLOUD-204) - Zerigo [Tomaz Muraus] - Add support for URL RecordType to Zerigo driver. (LIBCLOUD-209) [Bojan Mihelac] - Properly handle record creation when user doesn't provider a record name and wants to create a record for the actual domain. 
Reported by Matt Perry (LIBCLOUD-224) [Tomaz Muraus] Storage ~~~~~~~ - Fix a bug with Python 3 support in the following drivers - Atmos - Google Storage - Amazon S3 (LIBCLOUD-204) [Tomaz Muraus] - Fix a bug in the CloudFiles driver which prevented it to work with accounts which use a non ORD endpoint. (LIBCLOUD-205) [Geoff Greer] - Fix a bug in the enable_container_cdn method. (LIBCLOUD-206) [Geoff Greer] - Allow user to specify container CDN TTL when calling container.enable_cd() using ex_ttl keyword argument in the CloudFiles driver. [Tomaz Muraus] - Add ex_enable_static_website and ex_set_error_page method to the CloudFiles driver. [Tomaz Muraus] - Propagate kwargs passed to container.download_object() to driver.download_object(). (LIBCLOUD-227) [Benno Rice] - Fix a bug with not escaping container and object name in the Atmos driver. [Russell Keith-Magee, Benno Rice] - Fix upload_object_via_stream method in the Atmos driver. (LIBCLOUD-228) [Benno Rice] - Fix a bug with uploading zero-sized files in the OpenStack Swift / CloudFiles driver. [Tomaz Muraus] - Fix a bug with content_type and encoding of object and path names in the Atmos driver. [Russell Keith-Magee] Other ~~~~~ - Unify docstrings formatting in the compute drivers. (LIBCLOUD-229) [Ilgiz Islamgulov] Changes with Apache Libcloud 0.10.1 ----------------------------------- General ~~~~~~~ - Add timeout attribute to base 'Connection' class and pass it to the connection class constructor if Python version is not 2.5. [Chris Gilmer] Compute ~~~~~~~ - Update IBM SBC driver so it works with IBM Smart Cloud Enterprise. (LIBCLOUD-195) [Sengor Kusturica] - Add ex_register_iso method to the CloudStack driver. (LIBCLOUD-196) [Daemian Mack] - Allow user to specify which IP to use when calling deploy_node. (defaults to 'public_ips'). Previously it only worked with public IP, now user can pass 'private_ips' as an argument and SSH client will try to connect to the node first private IP address. 
[Jay Doane] - Fix CloudSigmaLvsNodeDriver connectionCls bug. [Jerry Chen] - Add 'ex_keyname' argument to the create_node method in the OpenStack driver. (LIBCLOUD-177) [Jay Doane] - Fix a problem in deploy_node - make it work with providers which don't instantly return created node in the list_node response. Also add __str__ and __repr__ method to DeploymentError so the error message is more useful. (LIBCLOUD-176) [Jouke Waleson, Tomaz Muraus] - Add 'ssh_key' feature to Brightbox driver. This way it works with deploy_node. (LIBCLOUD-179) [Neil Wilson] - Add Joyent compute driver. [Tomaz Muraus] - Store auth token expire times on the connection class in the attribute called 'auth_token_expires'. (LIBCLOUD-178) [Chris Gilmer, Brad Morgan] - Add new driver for VCL cloud (http://www.educause.edu/blog/hes8/CloudComputingandtheVirtualCom/167931) (LIBCLOUD-180) [Jason Gionta, Tomaz Muraus] - Improve and add new features to Brightbox driver - Update fixtures to represent actual api output - Update compute tests to 100% coverage - Add userdata and server group extensions to create_node - Add ipv6 support to public ip list - Improve in line documentation - Add lots of api output information to Node and Image 'extra' attributes - Allow variable API versions (api_version argument) - Allow reverse dns updates for cloud ip extensions (LIBCLOUD-184) [Neil Wilson, Tomaz Muraus] - Add ex_userdata argument to the OpenStack 1.1 driver. (LIBCLOUD-185) [Jay Doane] - Modify Vmware vCloud driver and implement new features for the vCloud version 1.5. (LIBCLOUD-183) [Michal Galet, Sengor Kusturica] - Allow user to pass mode argument to SSHClient.put method and default it to 'w'. (LIBCLOUD-188) [Jay Doane] - Modify SSHKeyDeployment step to use append mode so it doesn't overwrite existing entries in .ssh/authorized_keys. 
(LIBCLOUD-187) [Jay Doane] - Modify ParamikoSSHClient to connect to the SSH agent and automatically look for private keys in ~/.ssh if the 'auth' and 'ssh_key' argument is not specified when calling deploy_node. (LIBCLOUD-182) [Tomaz Muraus] - Add ex_rescue and ex_unrescue method to OpenStack 1.1 driver. (LIBCLOUD-193) [Shawn Smith] - Include 'password' in the node extra dictionary when calling deploy_node if the password auth is used. [Juan Carlos Moreno] - Add FileDeployment class to libcloud.compute.deployment module. This can be used as a replacement for ex_files argument if the provider supports deployment functionality. (LIBCLOUD-190) [Jay Doane] Storage ~~~~~~~ - Large object upload support for CloudFiles driver - Add CLOUDFILES_SWIFT driver to connect to OpenStack Swift [Dmitry Russkikh, Roman Bogorodskiy] Load-balancer ~~~~~~~~~~~~~ - Don't include 'body_regex' attribute in the Rackspace driver body if body_regex is None or empty string. (LIBCLOUD-186) [Bill Woodward] - Don't split Load balancer IP addresses into public and private list. Include all the addresses in the 'virtualIps' variable in the extra dictionary (Rackspace driver). (LIBCLOUD-191) [Adam Pickeral] Changes with Apache Libcloud 0.9.1 ---------------------------------- General ~~~~~~~ - Make parsing of the Auth API responses in the OpenStack drivers more flexible and extensible. Now, every connection class that inherits from the openstack base connection must implement get_endpoint(), whose job is to return the correct endpoint out of the service catalog. Note: The openstack.py base driver no longer works by default with Rackspace nova. The default endpoint parsed from the service catalog is the default compute endpoint for devstack. (LIBCLOUD-151) [Brad Morgan] - Allow user to pass ex_tenant_name keyword argument to the OpenStack node driver class. This scopes all the endpoints returned by the Auth API endpoint to the provided tenant. (LIBCLOUD-172) [James E. 
Blair] - Allow user to specify OpenStack service catalog parameters (service type, name and region). This way base OpenStack driver can be used with different providers without needing to subclass. (LIBCLOUD-173) [James E. Blair] - Fix a bug with handling compressed responses in the Linode driver. (LIBCLOUD-158) [Ben Agricola] Compute ~~~~~~~ - Add new RackspaceNovaBeta and RackspaceNovaDfw driver based on the OpenStack. (LIBCLOUD-151) [Brad Morgan] - Include 'created' and 'updated' attribute in the OpenStack 1.1 driver. (LIBCLOUD-155) [Chris Gilmer] - Include 'minRam' and 'minDisk' attribute in the OpenStack 1.1 driver Node extra dictionary. (LIBCLOUD-163) [Chris Gilmer] - Allow users to use a list of tuples for the query string parameters inside the OpenStack connection classes. This way the same key can be specified multiple times (LIBCLOUD-153) [Dave King] - Allow user to pass 'max_tries' keyword argument to deploy_node method. [Tomaz Muraus] - Include original exception error message when re-throwing an exception inside _run_deployment_script method. [Tomaz Muraus] - Add support for ElasticHosts new United States (Los Angeles) and Canada (Toronto) locations. (GITHUB-53) [Jaime Irurzun] - Add serverId attribute to the NodeImage object extra dictionary in the OpenStack driver. [Mark Everett] - Add new EC2 instance type - m1.medium. [Tomaz Muraus] - Allow user to re-use auth tokens and pass 'ex_force_auth_token' keyword argument to the OpenStack driver constructor. (LIBCLOUD-164) [Dave King] - Add new experimental libvirt driver. [Tomaz Muraus] - Properly handle OpenStack providers which return public IP addresses under the 'internet' key in the addresses dictionary. [Tomaz Muraus] - Update create_node in Linode driver and make it return a Node object instead of a list. Reported by Jouke Waleson. (LIBCLOUD-175) [Tomaz Muraus] Storage ~~~~~~~ - Don't lowercase special header names in the Amazon S3 storage driver. 
(LIBCLOUD-149) [Tomaz Muraus] Load-balancer ~~~~~~~~~~~~~ - Allow user to specify a condition and weight when adding a member in the Rackspace driver. [Adam Pickeral] - Add an extension method (ex_balancer_attach_members) for attaching multiple members to a load balancer in the Rackspace driver. (LIBCLOUD-152) [Adam Pickeral] - Add ex_create_balancer method to the Rackspace driver and allow user to pass 'vip' argument to it. (LIBCLOUD-166) [Adam Pickeral] - Update Rackspace driver to support Auth 2.0. (LIBCLOUD-165) [Dave King] - Add new ex_create_balancer_access_rule and ex_create_balancer_access_rule_no_poll method to the Rackspace driver. (LIBCLOUD-170) [Dave King] DNS ~~~ - Update Rackspace driver to support Auth 2.0. (LIBCLOUD-165) [Dave King] Changes with Apache Libcloud 0.8.0 ---------------------------------- General ~~~~~~~ - Add 'request_kwargs' argument to the get_poll_request_kwargs method. This argument contains kwargs which were previously used to initiate the poll request. [Mark Everett] - Add support for handling compressed responses (deflate, gzip). Also send "Accept-Encoding" "gzip,deflate" header with all the requests. [Tomaz Muraus] - Fix debug module (LIBCLOUD_DEBUG env variable) so it works with Python 3 [Tomaz Muraus] Compute ~~~~~~~ - Added support for retrieving OpenNebula v3.2 instance types, OpenNebula v3.0 network Public attribute support, and additional code coverage tests. [Hutson Betts] - Add implementation for ex_save_image method to the OpenStack 1.1 driver. [Shawn Smith] - Add support for Amazon new South America (Sao Paulo) location. [Tomaz Muraus] - Fix a bug in OpenStack driver when 2.0_apikey or 2.0_password 'auth_version' is used. [Tomaz Muraus] - Current OpenNebula OCCI implementation does not support a proper restart method. Rather it suspends and resumes. Therefore, restart_node has been removed from the OpenNebula driver. [Hutson Betts] - Enable ex_delete_image method in the OpenStack 1.1 driver. 
[Shawn Smith] - Return NodeImage instance in OpenStack 1.1 driver ex_save_image method (LIBCLOUD-138) [Shawn Smith] - Enable reboot_node method in the OpenNebula 3.2 driver. [Hutson Betts] - Fix a public_ips Node variable assignment in the Gandi.net driver. [Aymeric Barantal] - Updated the list of node states for OpenNebula drivers. (LIBCLOUD-148) [Hutson Betts] Storage ~~~~~~~ - Propagate extra keyword arguments passed to the Rackspace driver connection class. [Dave King] Load-balancer ~~~~~~~~~~~~~ - Add 'extra' attribute to the LoadBalancer object and retrieve all the virtual IP addresses in the Rackspace driver. [Dave King] - Add list_supported_algorithms() method to the base LoadBalancer class. This method returns a list of supported algorithms by the provider. [Dave King] - Update Rackspace driver: - Add two new supported algorithms: WEIGHTED_ROUND_ROBIN, WEIGHTED_LEAST_CONNECTIONS - Add ex_list_algorithm_names method - Add ex_get_balancer_error_page method - Add ex_balancer_access_list method - Populate LoadBalancer extra dictionary with more attributes - Add support for health monitors and connection throttling - Add more balancer states - ex_list_protocols_with_default_ports [Dave King] - Propagate extra keyword arguments passed to the Rackspace driver connection class. [Dave King] - Add 'extra' attribute to the Member object and populate it in the Rackspace driver. [Mark Everett] - Adds status to the Member object and conditions as an 'enum' (Rackspace driver). [Mark Everett] - Add update_balancer method to the base LoadBalancer class. [Mark Everett] - Add update_balancer method to the Rackspace driver. [Mark Everett] - Add created and updated attribute to the LoadBalancer extra dictionary in the Rackspace driver. [Mark Everett] - Fix protocol name mapping in the Rackspace driver. 
[Bill Woodward] Changes with Apache Libcloud 0.7.1 ---------------------------------- General ~~~~~~~ - Fix a minor bug in debug mode (LIBCLOUD_DEBUG=/dev/stderr) which has been introduced when adding Python 3 compatibility layer. [Paul Querna] - Update OpenStack Auth API endpoint paths. [Paul Querna] Changes with Apache Libcloud 0.7.0 ---------------------------------- General ~~~~~~~ - Add support for Python 3.x. [Tomaz Muraus] - Remove old deprecated paths. [Tomaz Muraus] Compute ~~~~~~~ - Update CloudSigma Zurich API endpoint address. [Tomaz Muraus] - Add new US Las Vegas endpoint to CloudSigma driver (types.CLOUDSIGMA_US) [Tomaz Muraus] - Allow user to specify drive type (hdd, ssd) when creating a CloudSigma server. Note 'ssd' drive_type doesn't work with the API yet. [Tomaz Muraus] - Update OpenStack 1.1 driver to comply with the API specs. Need to make another call to retrieve node name and ip addresses when creating a node, because the first call only returns an id and the password. (GITHUB-40) [Dave King] - Add ex_node_ids argument to the EC2 driver list_nodes method. (GITHUB-39) [Suvish Vt] - If OpenStack Auth 2.0 API is used, also parse out tenant id and name and save it on the connection class (conn.tenant['id'], conn.tenant['name']). [Tomaz Muraus] - Add new "Cluster Compute Eight Extra Large" size to the Amazon EC2 driver. [Tomaz Muraus] - Add the following extension methods to the EC2 compute driver: ex_describe_all_addresses, ex_associate_addresses, ex_start_node, ex_stop_node. [Suvish Vt] - Change public_ip and private_ip attribute on the Node object to the public_ips and private_ips since both of the objects are always a list. Note: For backward compatibility you can still access public_ip and private_ip attributes, but this will be removed in the next release. [Tomaz Muraus] - Fix an inconsistency in IBM SBC driver and make sure public_ips and private_ips attributes are a list. 
[Tomaz Muraus] - Fix an inconsistency in OpSource driver and make sure public_ips is an empty list ([]), not 'unknown' [Tomaz Muraus] - Updated support for OpenNebula.org v1.4, v2.x, and v3.x APIs and included additional compute tests validating functionality. (LIBCLOUD-121) [Hutson Betts] Load-balancer ~~~~~~~~~~~~~ - Add ex_member_address argument to the Rackspace driver list_balancers method. If this argument is provided, only loadbalancers which have a member with the provided IP address attached are returned. [Tomaz Muraus] Changes with Apache Libcloud 0.6.2 ---------------------------------- General ~~~~~~~ - Fix a bug in PollingConnection class - actually use and don't ignore the poll_interval [Tomaz Muraus] Compute ~~~~~~~ - Add support for Auth 2.0 API (keystone) to the OpenStack Auth connection class. [Brad Morgan] - Add list_locations method to the OpenStack driver and fix some inconsistencies in the OpenStack driver extension method signatures. [Brad Morgan] - Update Amazon EC2 driver and pricing data to support a new region - US West 2 (Oregon) [Tomaz Muraus] - Expose 'CLOUDSTACK' provider. This driver can be used with an arbitrary CloudStack installation. [Tomaz Muraus] Storage ~~~~~~~ - Update Amazon S3 driver to support a new region - US West 2 (Oregon) [Tomaz Muraus] DNS ~~~ - Increase the default poll interval in the Rackspace driver to 2.5 seconds. [Tomaz Muraus] - Fix a bug in Rackspace Cloud DNS driver and make sure to throw an exception if an unexpected status code is returned. Reported by "jeblair". [Tomaz Muraus] Changes with Apache Libcloud 0.6.1 ---------------------------------- General ~~~~~~~ - Modify ParamikoSSHClient.connect so it supports authentication using a key file, (LIBCLOUD-116) [Jay Doane] - User must now explicitly specify a path when using LIBCLOUD_DEBUG environment variable. 
(LIBCLOUD-95) [daveb, Tomaz Muraus] - Add new XmlResponse and JsonResponse base class and modify all the driver-specific response classes to inherit from one of those two classes where applicable. [Caio Romão] - Add new 'PollingConnection' class. This class can work with 'async' APIs. It sends an initial request and then periodically polls the API until the job has completed or a timeout has been reached. [Tomaz Muraus] Compute ~~~~~~~ - Add 24GB size to the GoGrid driver [Roman Bogorodskiy] - Fix API endpoint URL in the Softlayer driver [Tomaz Muraus] - Add support for OpenNebula 3.0 API (LIBCLOUD-120) [Hutson Betts] - Add more attributes to the extra dictionary in the EC2 driver. (GITHUB-31) [Juan Carlos Moreno] - Fix IP address assignment in the EC2 driver. Don't include "None" in the public_ip and private_ip Node list attribute. [Tomaz Muraus] - Make deploy_node functionality more robust and don't start deployment if node public_ip attribute is an empty list. [Tomaz Muraus] - Support SSH key authentication when using deploy_node. [Russell Haering, Tomaz Muraus] - Enable deploy_node functionality in the EC2 driver using SSH key authentication [Russell Haering, Tomaz Muraus] - Enable paramiko library debug log level if LIBCLOUD_DEBUG is used and paramiko is installed. [Tomaz Muraus] - Fix the request signature generation in the base EC2 compute driver. If the endpoint is using a non-standard port (Eucalyptus based installations), append it to the hostname used to generate the signature. [Simon Delamare] - Add new "unavailable" state to the BrightboxNodeDriver class. [Tim Fletcher] - Increase a PollingConnection timeout in the CloudStack connection and fix the context dictionary creation in the _async_request method. [Oleg Suharev] - Fix networks retrieval in the CloudStack driver create_node method. Also only pass 'networkids' field to the API if there are any networks available. [Oleg Suharev, Tomaz Muraus] - Fix list_nodes in the CloudStack driver. 
Private IPs aren't always available. [Tomaz Muraus] Load-balancer ~~~~~~~~~~~~~ - Add a missing argument to the method call inside LoadBalancer.attach_compute_node and Driver.balancer_attach_compute_node. [Tim Fletcher, Tomaz Muraus] - Add missing destroy() method to the LoadBalancer class. [Tomaz Muraus] DNS ~~~ - New drivers for Rackspace Cloud DNS (US and UK region) [Tomaz Muraus] - Add list_record_types() method. This method returns a list of record types supported by the provider. [Tomaz Muraus] Changes with Apache Libcloud 0.6.0-beta1 ---------------------------------------- General ~~~~~~~ - All the driver classes now inherit from the BaseDriver class [Tomaz Muraus] - Prefer simplejson (if available) over json module. (LIBCLOUD-112) [Geoff Greer] - Update compute demo and change the syntax of test credentials stored in test/secrets.py-dist. (LIBCLOUD-111) [Mike Nerone] - Enable SSL certificate verification by default and throw an exception if CA certificate files cannot be found. This can be overridden by setting libcloud.security.VERIFY_SSL_CERT_STRICT to False. [Tomaz Muraus] Compute ~~~~~~~ - Support for 1.1 API and many other improvements in the OpenStack driver ; LIBCLOUD-83 [Mike Nerone, Paul Querna, Brad Morgan, Tomaz Muraus] - Add some extra methods to the Gandi.net driver (LIBCLOUD-115) [Aymeric Barantal] - Add ex_delete_image method to the Rackspace driver. (GITHUB-27) [David Busby] - Linode driver now supports new 'Japan' location [Jed Smith] - Rackspace driver now inherits from the OpenStack one instead of doing it vice versa. (LIBCLOUD-110) [Mike Nerone] - Properly populate NodeImage "details" dictionary in the Rackspace compute driver. (LIBCLOUD-107) [Lucy Mendel] - Fix a bug in Eucalyptus driver ex_describe_addresses method. [Tomaz Muraus] - Add the following new extension methods to the Rackspace driver: ex_resize, ex_confirm_resize, ex_revert_resize. 
[Tomaz Muraus] - Also allow user to pass in Node object to some ex\_ methods in the Rackspace compute driver. [Tomaz Muraus] - Throw an exception in deploy_node if paramiko library is not available [Tomaz Muraus] - Fix chmod argument value which is passed to the sftpclient.put method; GITHUB-17 [John Carr] - New driver for Ninefold.com. (LIBCLOUD-98) [Benno Rice] Storage ~~~~~~~ - New driver for Google Storage based on the v1.0 / legacy API [Tomaz Muraus] - New driver for Ninefold.com. (GITHUB-19) [Benno Rice] - Fix a bug in uploading an object with some versions of Python 2.7 where httplib library doesn't automatically call str() on the header values. [Tomaz Muraus] - Allow users to upload (create) 0-bytes large (empty) objects [Tomaz Muraus] Load-balancer ~~~~~~~~~~~~~ - New driver for Rackspace UK location [Tomaz Muraus] - New driver for Ninefold.com. (LIBCLOUD-98) [Benno Rice] DNS ~~~ - Drivers for Linode DNS and Zerigo DNS [Tomaz Muraus] - Brand new DNS API! [Tomaz Muraus] Changes with Apache Libcloud 0.5.2 ---------------------------------- Compute ~~~~~~~ - New driver for serverlove.com and skalicloud.com [Tomaz Muraus] - Fix node name and tag handling in the Amazon EC2 driver [Wiktor Kolodziej] - Fix pricing and response handling in the OpenStack driver [Andrey Zhuchkov] - Fix deploy_node() method and make it more robust [Tomaz Muraus] - Users can now pass file like objects to ScriptDeployment and SSHKeyDeployment constructor. [Tomaz Muraus] - Include node tags when calling list_nodes() in the Amazon EC2 driver [Trevor Pounds] - Properly handle response errors in the Rackspace driver and only throw InvalidCredsError if the returned status code is 401 [Brad Morgan] - Fix the create_node method in the Nimbus driver and make the "ex_create_tag" method a no-op, because Nimbus doesn't support creating tags. [Tomaz Muraus] Storage ~~~~~~~ - Fix handling of the containers with a lot of objects. 
Now a LazyList object is returned when user calls list_container_objects() method and this object transparently handles pagination. [Danny Clark, Wiktor Kolodziej] Changes with Apache Libcloud 0.5.0 ---------------------------------- - Existing APIs directly on the libcloud.* module have been deprecated and will be removed in version 0.6.0. Most methods were moved to the libcloud.compute.* module. - Add new libcloud.loadbalancers API, with initial support for: - GoGrid Load Balancers - Rackspace Load Balancers [Roman Bogorodskiy] - Add new libcloud.storage API, with initial support for: - Amazon S3 - Rackspace CloudFiles [Tomaz Muraus] - Add new libcloud.compute drivers for: - Bluebox [Christian Paredes] - Gandi.net [Aymeric Barantal] - Nimbus [David LaBissoniere] - OpenStack [Roman Bogorodskiy] - Opsource.net [Joe Miller] - Added "pricing" module and improved pricing handling. [Tomaz Muraus] - Updates to the GoGrid compute driver: - Use API version 1.0. - Remove sandbox flag. - Add ex_list_ips() to list IP addresses assigned to the account. - Implement ex_edit_image method which allows changing image attributes like name, description and make image public or private. [Roman Bogorodskiy] - Updates to the Amazon EC2 compute driver: - When creating a Node, use the name argument to set a Tag with the value. [Tomaz Muraus] - Add extension method for modifying node attributes and changing the node size. [Tomaz Muraus] - Add support for the new Amazon Region (Tokyo). [Tomaz Muraus] - Added ex_create_tags and ex_delete_tags. [Brandon Rhodes] - Include node Elastic IP addresses in the node public_ip attribute for the EC2 nodes. [Tomaz Muraus] - Use ipAddress and privateIpAddress attribute for the EC2 node public and private ip. [Tomaz Muraus] - Add ex_describe_addresses method to the EC2 driver. 
[Tomaz Muraus] - Updates to the Rackspace CloudServers compute driver: - Add ex_rebuild() and ex_get_node_details() [Andrew Klochkov] - Expose URI of a Rackspace node to the node meta data. [Paul Querna] - Minor fixes to get the library and tests working on Python 2.7 and PyPy. [Tomaz Muraus] Changes with Apache Libcloud 0.4.2 (Released January 18, 2011) -------------------------------------------------------------- - Fix EC2 create_node to become backward compatible for NodeLocation. [Tomaz Muraus] - Update code for compatibility with CPython 2.5 [Jerry Chen] - Implement ex_edit_node method for GoGrid driver which allows changing node attributes like amount of RAM or description. [Roman Bogorodskiy] - Add ex_set_password and ex_set_server_name to Rackspace driver. [Peter Herndon, Paul Querna] - Add Hard and Soft reboot methods to Rackspace driver. [Peter Herndon] - EC2 Driver availability zones, via ex_list_availability_zones; list_locations rewrite to include availability zones [Tomaz Muraus] - EC2 Driver Idempotency capability in create_node; LIBCLOUD-69 [David LaBissoniere] - SSL Certificate Name Verification: - libcloud.security module - LibcloudHTTPSConnection, LibcloudHTTPConnection (alias) - Emits warning when not verifying, or CA certs not found - Append ORD1 to available Rackspace location, but keep in the same node as DFW1, because it's not readable or writeable from the API. [Per suggestion of Grig Gheorghiu] - ex_create_ip_group, ex_list_ip_groups, ex_delete_ip_group, ex_share_ip, ex_unshare_ip, ex_list_ip_addresses additions to Rackspace driver [Andrew Klochkov] - New driver for CloudSigma. [Tomaz Muraus] - New driver for Brightbox Cloud. (LIBCLOUD-63) [Tim Fletcher] - Deployment capability to ElasticHosts [Tomaz Muraus] - Allow deploy_node to use non-standard SSH username and port [Tomaz Muraus] - Added Rackspace UK (London) support [Chmouel Boudjnah] - GoGrid driver: add support for locations, i.e. 
listing of locations and creation of a node in specified location [Roman Bogorodskiy] - GoGrid and Rackspace drivers: add ex_save_image() extra call to convert running node to an image [Roman Bogorodskiy] - GoGrid driver: add support for creating 'sandbox' server and populate isSandbox flag in node's extra information. [Roman Bogorodskiy] - Add ImportKeyPair and DescribeKeyPair to EC2. (LIBCLOUD-62) [Philip Schwartz] - Update EC2 driver and test fixtures for new API. [Philip Schwartz] Changes with Apache Libcloud 0.4.0 [Released October 6, 2010] ------------------------------------------------------------- - Add create keypair functionality to EC2 Drivers. (LIBCLOUD-57) [Grig Gheorghiu] - Improve handling of GoGrid accounts with limited access API keys. [Paul Querna] - New Driver for ElasticHosts. (LIBCLOUD-45) [Tomaz Muraus] - Use more consistent name for GoGrid driver and use http POST method for 'unsafe' operations [Russell Haering] - Implement password handling and add deployment support for GoGrid nodes. [Roman Bogorodskiy] - Fix behavior of GoGrid's create_node to wait for a Node ID. [Roman Bogorodskiy] - Add ex_create_node_nowait to GoGrid driver if you don't need to wait for a Node ID when creating a node. [Roman Bogorodskiy] - Removed libcloud.interfaces module. [Paul Querna] - Removed dependency on zope.interfaces. [Paul Querna] - RimuHosting moved API endpoint address. [Paul Querna] - Fix regression and error in GoGrid driver for parsing node objects. [Roman Bogorodskiy] - Added more test cases for GoGrid driver. (LIBCLOUD-34) [Roman Bogorodskiy, Jerry Chen] - Fix parsing of Slicehost nodes with multiple Public IP addresses. [Paul Querna] - Add exit_status to ScriptDeployment. (LIBCLOUD-36) [Paul Querna] - Update prices for several drivers. [Brad Morgan, Paul Querna] - Update Linode driver to reflect new plan sizes. [Jed Smith] - Change default of 'location' in Linode create_node. (LIBCLOUD-41) [Jed Smith, Steve Steiner] - Document the Linode driver. 
[Jed Smith] - Request a private, LAN IP address at Linode creation. [Jed Smith] Changes with Apache Libcloud 0.3.1 [Released May 11, 2010] ---------------------------------------------------------- - Updates to Apache License blocks to correctly reflect status as an Apache Project. - Fix NOTICE file to use 2010 copyright date. - Improve error messages for when running the test cases without first setting up a secrets.py Changes with Apache Libcloud 0.3.0 [Tagged May 6, 2010, not released] --------------------------------------------------------------------- - New Drivers for: - Dreamhost - Eucalyptus - Enomaly ECP - IBM Developer Cloud - OpenNebula - SoftLayer - Added new deployment and bootstrap API. - Improved Voxel driver. - Added support for Amazon EC2 Asia Pacific (Singapore) Region. - Improved test coverage for all drivers. - Add support for multiple security groups in EC2. - Fixed bug in Rackspace and RimuHosting when using multiple threads. - Improved debugging and logging of HTTP requests. - Improved documentation for all classes and methods. Changes with Apache Libcloud 0.2.0 [Tagged February 2, 2010] ------------------------------------------------------------ - First public release.