From 51c1e15dd690a0aa150fca1b60b9d73d1d00aca3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sun, 27 Jan 2013 05:14:32 +0000 Subject: [PATCH 001/143] Create 0.12.x branch git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439021 13f79535-47bb-0310-9956-ffa450edef68 From 923704a204939eb50d547ad1bc9edc7a1108f158 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sun, 27 Jan 2013 05:57:51 +0000 Subject: [PATCH 002/143] Remove "datacenter" related changes and improvements in the compute and storage drivers. Some of those changes are backward incompatible and are still work in progress. They will be included in the next major release. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439028 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 19 -- libcloud/compute/drivers/ec2.py | 110 +++++++++++- libcloud/compute/drivers/rackspace.py | 130 +++----------- libcloud/compute/drivers/rackspacenova.py | 104 +++++++++++ libcloud/compute/providers.py | 26 ++- libcloud/compute/types.py | 23 +-- libcloud/storage/drivers/cloudfiles.py | 141 +++++++-------- libcloud/storage/providers.py | 11 +- libcloud/storage/types.py | 4 +- .../fixtures/openstack/_v1_1__auth.json | 11 -- .../fixtures/openstack/_v2_0__auth.json | 6 - libcloud/test/compute/test_deployment.py | 2 +- libcloud/test/compute/test_ec2.py | 92 +--------- libcloud/test/compute/test_rackspace.py | 163 +----------------- libcloud/test/compute/test_rackspacenova.py | 149 ++++++++++++++++ libcloud/test/storage/test_cloudfiles.py | 101 ++--------- 16 files changed, 472 insertions(+), 620 deletions(-) create mode 100644 libcloud/compute/drivers/rackspacenova.py create mode 100644 libcloud/test/compute/test_rackspacenova.py diff --git a/CHANGES b/CHANGES index eb6fb96a5d..4d1d82a3ff 100644 --- a/CHANGES +++ b/CHANGES @@ -33,16 +33,6 @@ Changes with Apache Libcloud 0.12.0: (LIBCLOUD-245) [Tomaz Muraus] - - Refactor Rackspace driver classes and make 
them easier to use. Now there - are two rackspace constants - Provider.RACKSPACE which represents new - next-gen OpenStack servers and Provider.RACKSPACE_FIRST_GEN which - represents old cloud servers. - - Note: This change is backward incompatible. For more information on those - changes and how to upgrade your code to make it work with it, please visit - TODO. - [Tomaz Muraus] - - Improvements and additions in vCloud driver: - Expose generic query method (ex_query) - Provide functionality to get and set control access for vApps. This way @@ -127,15 +117,6 @@ Changes with Apache Libcloud 0.12.0: (LIBCLOUD-267) [Tomaz Muraus] - - Deprecate CLOUDFILES_US and CLOUDFILES_UK storage provider constants and - add a new CLOUDFILES constant. - Driver referenced by this constant takes a "datacenter" keyword argument - and can be one of 'ord', 'dfw' or 'lon'. - - Note: Deprecated constants will continue to work for the foreseeable - future. - [Tomaz Muraus] - - Support for multipart uploads and other improvemetns in the S3 driver so it can more easily be re-used with other implementations (e.g. Google Storage, etc.). diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index a2ba57775e..06620cd38b 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1412,53 +1412,149 @@ def __str__(self): return repr(self.value) +class EC2EUConnection(EC2Connection): + """ + Connection class for EC2 in the Western Europe Region + """ + host = REGION_DETAILS['eu-west-1']['endpoint'] + + class EC2EUNodeDriver(EC2NodeDriver): """ Driver class for EC2 in the Western Europe Region. 
""" - _datacenter = 'eu-west-1' + api_name = 'ec2_eu_west' + name = 'Amazon EC2 (eu-west-1)' + friendly_name = 'Amazon Europe Ireland' + country = 'IE' + region_name = 'eu-west-1' + connectionCls = EC2EUConnection + + +class EC2USWestConnection(EC2Connection): + """ + Connection class for EC2 in the Western US Region + """ + + host = REGION_DETAILS['us-west-1']['endpoint'] class EC2USWestNodeDriver(EC2NodeDriver): """ Driver class for EC2 in the Western US Region """ - _datacenter = 'us-west-1' + + api_name = 'ec2_us_west' + name = 'Amazon EC2 (us-west-1)' + friendly_name = 'Amazon US N. California' + country = 'US' + region_name = 'us-west-1' + connectionCls = EC2USWestConnection + + +class EC2USWestOregonConnection(EC2Connection): + """ + Connection class for EC2 in the Western US Region (Oregon). + """ + + host = REGION_DETAILS['us-west-2']['endpoint'] class EC2USWestOregonNodeDriver(EC2NodeDriver): """ Driver class for EC2 in the US West Oregon region. """ - _datacenter = 'us-west-2' + + api_name = 'ec2_us_west_oregon' + name = 'Amazon EC2 (us-west-2)' + friendly_name = 'Amazon US West - Oregon' + country = 'US' + region_name = 'us-west-2' + connectionCls = EC2USWestOregonConnection + + +class EC2APSEConnection(EC2Connection): + """ + Connection class for EC2 in the Southeast Asia Pacific Region. + """ + + host = REGION_DETAILS['ap-southeast-1']['endpoint'] + + +class EC2APNEConnection(EC2Connection): + """ + Connection class for EC2 in the Northeast Asia Pacific Region. + """ + + host = REGION_DETAILS['ap-northeast-1']['endpoint'] class EC2APSENodeDriver(EC2NodeDriver): """ Driver class for EC2 in the Southeast Asia Pacific Region. 
""" - _datacenter = 'ap-southeast-1' + + api_name = 'ec2_ap_southeast' + name = 'Amazon EC2 (ap-southeast-1)' + friendly_name = 'Amazon Asia-Pacific Singapore' + country = 'SG' + region_name = 'ap-southeast-1' + connectionCls = EC2APSEConnection class EC2APNENodeDriver(EC2NodeDriver): """ Driver class for EC2 in the Northeast Asia Pacific Region. """ - _datacenter = 'ap-northeast-1' + + api_name = 'ec2_ap_northeast' + name = 'Amazon EC2 (ap-northeast-1)' + friendly_name = 'Amazon Asia-Pacific Tokyo' + country = 'JP' + region_name = 'ap-northeast-1' + connectionCls = EC2APNEConnection + + +class EC2SAEastConnection(EC2Connection): + """ + Connection class for EC2 in the South America (Sao Paulo) Region. + """ + + host = REGION_DETAILS['sa-east-1']['endpoint'] class EC2SAEastNodeDriver(EC2NodeDriver): """ Driver class for EC2 in the South America (Sao Paulo) Region. """ - _datacenter = 'sa-east-1' + + api_name = 'ec2_sa_east' + name = 'Amazon EC2 (sa-east-1)' + friendly_name = 'Amazon South America Sao Paulo' + country = 'BR' + region_name = 'sa-east-1' + connectionCls = EC2SAEastConnection + + +class EC2APSESydneyConnection(EC2Connection): + """ + Connection class for EC2 in the Southeast Asia Pacific (Sydney) Region. + """ + + host = REGION_DETAILS['ap-southeast-2']['endpoint'] class EC2APSESydneyNodeDriver(EC2NodeDriver): """ Driver class for EC2 in the Southeast Asia Pacific (Sydney) Region. 
""" - _datacenter = 'ap-southeast-2' + + api_name = 'ec2_ap_southeast_2' + name = 'Amazon EC2 (ap-southeast-2)' + friendly_name = 'Amazon Asia-Pacific Sydney' + country = 'AU' + region_name = 'ap-southeast-2' + connectionCls = EC2APSESydneyConnection class EucConnection(EC2Connection): diff --git a/libcloud/compute/drivers/rackspace.py b/libcloud/compute/drivers/rackspace.py index af05e0c529..2f8a329fb0 100644 --- a/libcloud/compute/drivers/rackspace.py +++ b/libcloud/compute/drivers/rackspace.py @@ -19,35 +19,22 @@ from libcloud.compute.base import NodeLocation from libcloud.compute.drivers.openstack import OpenStack_1_0_Connection,\ OpenStack_1_0_NodeDriver, OpenStack_1_0_Response -from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection,\ - OpenStack_1_1_NodeDriver from libcloud.common.rackspace import ( AUTH_URL_US, AUTH_URL_UK) -ENDPOINT_ARGS_MAP = { - 'dfw': {'service_type': 'compute', - 'name': 'cloudServersOpenStack', - 'region': 'DFW'}, - 'ord': {'service_type': 'compute', - 'name': 'cloudServersOpenStack', - 'region': 'ORD'}, - 'lon': {'service_type': 'compute', - 'name': 'cloudServersOpenStack', - 'region': 'LON'} -} - - -class RackspaceFirstGenConnection(OpenStack_1_0_Connection): +class RackspaceConnection(OpenStack_1_0_Connection): """ - Connection class for the Rackspace first-gen driver. 
+ Connection class for the Rackspace driver """ + responseCls = OpenStack_1_0_Response auth_url = AUTH_URL_US XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0' def get_endpoint(self): + ep = {} if '2.0' in self._auth_version: ep = self.service_catalog.get_endpoint(service_type='compute', @@ -61,112 +48,37 @@ def get_endpoint(self): raise LibcloudError('Could not find specified endpoint') -class RackspaceFirstGenNodeDriver(OpenStack_1_0_NodeDriver): - name = 'Rackspace Cloud' - website = 'http://www.rackspace.com' - connectionCls = RackspaceFirstGenConnection - type = Provider.RACKSPACE_FIRST_GEN +class RackspaceNodeDriver(OpenStack_1_0_NodeDriver): + name = 'Rackspace' + website = 'http://www.rackspace.com/' + connectionCls = RackspaceConnection + type = Provider.RACKSPACE api_name = 'rackspace' - def __init__(self, key, secret=None, secure=True, host=None, port=None, - region='us', **kwargs): - """ - @inherits: L{NodeDriver.__init__} - - @param region: Region ID which should be used - @type region: C{str} - """ - if region not in ['us', 'uk']: - raise ValueError('Invalid region: %s' % (region)) - - if region == 'us': - self.connectionCls.auth_url = AUTH_URL_US - elif region == 'uk': - self.connectionCls.auth_url = AUTH_URL_UK - - self.region = region - - super(RackspaceFirstGenNodeDriver, self).__init__(key=key, - secret=secret, - secure=secure, - host=host, - port=port, **kwargs) - def list_locations(self): - """ - Lists available locations + """Lists available locations Locations cannot be set or retrieved via the API, but currently there are two locations, DFW and ORD. 
@inherits: L{OpenStack_1_0_NodeDriver.list_locations} """ - if self.region == 'us': - locations = [NodeLocation(0, "Rackspace DFW1/ORD1", 'US', self)] - elif self.region == 'uk': - locations = [NodeLocation(0, 'Rackspace UK London', 'UK', self)] - - return locations + return [NodeLocation(0, "Rackspace DFW1/ORD1", 'US', self)] -class RackspaceConnection(OpenStack_1_1_Connection): +class RackspaceUKConnection(RackspaceConnection): """ - Connection class for the Rackspace next-gen OpenStack base driver. + Connection class for the Rackspace UK driver """ - get_endpoint_args = {} + auth_url = AUTH_URL_UK - def get_endpoint(self): - if not self.get_endpoint_args: - raise LibcloudError( - 'RackspaceConnection must have get_endpoint_args set') - # Only support auth 2.0_* - if '2.0' in self._auth_version: - ep = self.service_catalog.get_endpoint(**self.get_endpoint_args) - else: - raise LibcloudError( - 'Auth version "%s" not supported' % (self._auth_version)) - - # It's possible to authenticate but the service catalog not have - # the correct endpoint for this driver, so we throw here. 
- if 'publicURL' in ep: - return ep['publicURL'] - else: - raise LibcloudError('Could not find specified endpoint') - - -class RackspaceNodeDriver(OpenStack_1_1_NodeDriver): - name = 'Rackspace Cloud' - website = 'http://www.rackspace.com' - connectionCls = RackspaceConnection - type = Provider.RACKSPACE - api_name = None - - def __init__(self, key, secret=None, secure=True, host=None, port=None, - datacenter='dfw', **kwargs): - """ - @inherits: L{NodeDriver.__init__} - - @param datacenter: Datacenter ID which should be used - @type datacenter: C{str} - """ - - if datacenter not in ['dfw', 'ord', 'lon']: - raise ValueError('Invalid datacenter: %s' % (datacenter)) - - if datacenter in ['dfw', 'ord']: - self.connectionCls.auth_url = AUTH_URL_US - self.api_name = 'rackspacenovaus' - elif datacenter == 'lon': - self.connectionCls.auth_url = AUTH_URL_UK - self.api_name = 'rackspacenovalon' - - self.connectionCls._auth_version = '2.0' - self.connectionCls.get_endpoint_args = \ - ENDPOINT_ARGS_MAP[datacenter] +class RackspaceUKNodeDriver(RackspaceNodeDriver): + """Driver for Rackspace in the UK (London) + """ - self.datacenter = datacenter + name = 'Rackspace (UK)' + connectionCls = RackspaceUKConnection - super(RackspaceNodeDriver, self).__init__(key=key, secret=secret, - secure=secure, host=host, - port=port, **kwargs) + def list_locations(self): + return [NodeLocation(0, 'Rackspace UK London', 'UK', self)] diff --git a/libcloud/compute/drivers/rackspacenova.py b/libcloud/compute/drivers/rackspacenova.py new file mode 100644 index 0000000000..6157877f93 --- /dev/null +++ b/libcloud/compute/drivers/rackspacenova.py @@ -0,0 +1,104 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Rackspace driver +""" +from libcloud.compute.providers import Provider +from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection,\ + OpenStack_1_1_NodeDriver +from libcloud.common.types import LibcloudError + + +class RackspaceNovaConnection(OpenStack_1_1_Connection): + get_endpoint_args = {} + + def get_endpoint(self): + if not self.get_endpoint_args: + raise LibcloudError( + 'RackspaceNovaConnection must have get_endpoint_args set') + + # Only support auth 2.0_* + if '2.0' in self._auth_version: + ep = self.service_catalog.get_endpoint(**self.get_endpoint_args) + else: + raise LibcloudError( + 'Auth version "%s" not supported' % (self._auth_version)) + + # It's possible to authenticate but the service catalog not have + # the correct endpoint for this driver, so we throw here. 
+ if 'publicURL' in ep: + return ep['publicURL'] + else: + raise LibcloudError('Could not find specified endpoint') + + +class RackspaceNovaBetaConnection(RackspaceNovaConnection): + + get_endpoint_args = {'service_type': 'compute', + 'name': 'cloudServersPreprod', + 'region': 'DFW'} + + +class RackspaceNovaDfwConnection(RackspaceNovaConnection): + + get_endpoint_args = {'service_type': 'compute', + 'name': 'cloudServersOpenStack', + 'region': 'DFW'} + + +class RackspaceNovaLonConnection(RackspaceNovaConnection): + + get_endpoint_args = {'service_type': 'compute', + 'name': 'cloudServersOpenStack', + 'region': 'LON'} + + +class RackspaceNovaDfwNodeDriver(OpenStack_1_1_NodeDriver): + name = 'RackspaceNovadfw' + website = 'http://www.rackspace.com/' + connectionCls = RackspaceNovaDfwConnection + type = Provider.RACKSPACE_NOVA_DFW + api_name = 'rackspacenovaus' + + +class RackspaceNovaOrdConnection(RackspaceNovaConnection): + + get_endpoint_args = {'service_type': 'compute', + 'name': 'cloudServersOpenStack', + 'region': 'ORD'} + + +class RackspaceNovaOrdNodeDriver(OpenStack_1_1_NodeDriver): + name = 'RackspaceNovaord' + website = 'http://www.rackspace.com/' + connectionCls = RackspaceNovaOrdConnection + type = Provider.RACKSPACE_NOVA_ORD + api_name = 'rackspacenovaus' + + +class RackspaceNovaLonNodeDriver(OpenStack_1_1_NodeDriver): + name = 'RackspaceNovalon' + website = 'http://www.rackspace.com/' + connectionCls = RackspaceNovaLonConnection + type = Provider.RACKSPACE_NOVA_LON + api_name = 'rackspacenovauk' + + +class RackspaceNovaBetaNodeDriver(OpenStack_1_1_NodeDriver): + name = 'RackspaceNovaBeta' + website = 'http://www.rackspace.com/' + connectionCls = RackspaceNovaBetaConnection + type = Provider.RACKSPACE_NOVA_BETA + api_name = 'rackspacenovabeta' diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index bdb1ba9bef..d1c078c2c8 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -18,8 +18,7 @@ from 
libcloud.utils.misc import get_driver as _get_provider_driver from libcloud.utils.misc import set_driver as _set_provider_driver -from libcloud.compute.types import Provider, DEPRECATED_RACKSPACE_PROVIDERS -from libcloud.compute.types import OLD_CONSTANT_TO_NEW_MAPPING +from libcloud.compute.types import Provider __all__ = [ "Provider", @@ -69,8 +68,8 @@ ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'), Provider.RACKSPACE: ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'), - Provider.RACKSPACE_FIRST_GEN: - ('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'), + Provider.RACKSPACE_UK: + ('libcloud.compute.drivers.rackspace', 'RackspaceUKNodeDriver'), Provider.SLICEHOST: ('libcloud.compute.drivers.slicehost', 'SlicehostNodeDriver'), Provider.VPSNET: @@ -111,6 +110,14 @@ ('libcloud.compute.drivers.vcloud', 'TerremarkDriver'), Provider.CLOUDSTACK: ('libcloud.compute.drivers.cloudstack', 'CloudStackNodeDriver'), + Provider.RACKSPACE_NOVA_BETA: + ('libcloud.compute.drivers.rackspacenova', 'RackspaceNovaBetaNodeDriver'), + Provider.RACKSPACE_NOVA_DFW: + ('libcloud.compute.drivers.rackspacenova', 'RackspaceNovaDfwNodeDriver'), + Provider.RACKSPACE_NOVA_ORD: + ('libcloud.compute.drivers.rackspacenova', 'RackspaceNovaOrdNodeDriver'), + Provider.RACKSPACE_NOVA_LON: + ('libcloud.compute.drivers.rackspacenova', 'RackspaceNovaLonNodeDriver'), Provider.LIBVIRT: ('libcloud.compute.drivers.libvirt_driver', 'LibvirtNodeDriver'), Provider.JOYENT: @@ -127,17 +134,6 @@ def get_driver(provider): - if provider in DEPRECATED_RACKSPACE_PROVIDERS: - id_to_name_map = dict([(v, k) for k, v in Provider.__dict__.items()]) - old_name = id_to_name_map[provider] - new_name = id_to_name_map[OLD_CONSTANT_TO_NEW_MAPPING[provider]] - - msg = 'Provider constant %s has been removed. 
New constant ' \ - 'is now called %s.\n' \ - 'For more information on this change and how to modify your ' \ - 'code to work with it, please visit: TODO' % (old_name, new_name) - raise Exception(msg) - return _get_provider_driver(DRIVERS, provider) def set_driver(provider, module, klass): diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index 431944d7d3..e7a393dfc8 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -30,8 +30,6 @@ "MalformedResponseError", "InvalidCredsError", "InvalidCredsException", - "DEPRECATED_RACKSPACE_PROVIDERS", - "OLD_CONSTANT_TO_NEW_MAPPING" ] @@ -73,7 +71,6 @@ class Provider(object): """ DUMMY = 'dummy' EC2 = 'ec2' - RACKSPACE = 'rackspace' SLICEHOST = 'slicehost' GOGRID = 'gogrid' VPSNET = 'vpsnet' @@ -111,11 +108,9 @@ class Provider(object): VCL = 'vcl' KTUCLOUD = 'ktucloud' GRIDSPOT = 'gridspot' - RACKSPACE_FIRST_GEN = 'rackspace_first_gen' HOSTVIRTUAL = 'hostvirtual' ABIQUO = 'abiquo' - # Deprecated constants which are still supported EC2_US_EAST = 'ec2_us_east' EC2_EU = 'ec2_eu' # deprecated name EC2_EU_WEST = 'ec2_eu_west' @@ -126,7 +121,7 @@ class Provider(object): EC2_SA_EAST = 'ec2_sa_east' EC2_AP_SOUTHEAST2 = 'ec2_ap_southeast_2' - # Deprecated constants which aren't supported anymore + RACKSPACE = 'rackspace' RACKSPACE_UK = 'rackspace_uk' RACKSPACE_NOVA_BETA = 'rackspace_nova_beta' RACKSPACE_NOVA_DFW = 'rackspace_nova_dfw' @@ -134,22 +129,6 @@ class Provider(object): RACKSPACE_NOVA_ORD = 'rackspace_nova_ord' -DEPRECATED_RACKSPACE_PROVIDERS = [Provider.RACKSPACE_UK, - Provider.RACKSPACE_NOVA_BETA, - Provider.RACKSPACE_NOVA_DFW, - Provider.RACKSPACE_NOVA_LON, - Provider.RACKSPACE_NOVA_ORD] -OLD_CONSTANT_TO_NEW_MAPPING = { - Provider.RACKSPACE: Provider.RACKSPACE_FIRST_GEN, - Provider.RACKSPACE_UK: Provider.RACKSPACE_FIRST_GEN, - - Provider.RACKSPACE_NOVA_BETA: Provider.RACKSPACE, - Provider.RACKSPACE_NOVA_DFW: Provider.RACKSPACE, - Provider.RACKSPACE_NOVA_LON: Provider.RACKSPACE, - 
Provider.RACKSPACE_NOVA_ORD: Provider.RACKSPACE -} - - class NodeState(object): """ Standard states for a node diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index 5b63b52a37..c7a7ac3c8e 100644 --- a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -45,6 +45,7 @@ from libcloud.storage.types import ObjectDoesNotExistError from libcloud.storage.types import ObjectHashMismatchError from libcloud.storage.types import InvalidContainerNameError +from libcloud.common.types import LazyList from libcloud.common.openstack import OpenStackBaseConnection from libcloud.common.openstack import OpenStackDriverMixin @@ -101,14 +102,13 @@ class CloudFilesConnection(OpenStackBaseConnection): Base connection class for the Cloudfiles driver. """ + auth_url = AUTH_URL_US responseCls = CloudFilesResponse rawResponseCls = CloudFilesRawResponse - def __init__(self, user_id, key, secure=True, auth_url=AUTH_URL_US, - **kwargs): + def __init__(self, user_id, key, secure=True, **kwargs): super(CloudFilesConnection, self).__init__(user_id, key, secure=secure, **kwargs) - self.auth_url = auth_url self.api_version = API_VERSION self.accept_format = 'application/json' self.cdn_request = False @@ -131,15 +131,10 @@ def get_endpoint(self): if self.cdn_request: eps = cdn_eps - if self._ex_force_service_region: - eps = [ep for ep in eps if ep['region'].lower() == self._ex_force_service_region.lower()] - if len(eps) == 0: - # TODO: Better error message raise LibcloudError('Could not find specified endpoint') ep = eps[0] - if 'publicURL' in ep: return ep['publicURL'] else: @@ -165,6 +160,22 @@ def request(self, action, params=None, data='', headers=None, method='GET', raw=raw) +class CloudFilesUSConnection(CloudFilesConnection): + """ + Connection class for the Cloudfiles US endpoint. 
+ """ + + auth_url = AUTH_URL_US + + +class CloudFilesUKConnection(CloudFilesConnection): + """ + Connection class for the Cloudfiles UK endpoint. + """ + + auth_url = AUTH_URL_UK + + class CloudFilesSwiftConnection(CloudFilesConnection): """ Connection class for the Cloudfiles Swift endpoint. @@ -192,7 +203,10 @@ def get_endpoint(self, *args, **kwargs): class CloudFilesStorageDriver(StorageDriver, OpenStackDriverMixin): """ - CloudFiles driver. + Base CloudFiles driver. + + You should never create an instance of this class directly but use US/US + class. """ name = 'CloudFiles' website = 'http://www.rackspace.com/' @@ -201,29 +215,11 @@ class CloudFilesStorageDriver(StorageDriver, OpenStackDriverMixin): hash_type = 'md5' supports_chunked_encoding = True - def __init__(self, key, secret=None, secure=True, host=None, port=None, - datacenter='ord', **kwargs): - """ - @inherits: L{StorageDriver.__init__} - - @param datacenter: Datacenter ID which should be used. - @type datacenter: C{str} - """ - if hasattr(self, '_datacenter'): - datacenter = self._datacenter - - # This is here for backard compatibility - if 'ex_force_service_region' in kwargs: - datacenter = kwargs['ex_force_service_region'] - - self.datacenter = datacenter - - OpenStackDriverMixin.__init__(self, (), **kwargs) - super(CloudFilesStorageDriver, self).__init__(key=key, secret=secret, - secure=secure, host=host, - port=port, **kwargs) + def __init__(self, *args, **kwargs): + OpenStackDriverMixin.__init__(self, *args, **kwargs) + super(CloudFilesStorageDriver, self).__init__(*args, **kwargs) - def iterate_containers(self): + def list_containers(self): response = self.connection.request('') if response.status == httplib.NO_CONTENT: @@ -233,6 +229,10 @@ def iterate_containers(self): raise LibcloudError('Unexpected status code: %s' % (response.status)) + def list_container_objects(self, container): + value_dict = {'container': container} + return LazyList(get_more=self._get_more, value_dict=value_dict) + 
def get_container(self, container_name): response = self.connection.request('/%s' % (container_name), method='HEAD') @@ -409,26 +409,6 @@ def delete_object(self, obj): raise LibcloudError('Unexpected status code: %s' % (response.status)) - def ex_purge_object_from_cdn(self, obj, email=None): - """ - Purge edge cache for the specified object. - - @param email: Email where a notification will be sent when the job - completes. (optional) - @type email: C{str} - """ - container_name = self._clean_container_name(obj.container.name) - object_name = self._clean_object_name(obj.name) - headers = {'X-Purge-Email': email} if email else {} - - response = self.connection.request('/%s/%s' % (container_name, - object_name), - method='DELETE', - headers=headers, - cdn_request=True) - - return response.status == httplib.NO_CONTENT - def ex_get_meta_data(self): """ Get meta data @@ -638,30 +618,30 @@ def _upload_object_manifest(self, container, object_name, extra=None, return obj - def iterate_container_objects(self, container): + def _get_more(self, last_key, value_dict): + container = value_dict['container'] params = {} - while True: - response = self.connection.request('/%s' % (container.name), - params=params) + if last_key: + params['marker'] = last_key + + response = self.connection.request('/%s' % (container.name), + params=params) - if response.status == httplib.NO_CONTENT: - # Empty or non-existent container - break - elif response.status == httplib.OK: - objects = self._to_object_list(json.loads(response.body), - container) + if response.status == httplib.NO_CONTENT: + # Empty or inexistent container + return [], None, True + elif response.status == httplib.OK: + objects = self._to_object_list(json.loads(response.body), + container) - if len(objects) == 0: - break + # TODO: Is this really needed? 
+ if len(objects) == 0: + return [], None, True - for obj in objects: - yield obj - params['marker'] = obj.name + return objects, objects[-1].name, False - else: - raise LibcloudError('Unexpected status code: %s' % - (response.status)) + raise LibcloudError('Unexpected status code: %s' % (response.status)) def _put_object(self, container, object_name, upload_func, upload_func_kwargs, extra=None, file_path=None, @@ -738,10 +718,15 @@ def _clean_object_name(self, name): def _to_container_list(self, response): # @TODO: Handle more then 10k containers - use "lazy list"? + containers = [] + for container in response: extra = {'object_count': int(container['count']), 'size': int(container['bytes'])} - yield Container(name=container['name'], extra=extra, driver=self) + containers.append(Container(name=container['name'], extra=extra, + driver=self)) + + return containers def _to_object_list(self, response, container): objects = [] @@ -786,15 +771,7 @@ def _headers_to_object(self, name, container, headers): return obj def _ex_connection_class_kwargs(self): - kwargs = {'ex_force_service_region': self.datacenter} - - if self.datacenter in ['dfw', 'ord']: - kwargs['auth_url'] = AUTH_URL_US - elif self.datacenter == 'lon': - kwargs['auth_url'] = AUTH_URL_UK - - kwargs.update(self.openstack_connection_kwargs()) - return kwargs + return self.openstack_connection_kwargs() class CloudFilesUSStorageDriver(CloudFilesStorageDriver): @@ -804,7 +781,7 @@ class CloudFilesUSStorageDriver(CloudFilesStorageDriver): type = Provider.CLOUDFILES_US name = 'CloudFiles (US)' - _datacenter = 'ord' + connectionCls = CloudFilesUSConnection class CloudFilesSwiftStorageDriver(CloudFilesStorageDriver): @@ -833,7 +810,7 @@ class CloudFilesUKStorageDriver(CloudFilesStorageDriver): type = Provider.CLOUDFILES_UK name = 'CloudFiles (UK)' - _datacenter = 'lon' + connectionCls = CloudFilesUKConnection class FileChunkReader(object): @@ -885,7 +862,7 @@ def next(self): raise StopIteration block_size = 
self.chunk_size - if self.bytes_read + block_size > \ + if self.bytes_read + block_size >\ self.end_block - self.start_block: block_size = self.end_block - self.start_block - self.bytes_read self.stop_iteration = True diff --git a/libcloud/storage/providers.py b/libcloud/storage/providers.py index 94ee89a219..dca7ac531c 100644 --- a/libcloud/storage/providers.py +++ b/libcloud/storage/providers.py @@ -20,8 +20,6 @@ DRIVERS = { Provider.DUMMY: ('libcloud.storage.drivers.dummy', 'DummyStorageDriver'), - Provider.CLOUDFILES: - ('libcloud.storage.drivers.cloudfiles', 'CloudFilesStorageDriver'), Provider.S3: ('libcloud.storage.drivers.s3', 'S3StorageDriver'), Provider.S3_US_WEST: @@ -38,19 +36,18 @@ ('libcloud.storage.drivers.ninefold', 'NinefoldStorageDriver'), Provider.GOOGLE_STORAGE: ('libcloud.storage.drivers.google_storage', 'GoogleStorageDriver'), - Provider.CLOUDFILES_SWIFT: - ('libcloud.storage.drivers.cloudfiles', - 'CloudFilesSwiftStorageDriver'), Provider.NIMBUS: ('libcloud.storage.drivers.nimbus', 'NimbusStorageDriver'), Provider.LOCAL: ('libcloud.storage.drivers.local', 'LocalStorageDriver'), - # Deprecated Provider.CLOUDFILES_US: ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUSStorageDriver'), Provider.CLOUDFILES_UK: - ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUKStorageDriver') + ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUKStorageDriver'), + Provider.CLOUDFILES_SWIFT: + ('libcloud.storage.drivers.cloudfiles', + 'CloudFilesSwiftStorageDriver') } diff --git a/libcloud/storage/types.py b/libcloud/storage/types.py index d3c3b55817..4230a7bafe 100644 --- a/libcloud/storage/types.py +++ b/libcloud/storage/types.py @@ -53,14 +53,12 @@ class Provider(object): NINEFOLD = 'ninefold' GOOGLE_STORAGE = 'google_storage' S3_US_WEST_OREGON = 's3_us_west_oregon' - CLOUDFILES_SWIFT = 'cloudfiles_swift' NIMBUS = 'nimbus' LOCAL = 'local' - CLOUDFILES = 'cloudfiles' - # Deperecated CLOUDFILES_US = 'cloudfiles_us' CLOUDFILES_UK = 'cloudfiles_uk' + 
CLOUDFILES_SWIFT = 'cloudfiles_swift' class ContainerError(LibcloudError): diff --git a/libcloud/test/compute/fixtures/openstack/_v1_1__auth.json b/libcloud/test/compute/fixtures/openstack/_v1_1__auth.json index d8ddaf6058..5ea5f6c40d 100644 --- a/libcloud/test/compute/fixtures/openstack/_v1_1__auth.json +++ b/libcloud/test/compute/fixtures/openstack/_v1_1__auth.json @@ -10,11 +10,6 @@ "region": "ORD", "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS", "v1Default": true - }, - { - "region": "LON", - "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS", - "v1Default": false } ], "cloudFiles": [ @@ -23,12 +18,6 @@ "publicURL": "https://storage101.ord1.clouddrive.com/v1/MossoCloudFS", "v1Default": true, "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS" - }, - { - "region": "LON", - "publicURL": "https://storage101.lon1.clouddrive.com/v1/MossoCloudFS", - "v1Default": false, - "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS" } ], "cloudServers": [ diff --git a/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json b/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json index 74efaccddc..3d586ed2f0 100644 --- a/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json +++ b/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json @@ -28,12 +28,6 @@ "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", "publicURL": "https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" - }, - { - "region": "LON", - "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", - "publicURL": "https://storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", - "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" } ], "name": "cloudFiles", diff --git 
a/libcloud/test/compute/test_deployment.py b/libcloud/test/compute/test_deployment.py index ed2987942c..b48d98e562 100644 --- a/libcloud/test/compute/test_deployment.py +++ b/libcloud/test/compute/test_deployment.py @@ -28,7 +28,7 @@ from libcloud.compute.base import Node from libcloud.compute.types import NodeState, DeploymentError, LibcloudError from libcloud.compute.ssh import BaseSSHClient -from libcloud.compute.drivers.rackspace import RackspaceFirstGenNodeDriver as Rackspace +from libcloud.compute.drivers.rackspace import RackspaceNodeDriver as Rackspace from libcloud.test import MockHttp, XML_HEADERS from libcloud.test.file_fixtures import ComputeFileFixtures, OpenStackFixtures diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 0172e77116..868d5c8859 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -45,36 +45,15 @@ parse_qsl = cgi.parse_qsl -class BaseEC2Tests(LibcloudTestCase): - def test_instantiate_driver_valid_datacenters(self): - datacenters = REGION_DETAILS.keys() - datacenters = [d for d in datacenters if d != 'nimbus'] - - for datacenter in datacenters: - EC2NodeDriver(*EC2_PARAMS, **{'datacenter': datacenter}) - - def test_instantiate_driver_invalid_datacenters(self): - for datacenter in ['invalid', 'nimbus']: - try: - EC2NodeDriver(*EC2_PARAMS, **{'datacenter': datacenter}) - except ValueError: - pass - else: - self.fail('Invalid region, but exception was not thrown') - - class EC2Tests(LibcloudTestCase, TestCaseMixin): image_name = 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml' - datacenter = 'us-east-1' def setUp(self): EC2MockHttp.test = self EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) EC2MockHttp.use_param = 'Action' EC2MockHttp.type = None - - self.driver = EC2NodeDriver(*EC2_PARAMS, - **{'datacenter': self.datacenter}) + self.driver = EC2NodeDriver(*EC2_PARAMS) def test_create_node(self): image = NodeImage(id='ami-be3adfd7', @@ -396,75 
+375,6 @@ def test_detach(self): self.assertTrue(retValue) -class EC2USWest1Tests(EC2Tests): - datacenter = 'us-west-1' - - -class EC2USWest2Tests(EC2Tests): - datacenter = 'us-west-2' - - -class EC2EUWestTests(EC2Tests): - datacenter = 'eu-west-1' - - -class EC2APSE1Tests(EC2Tests): - datacenter = 'ap-southeast-1' - - -class EC2APNETests(EC2Tests): - datacenter = 'ap-northeast-1' - - -class EC2APSE2Tests(EC2Tests): - datacenter = 'ap-southeast-2' - - -class EC2SAEastTests(EC2Tests): - datacenter = 'sa-east-1' - - -# Tests for the old, deprecated way of instantiating a driver. -class EC2OldStyleModelTests(EC2Tests): - driver_klass = EC2USWestNodeDriver - - def setUp(self): - EC2MockHttp.test = self - EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) - EC2MockHttp.use_param = 'Action' - EC2MockHttp.type = None - - self.driver = self.driver_klass(*EC2_PARAMS) - - -class EC2USWest1OldStyleModelTests(EC2OldStyleModelTests): - driver_klass = EC2USWestNodeDriver - - -class EC2USWest2OldStyleModelTests(EC2OldStyleModelTests): - driver_klass = EC2USWestOregonNodeDriver - - -class EC2EUWestOldStyleModelTests(EC2OldStyleModelTests): - driver_klass = EC2EUNodeDriver - - -class EC2APSE1OldStyleModelTests(EC2OldStyleModelTests): - driver_klass = EC2APSENodeDriver - - -class EC2APNEOldStyleModelTests(EC2OldStyleModelTests): - driver_klass = EC2APNENodeDriver - - -class EC2APSE2OldStyleModelTests(EC2OldStyleModelTests): - driver_klass = EC2APSESydneyNodeDriver - - -class EC2SAEastOldStyleModelTests(EC2OldStyleModelTests): - driver_klass = EC2SAEastNodeDriver - - class EC2MockHttp(MockHttpTestCase): fixtures = ComputeFileFixtures('ec2') diff --git a/libcloud/test/compute/test_rackspace.py b/libcloud/test/compute/test_rackspace.py index 78ed9f34c4..474b5cfa31 100644 --- a/libcloud/test/compute/test_rackspace.py +++ b/libcloud/test/compute/test_rackspace.py @@ -15,182 +15,25 @@ import sys import unittest -from libcloud.utils.py3 import method_type -from 
libcloud.utils.py3 import httplib -from libcloud.compute.providers import DEPRECATED_RACKSPACE_PROVIDERS -from libcloud.compute.providers import get_driver -from libcloud.compute.drivers.rackspace import RackspaceFirstGenNodeDriver from libcloud.compute.drivers.rackspace import RackspaceNodeDriver from libcloud.test.compute.test_openstack import OpenStack_1_0_Tests -from libcloud.test.compute.test_openstack import OpenStack_1_1_Tests, \ - OpenStack_1_1_MockHttp -from libcloud.pricing import clear_pricing_data -from libcloud.test.secrets import RACKSPACE_NOVA_PARAMS from libcloud.test.secrets import RACKSPACE_PARAMS -class RackspaceusFirstGenUsTests(OpenStack_1_0_Tests): +class RackspaceTests(OpenStack_1_0_Tests): should_list_locations = True should_have_pricing = True - driver_klass = RackspaceFirstGenNodeDriver - driver_type = RackspaceFirstGenNodeDriver - driver_args = RACKSPACE_PARAMS - driver_kwargs = {'region': 'us'} - - def test_error_is_thrown_on_accessing_old_constant(self): - for provider in DEPRECATED_RACKSPACE_PROVIDERS: - try: - get_driver(provider) - except Exception: - e = sys.exc_info()[1] - self.assertTrue(str(e).find('has been removed') != -1) - else: - self.fail('Exception was not thrown') - - def test_list_sizes_pricing(self): - sizes = self.driver.list_sizes() - - for size in sizes: - self.assertTrue(size.price > 0) - - -class RackspaceusFirstGenUkTests(OpenStack_1_0_Tests): - should_list_locations = True - should_have_pricing = True - - driver_klass = RackspaceFirstGenNodeDriver - driver_type = RackspaceFirstGenNodeDriver - driver_args = RACKSPACE_PARAMS - driver_kwargs = {'region': 'uk'} - - def test_list_sizes_pricing(self): - sizes = self.driver.list_sizes() - - for size in sizes: - self.assertTrue(size.price > 0) - - -class RackspaceNovaMockHttp(OpenStack_1_1_MockHttp): - def __init__(self, *args, **kwargs): - super(RackspaceNovaMockHttp, self).__init__(*args, **kwargs) - - methods1 = OpenStack_1_1_MockHttp.__dict__ - - names1 = [m for m in 
methods1 if m.find('_v1_1') == 0] - - for name in names1: - method = methods1[name] - new_name = name.replace('_v1_1_slug_', '_v2_1337_') - setattr(self, new_name, method_type(method, self, - RackspaceNovaMockHttp)) - - -class RackspaceNovaLonMockHttp(RackspaceNovaMockHttp): - - def _v2_0_tokens(self, method, url, body, headers): - body = self.auth_fixtures.load('_v2_0__auth_lon.json') - return (httplib.OK, body, self.json_content_headers, - httplib.responses[httplib.OK]) - - - -class RackspaceNovaDfwTests(OpenStack_1_1_Tests): - driver_klass = RackspaceNodeDriver driver_type = RackspaceNodeDriver - driver_args = RACKSPACE_NOVA_PARAMS - driver_kwargs = {'datacenter': 'dfw'} - - @classmethod - def create_driver(self): - return self.driver_type(*self.driver_args, **self.driver_kwargs) - - def setUp(self): - self.driver_klass.connectionCls.conn_classes = (RackspaceNovaMockHttp, - RackspaceNovaMockHttp) - self.driver_klass.connectionCls.auth_url = \ - 'https://auth.api.example.com/v2.0/' - self.driver = self.create_driver() - # normally authentication happens lazily, but we force it here - self.driver.connection._populate_hosts_and_request_paths() - clear_pricing_data() - self.node = self.driver.list_nodes()[1] - - def test_service_catalog(self): - self.assertEqual( - 'https://dfw.servers.api.rackspacecloud.com/v2/1337', - self.driver.connection.get_endpoint()) - - -class RackspaceNovaOrdTests(OpenStack_1_1_Tests): - - driver_klass = RackspaceNodeDriver - driver_type = RackspaceNodeDriver - driver_args = RACKSPACE_NOVA_PARAMS - driver_kwargs = {'datacenter': 'ord'} - - @classmethod - def create_driver(self): - return self.driver_type(*self.driver_args, **self.driver_kwargs) - - def setUp(self): - self.driver_klass.connectionCls.conn_classes = (RackspaceNovaMockHttp, - RackspaceNovaMockHttp) - self.driver_klass.connectionCls.auth_url = \ - 'https://auth.api.example.com/v2.0/' - self.driver = self.create_driver() - # normally authentication happens lazily, but we force 
it here - self.driver.connection._populate_hosts_and_request_paths() - clear_pricing_data() - self.node = self.driver.list_nodes()[1] - - def test_list_sizes_pricing(self): - sizes = self.driver.list_sizes() - - for size in sizes: - if size.ram > 256: - self.assertTrue(size.price > 0) - - def test_service_catalog(self): - self.assertEqual('https://ord.servers.api.rackspacecloud.com/v2/1337', - self.driver.connection.get_endpoint()) - - -class RackspaceNovaLonTests(OpenStack_1_1_Tests): - - driver_klass = RackspaceNodeDriver - driver_type = RackspaceNodeDriver - driver_args = RACKSPACE_NOVA_PARAMS - driver_kwargs = {'datacenter': 'lon'} - - @classmethod - def create_driver(self): - return self.driver_type(*self.driver_args, **self.driver_kwargs) - - def setUp(self): - self.driver_klass.connectionCls.conn_classes = \ - (RackspaceNovaLonMockHttp, RackspaceNovaLonMockHttp) - self.driver_klass.connectionCls.auth_url = \ - 'https://lon.auth.api.example.com/v2.0/' - self.driver = self.create_driver() - # normally authentication happens lazily, but we force it here - self.driver.connection._populate_hosts_and_request_paths() - clear_pricing_data() - self.node = self.driver.list_nodes()[1] + driver_args = RACKSPACE_PARAMS def test_list_sizes_pricing(self): sizes = self.driver.list_sizes() for size in sizes: - if size.ram > 256: - self.assertTrue(size.price > 0) - - def test_service_catalog(self): - self.assertEqual('https://lon.servers.api.rackspacecloud.com/v2/1337', - self.driver.connection.get_endpoint()) - + self.assertTrue(size.price > 0) if __name__ == '__main__': sys.exit(unittest.main()) diff --git a/libcloud/test/compute/test_rackspacenova.py b/libcloud/test/compute/test_rackspacenova.py new file mode 100644 index 0000000000..dd7a5b090b --- /dev/null +++ b/libcloud/test/compute/test_rackspacenova.py @@ -0,0 +1,149 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest + +from libcloud.utils.py3 import method_type +from libcloud.utils.py3 import httplib +from libcloud.compute.drivers.rackspacenova import RackspaceNovaBetaNodeDriver, \ + RackspaceNovaDfwNodeDriver, \ + RackspaceNovaOrdNodeDriver, \ + RackspaceNovaLonNodeDriver +from libcloud.test.compute.test_openstack import OpenStack_1_1_Tests, OpenStack_1_1_MockHttp +from libcloud.pricing import clear_pricing_data + +from libcloud.test.secrets import RACKSPACE_NOVA_PARAMS + + +class RackspaceNovaMockHttp(OpenStack_1_1_MockHttp): + def __init__(self, *args, **kwargs): + super(RackspaceNovaMockHttp, self).__init__(*args, **kwargs) + + methods1 = OpenStack_1_1_MockHttp.__dict__ + + names1 = [m for m in methods1 if m.find('_v1_1') == 0] + + for name in names1: + method = methods1[name] + new_name = name.replace('_v1_1_slug_', '_v2_1337_') + setattr(self, new_name, method_type(method, self, + RackspaceNovaMockHttp)) + + +class RackspaceNovaLonMockHttp(RackspaceNovaMockHttp): + + def _v2_0_tokens(self, method, url, body, headers): + body = self.auth_fixtures.load('_v2_0__auth_lon.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + +class RackspaceNovaBetaTests(OpenStack_1_1_Tests): + + driver_klass = RackspaceNovaBetaNodeDriver + 
driver_type = RackspaceNovaBetaNodeDriver + driver_args = RACKSPACE_NOVA_PARAMS + ('1.1',) + driver_kwargs = {'ex_force_auth_version': '2.0'} + + @classmethod + def create_driver(self): + return self.driver_type(*self.driver_args, **self.driver_kwargs) + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = (RackspaceNovaMockHttp, RackspaceNovaMockHttp) + self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com/v2.0/" + self.driver = self.create_driver() + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + clear_pricing_data() + self.node = self.driver.list_nodes()[1] + + def test_service_catalog(self): + self.assertEqual('https://preprod.dfw.servers.api.rackspacecloud.com/v2/1337', self.driver.connection.get_endpoint()) + + +class RackspaceNovaDfwTests(OpenStack_1_1_Tests): + + driver_klass = RackspaceNovaDfwNodeDriver + driver_type = RackspaceNovaDfwNodeDriver + driver_args = RACKSPACE_NOVA_PARAMS + ('1.1',) + driver_kwargs = {'ex_force_auth_version': '2.0'} + + @classmethod + def create_driver(self): + return self.driver_type(*self.driver_args, **self.driver_kwargs) + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = (RackspaceNovaMockHttp, RackspaceNovaMockHttp) + self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com/v2.0/" + self.driver = self.create_driver() + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + clear_pricing_data() + self.node = self.driver.list_nodes()[1] + + def test_service_catalog(self): + self.assertEqual('https://dfw.servers.api.rackspacecloud.com/v2/1337', self.driver.connection.get_endpoint()) + + +class RackspaceNovaOrdTests(OpenStack_1_1_Tests): + + driver_klass = RackspaceNovaOrdNodeDriver + driver_type = RackspaceNovaOrdNodeDriver + driver_args = RACKSPACE_NOVA_PARAMS + ('1.1',) + driver_kwargs = 
{'ex_force_auth_version': '2.0'} + + @classmethod + def create_driver(self): + return self.driver_type(*self.driver_args, **self.driver_kwargs) + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = (RackspaceNovaMockHttp, RackspaceNovaMockHttp) + self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com/v2.0/" + self.driver = self.create_driver() + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + clear_pricing_data() + self.node = self.driver.list_nodes()[1] + + def test_service_catalog(self): + self.assertEqual('https://ord.servers.api.rackspacecloud.com/v2/1337', self.driver.connection.get_endpoint()) + + +class RackspaceNovaLonTests(OpenStack_1_1_Tests): + + driver_klass = RackspaceNovaLonNodeDriver + driver_type = RackspaceNovaLonNodeDriver + driver_args = RACKSPACE_NOVA_PARAMS + ('1.1',) + driver_kwargs = {'ex_force_auth_version': '2.0'} + + @classmethod + def create_driver(self): + return self.driver_type(*self.driver_args, **self.driver_kwargs) + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = (RackspaceNovaLonMockHttp, RackspaceNovaLonMockHttp) + self.driver_klass.connectionCls.auth_url = "https://lon.auth.api.example.com/v2.0/" + self.driver = self.create_driver() + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + clear_pricing_data() + self.node = self.driver.list_nodes()[1] + + def test_service_catalog(self): + self.assertEqual('https://lon.servers.api.rackspacecloud.com/v2/1337', self.driver.connection.get_endpoint()) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/libcloud/test/storage/test_cloudfiles.py b/libcloud/test/storage/test_cloudfiles.py index 17d20c835b..89e046fc9e 100644 --- a/libcloud/test/storage/test_cloudfiles.py +++ b/libcloud/test/storage/test_cloudfiles.py @@ -41,31 +41,24 @@ from libcloud.storage.types 
import ObjectHashMismatchError from libcloud.storage.types import InvalidContainerNameError from libcloud.storage.drivers.cloudfiles import CloudFilesStorageDriver -from libcloud.storage.drivers.cloudfiles import CloudFilesUSStorageDriver -from libcloud.storage.drivers.cloudfiles import CloudFilesUKStorageDriver from libcloud.storage.drivers.dummy import DummyIterator from libcloud.test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 -from libcloud.test import MockHttpTestCase # pylint: disable-msg=E0611 from libcloud.test.file_fixtures import StorageFileFixtures, OpenStackFixtures # pylint: disable-msg=E0611 +current_hash = None + class CloudFilesTests(unittest.TestCase): - driver_klass = CloudFilesStorageDriver - driver_args = ('dummy', 'dummy') - driver_kwargs = {} - datacenter = 'ord' def setUp(self): - self.driver_klass.connectionCls.conn_classes = ( + CloudFilesStorageDriver.connectionCls.conn_classes = ( None, CloudFilesMockHttp) - self.driver_klass.connectionCls.rawResponseCls = \ + CloudFilesStorageDriver.connectionCls.rawResponseCls = \ CloudFilesMockRawResponse CloudFilesMockHttp.type = None CloudFilesMockRawResponse.type = None - self.driver = self.driver_klass(*self.driver_args, - **self.driver_kwargs) - + self.driver = CloudFilesStorageDriver('dummy', 'dummy') # normally authentication happens lazily, but we force it here self.driver.connection._populate_hosts_and_request_paths() self._remove_test_file() @@ -73,23 +66,6 @@ def setUp(self): def tearDown(self): self._remove_test_file() - def test_invalid_ex_force_service_region(self): - driver = CloudFilesStorageDriver('driver', 'dummy', - ex_force_service_region='invalid') - - try: - driver.list_containers() - except: - e = sys.exc_info()[1] - self.assertEquals(e.value, 'Could not find specified endpoint') - else: - self.fail('Exception was not thrown') - - def test_ex_force_service_region(self): - driver = CloudFilesStorageDriver('driver', 'dummy', - ex_force_service_region='ORD') 
- driver.list_containers() - def test_force_auth_token_kwargs(self): base_url = 'https://cdn2.clouddrive.com/v1/MossoCloudFS' kwargs = { @@ -128,10 +104,8 @@ def test_invalid_json_throws_exception(self): self.fail('Exception was not thrown') def test_service_catalog(self): - url = 'https://storage101.%s1.clouddrive.com/v1/MossoCloudFS' % \ - (self.datacenter) self.assertEqual( - url, + 'https://storage101.ord1.clouddrive.com/v1/MossoCloudFS', self.driver.connection.get_endpoint()) self.driver.connection.cdn_request = True @@ -292,7 +266,7 @@ def test_download_object_success(self): destination_path=destination_path, overwrite_existing=False, delete_on_failure=True) - self.assertTrue(result) + #self.assertTrue(result) def test_download_object_invalid_file_size(self): CloudFilesMockRawResponse.type = 'INVALID_SIZE' @@ -523,26 +497,6 @@ def test_ex_get_meta_data(self): self.assertTrue('bytes_used' in meta_data) self.assertTrue('temp_url_key' in meta_data) - def test_ex_purge_object_from_cdn(self): - CloudFilesMockHttp.type = 'PURGE_SUCCESS' - container = Container(name='foo_bar_container', extra={}, - driver=self.driver) - obj = Object(name='object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=self) - - self.assertTrue(self.driver.ex_purge_object_from_cdn(obj=obj)) - - def test_ex_purge_object_from_cdn_with_email(self): - CloudFilesMockHttp.type = 'PURGE_SUCCESS_EMAIL' - container = Container(name='foo_bar_container', extra={}, - driver=self.driver) - obj = Object(name='object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=self) - self.assertTrue(self.driver.ex_purge_object_from_cdn(obj=obj, - email='test@test.com')) - @mock.patch('os.path.getsize') def test_ex_multipart_upload_object_for_small_files(self, getsize_mock): getsize_mock.return_value = 0 @@ -684,9 +638,9 @@ def test_ex_get_object_temp_url(self, time): "/v1/MossoCloudFS/foo_bar_container/foo_bar_object") sig = hmac.new(b('foo'), 
b(hmac_body), sha1).hexdigest() ret = self.driver.ex_get_object_temp_url(obj, 'GET') - temp_url = 'https://storage101.%s1.clouddrive.com/v1/MossoCloudFS/foo_bar_container/foo_bar_object?temp_url_expires=60&temp_url_sig=%s' % (self.datacenter, sig) + temp_url = 'https://storage101.ord1.clouddrive.com/v1/MossoCloudFS/foo_bar_container/foo_bar_object?temp_url_expires=60&temp_url_sig=%s' % (sig) - self.assertEquals(''.join(sorted(ret)), ''.join(sorted(temp_url))) + self.assertEquals(ret, temp_url) def test_ex_get_object_temp_url_no_key_raises_key_error(self): self.driver.ex_get_meta_data = mock.Mock() @@ -709,17 +663,7 @@ def _remove_test_file(self): pass -class CloudFilesDeprecatedUSTests(CloudFilesTests): - driver_klass = CloudFilesUSStorageDriver - datacenter = 'ord' - - -class CloudFilesDeprecatedUKTests(CloudFilesTests): - driver_klass = CloudFilesUKStorageDriver - datacenter = 'lon' - - -class CloudFilesMockHttp(StorageMockHttp, MockHttpTestCase): +class CloudFilesMockHttp(StorageMockHttp): fixtures = StorageFileFixtures('cloudfiles') auth_fixtures = OpenStackFixtures() @@ -889,25 +833,6 @@ def _v1_MossoCloudFS_foo_bar_container(self, method, url, body, headers): status_code = httplib.ACCEPTED return (status_code, body, headers, httplib.responses[httplib.OK]) - def _v1_MossoCloudFS_foo_bar_container_object_PURGE_SUCCESS( - self, method, url, body, headers): - - if method == 'DELETE': - # test_ex_purge_from_cdn - headers = self.base_headers - status_code = httplib.NO_CONTENT - return (status_code, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container_object_PURGE_SUCCESS_EMAIL( - self, method, url, body, headers): - - if method == 'DELETE': - # test_ex_purge_from_cdn_with_email - self.assertEqual(headers['X-Purge-Email'], 'test@test.com') - headers = self.base_headers - status_code = httplib.NO_CONTENT - return (status_code, body, headers, httplib.responses[httplib.OK]) - def _v1_MossoCloudFS_foo_bar_container_NOT_FOUND( self, 
method, url, body, headers): @@ -992,7 +917,8 @@ def _v1_MossoCloudFS_foo_bar_container_foo_bar_object( self, method, url, body, headers): # test_download_object_success - body = self._generate_random_data(1000) + body = 'test' + self._data = self._generate_random_data(1000) return (httplib.OK, body, self.base_headers, @@ -1001,7 +927,8 @@ def _v1_MossoCloudFS_foo_bar_container_foo_bar_object( def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_INVALID_SIZE( self, method, url, body, headers): # test_download_object_invalid_file_size - body = self._generate_random_data(100) + body = 'test' + self._data = self._generate_random_data(100) return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) From 4adcf3f023f7af06e4bd1eb9a00a2e0d79e5c7b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sun, 27 Jan 2013 06:10:47 +0000 Subject: [PATCH 003/143] Fix typo. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439029 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 4d1d82a3ff..e7fd685316 100644 --- a/CHANGES +++ b/CHANGES @@ -117,7 +117,7 @@ Changes with Apache Libcloud 0.12.0: (LIBCLOUD-267) [Tomaz Muraus] - - Support for multipart uploads and other improvemetns in the S3 driver + - Support for multipart uploads and other improvements in the S3 driver so it can more easily be re-used with other implementations (e.g. Google Storage, etc.). From 11487c3b848b265107ee6c377594a2be1d5a51c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sun, 27 Jan 2013 06:28:19 +0000 Subject: [PATCH 004/143] Fix the old rackspace next-gen stuff. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439030 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/compute/drivers/rackspacenova.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/libcloud/compute/drivers/rackspacenova.py b/libcloud/compute/drivers/rackspacenova.py index 6157877f93..cdba0d3318 100644 --- a/libcloud/compute/drivers/rackspacenova.py +++ b/libcloud/compute/drivers/rackspacenova.py @@ -19,9 +19,13 @@ from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection,\ OpenStack_1_1_NodeDriver from libcloud.common.types import LibcloudError +from libcloud.common.rackspace import ( + AUTH_URL_US, AUTH_URL_UK) class RackspaceNovaConnection(OpenStack_1_1_Connection): + auth_url = AUTH_URL_US + _auth_version = '2.0' get_endpoint_args = {} def get_endpoint(self): @@ -59,6 +63,7 @@ class RackspaceNovaDfwConnection(RackspaceNovaConnection): class RackspaceNovaLonConnection(RackspaceNovaConnection): + auth_url = AUTH_URL_UK get_endpoint_args = {'service_type': 'compute', 'name': 'cloudServersOpenStack', From 068b1345a08b9435038c621fda2d5fcffadb7dd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sun, 27 Jan 2013 06:30:27 +0000 Subject: [PATCH 005/143] Set release version. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439031 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 02c230ee70..f0b7fe2491 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ['__version__', 'enable_debug'] -__version__ = '0.12.0-dev' +__version__ = '0.12.0' try: import paramiko From 7538214709c8761582cef0643e39334c3f4953c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sun, 27 Jan 2013 07:01:19 +0000 Subject: [PATCH 006/143] Fix type string names. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439033 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/compute/types.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index e7a393dfc8..84081a777b 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -70,7 +70,7 @@ class Provider(object): @cvar ABIQUO: Abiquo driver """ DUMMY = 'dummy' - EC2 = 'ec2' + EC2 = 'ec2_us_east' SLICEHOST = 'slicehost' GOGRID = 'gogrid' VPSNET = 'vpsnet' @@ -112,7 +112,7 @@ class Provider(object): ABIQUO = 'abiquo' EC2_US_EAST = 'ec2_us_east' - EC2_EU = 'ec2_eu' # deprecated name + EC2_EU = 'ec2_eu_west' # deprecated name EC2_EU_WEST = 'ec2_eu_west' EC2_US_WEST = 'ec2_us_west' EC2_AP_SOUTHEAST = 'ec2_ap_southeast' From 0683c52455cb0d41403a434119b1918389cfa7c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 28 Jan 2013 01:47:48 +0000 Subject: [PATCH 007/143] Include ELB fixes. Contributed by John Carr, part of LIBCLOUD-284. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439221 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/loadbalancer/drivers/elb.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libcloud/loadbalancer/drivers/elb.py b/libcloud/loadbalancer/drivers/elb.py index f60c8b03dd..0b408d5b5a 100644 --- a/libcloud/loadbalancer/drivers/elb.py +++ b/libcloud/loadbalancer/drivers/elb.py @@ -35,6 +35,8 @@ class ELBResponse(AWSGenericResponse): Amazon ELB response class. 
""" namespace = NS + exceptions = {} + xpath = 'Error' class ELBConnection(SignedAWSConnection): @@ -76,7 +78,7 @@ def create_balancer(self, name, port, protocol, algorithm, members, } for i, z in enumerate(ex_members_availability_zones): - zone = '-'.join((self.region, z)) + zone = ''.join((self.region, z)) params['AvailabilityZones.member.%d' % (i + 1)] = zone data = self.connection.request(ROOT, params=params).object From b31d6d386aaf23ca00745b5b7b040275a4eaa8b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 28 Jan 2013 02:24:49 +0000 Subject: [PATCH 008/143] Backport commit r1439222 from trunk: Improve error handling in the Brightbox driver. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439227 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 3 +++ libcloud/common/brightbox.py | 24 ++++++++++++++++-------- libcloud/compute/drivers/brightbox.py | 2 +- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/CHANGES b/CHANGES index e7fd685316..3c7c8dd40d 100644 --- a/CHANGES +++ b/CHANGES @@ -127,6 +127,9 @@ Changes with Apache Libcloud 0.12.0: (LIBCLOUD-269) [Mahendra M] + - Improve error handling in the Brightbox driver. 
+ [Tomaz Muraus] + *) DNS - Update 'if type' checks in the update_record methods to behave correctly diff --git a/libcloud/common/brightbox.py b/libcloud/common/brightbox.py index 6b7c078b5b..7346ba0ac2 100644 --- a/libcloud/common/brightbox.py +++ b/libcloud/common/brightbox.py @@ -19,6 +19,7 @@ from libcloud.compute.types import InvalidCredsError from libcloud.utils.py3 import b +from libcloud.utils.py3 import httplib try: import simplejson as json @@ -28,10 +29,10 @@ class BrightboxResponse(JsonResponse): def success(self): - return self.status >= 200 and self.status < 400 + return self.status >= httplib.OK and self.status < httplib.BAD_REQUEST def parse_body(self): - if self.headers['content-type'].split('; ')[0] == 'application/json': + if self.headers['content-type'].split(';')[0] == 'application/json': return super(BrightboxResponse, self).parse_body() else: return self.body @@ -39,7 +40,15 @@ def parse_body(self): def parse_error(self): response = super(BrightboxResponse, self).parse_body() - return '%s: %s' % (response['error_name'], response['errors'][0]) + if 'error' in response: + if response['error'] in ['invalid_client', 'unauthorized_client']: + raise InvalidCredsError(response['error']) + + return response['error'] + elif 'error_name' in response: + return '%s: %s' % (response['error_name'], response['errors'][0]) + + return self.body class BrightboxConnection(ConnectionUserAndKey): @@ -54,7 +63,7 @@ def _fetch_oauth_token(self): body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'}) authorization = 'Basic ' + str(base64.encodestring(b('%s:%s' % - (self.user_id, self.key)))).rstrip() + (self.user_id, self.key)))).rstrip() self.connect() @@ -69,12 +78,11 @@ def _fetch_oauth_token(self): response = self.connection.getresponse() - if response.status == 200: + if response.status == httplib.OK: return json.loads(response.read())['access_token'] else: - message = '%s (%s)' % (json.loads(response.read())['error'], - response.status) - + 
responseCls = BrightboxResponse(response=response, connection=self) + message = responseCls.parse_error() raise InvalidCredsError(message) def add_default_headers(self, headers): diff --git a/libcloud/compute/drivers/brightbox.py b/libcloud/compute/drivers/brightbox.py index e10533f510..1747fdb46e 100644 --- a/libcloud/compute/drivers/brightbox.py +++ b/libcloud/compute/drivers/brightbox.py @@ -80,7 +80,7 @@ def _to_node(self, data): public_ips=[cloud_ip['public_ip'] for cloud_ip in data['cloud_ips']] + - [interface['ipv6_address'] + [interface['ipv6_address'] for interface in data['interfaces'] if 'ipv6_address' in interface], From 14dd764b5a9506db1b4556716c75b46e3321feba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 28 Jan 2013 04:15:44 +0000 Subject: [PATCH 009/143] Backport commits from r1439233:r1439246 from trunk: Fix the ScriptDeployment step to work correctly if user provides a relative path for the script argument. Contributed by Jaume devesa, part of LIBCLOUD-278. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439248 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 10 +- libcloud/compute/ssh.py | 15 +++ libcloud/test/compute/test_ssh_client.py | 149 ++++++++++++++++++++++- setup.py | 10 +- tox.ini | 2 + 5 files changed, 176 insertions(+), 10 deletions(-) diff --git a/CHANGES b/CHANGES index 3c7c8dd40d..3d5377ff44 100644 --- a/CHANGES +++ b/CHANGES @@ -98,6 +98,13 @@ Changes with Apache Libcloud 0.12.0: 'create_node' method. (LIBCLOUD-282) [Joe Miller, Tomaz Muraus] + - Improve error handling in the Brightbox driver. + [Tomaz Muraus] + + - Fix the ScriptDeployment step to work correctly if user provides a + relative path for the script argument. (LIBCLOUD-278) + [Jaume Devesa] + *) Storage - Add a new local storage driver. @@ -127,9 +134,6 @@ Changes with Apache Libcloud 0.12.0: (LIBCLOUD-269) [Mahendra M] - - Improve error handling in the Brightbox driver. 
- [Tomaz Muraus] - *) DNS - Update 'if type' checks in the update_record methods to behave correctly diff --git a/libcloud/compute/ssh.py b/libcloud/compute/ssh.py index c1a0521ed0..f171a8fde4 100644 --- a/libcloud/compute/ssh.py +++ b/libcloud/compute/ssh.py @@ -29,6 +29,7 @@ # Ref: https://bugs.launchpad.net/paramiko/+bug/392973 from os.path import split as psplit +from os.path import join as pjoin class BaseSSHClient(object): @@ -120,6 +121,7 @@ def close(self): class ParamikoSSHClient(BaseSSHClient): + """ A SSH Client powered by Paramiko. """ @@ -179,6 +181,19 @@ def delete(self, path): sftp.close() def run(self, cmd): + if cmd[0] != '/': + # If 'cmd' based on relative path, + # set the absoute path joining the HOME path + sftp = self.client.open_sftp() + # Chdir to its own directory is mandatory because otherwise + # the 'getcwd()' method returns None + sftp.chdir('.') + cwd = sftp.getcwd() + sftp.close() + + # Join the command to the current path + cmd = pjoin(cwd, cmd) + # based on exec_command() bufsize = -1 t = self.client.get_transport() diff --git a/libcloud/test/compute/test_ssh_client.py b/libcloud/test/compute/test_ssh_client.py index 91412dbbef..df3e26ece5 100644 --- a/libcloud/test/compute/test_ssh_client.py +++ b/libcloud/test/compute/test_ssh_client.py @@ -17,13 +17,156 @@ import sys import unittest -import libcloud.compute.ssh +from libcloud.compute.ssh import ParamikoSSHClient +from libcloud.compute.ssh import have_paramiko -from mock import Mock +from mock import patch, Mock + +if not have_paramiko: + ParamikoSSHClient = None class ParamikoSSHClientTests(unittest.TestCase): - pass + @patch('paramiko.SSHClient', Mock) + def setUp(self): + """ + Creates the object patching the actual connection. 
+ """ + conn_params = {'hostname': 'dummy.host.org', + 'port': 8822, + 'username': 'ubuntu', + 'key': '~/.ssh/ubuntu_ssh', + 'timeout': '600'} + self.ssh_cli = ParamikoSSHClient(**conn_params) + + @patch('paramiko.SSHClient', Mock) + def test_create_with_password(self): + """ + Initialize object with password. + + Just to have better coverage, initialize the object + with the 'password' value instead of the 'key'. + """ + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'password': 'ubuntu'} + mock = ParamikoSSHClient(**conn_params) + mock.connect() + + expected_conn = {'username': 'ubuntu', + 'password': 'ubuntu', + 'allow_agent': False, + 'hostname': 'dummy.host.org', + 'look_for_keys': False, + 'port': 22} + mock.client.connect.assert_called_once_with(**expected_conn) + + @patch('paramiko.SSHClient', Mock) + def test_create_without_credentials(self): + """ + Initialize object with no credentials. + + Just to have better coverage, initialize the object + without 'password' neither 'key'. + """ + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu'} + mock = ParamikoSSHClient(**conn_params) + mock.connect() + + expected_conn = {'username': 'ubuntu', + 'hostname': 'dummy.host.org', + 'allow_agent': True, + 'look_for_keys': True, + 'port': 22} + mock.client.connect.assert_called_once_with(**expected_conn) + + def test_basic_usage_absolute_path(self): + """ + Basic execution. 
+ """ + mock = self.ssh_cli + # script to execute + sd = "/root/random_script.sh" + + # Connect behavior + mock.connect() + mock_cli = mock.client # The actual mocked object: SSHClient + expected_conn = {'username': 'ubuntu', + 'key_filename': '~/.ssh/ubuntu_ssh', + 'allow_agent': False, + 'hostname': 'dummy.host.org', + 'look_for_keys': False, + 'timeout': '600', + 'port': 8822} + mock_cli.connect.assert_called_once_with(**expected_conn) + + mock.put(sd) + # Make assertions over 'put' method + mock_cli.open_sftp().chdir.assert_called_with('root') + mock_cli.open_sftp().file.assert_called_once_with('random_script.sh', + mode='w') + + mock.run(sd) + # Make assertions over 'run' method + mock_cli.get_transport().open_session().exec_command \ + .assert_called_once_with(sd) + + mock.close() + + def test_run_script_with_relative_path(self): + """ + Execute script with relative path. + """ + mock = self.ssh_cli + + # Define behaviour then ask for 'current directory' + mock.client.open_sftp().getcwd.return_value = '/home/ubuntu/' + + # Script without full path + sd = 'random_script.sh' + + # Without assertions because they are the same than the previous + # 'test_basic_usage' method + mock.connect() + + mock_cli = mock.client # The actual mocked object: SSHClient + + mock.put(sd, chmod=600) + # Make assertions over 'put' method + mock_cli.open_sftp().file.assert_called_once_with('random_script.sh', + mode='w') + mock_cli.open_sftp().file().chmod.assert_called_once_with(600) + + mock.run(sd) + # Make assertions over the 'run' method + mock_cli.open_sftp().chdir.assert_called_with(".") + mock_cli.open_sftp().getcwd.assert_called_once() + full_sd = '/home/ubuntu/random_script.sh' + mock_cli.get_transport().open_session().exec_command \ + .assert_called_once_with(full_sd) + + mock.close() + + def test_delete_script(self): + """ + Provide a basic test with 'delete' action. 
+ """ + mock = self.ssh_cli + # script to execute + sd = '/root/random_script.sh' + + mock.connect() + + mock.delete(sd) + # Make assertions over the 'delete' method + mock.client.open_sftp().unlink.assert_called_with(sd) + + mock.close() + +if not ParamikoSSHClient: + class ParamikoSSHClientTests(unittest.TestCase): + pass if __name__ == '__main__': diff --git a/setup.py b/setup.py index 33f7a38590..3e57feefd8 100644 --- a/setup.py +++ b/setup.py @@ -51,6 +51,9 @@ ', '.join(SUPPORTED_VERSIONS)) sys.exit(1) +# pre-2.6 will need the ssl PyPI package +pre_python26 = (sys.version_info[0] == 2 and sys.version_info[1] < 6) + def read_version_string(): version = None @@ -106,8 +109,6 @@ def _run_tests(self): print("Please copy the new secret.py-dist file over otherwise" + " tests might fail") - pre_python26 = (sys.version_info[0] == 2 - and sys.version_info[1] < 6) if pre_python26: missing = [] # test for dependencies @@ -130,6 +131,9 @@ def _run_tests(self): testfiles = [] for test_path in TEST_PATHS: for t in glob(pjoin(self._dir, test_path, 'test_*.py')): + if pre_python26 and 'test_ssh_client' in t: + # TODO: Need to update mock library on buildslave + continue testfiles.append('.'.join( [test_path.replace('/', '.'), splitext(basename(t))[0]])) @@ -215,8 +219,6 @@ def run(self): cov.save() cov.html_report() -# pre-2.6 will need the ssl PyPI package -pre_python26 = (sys.version_info[0] == 2 and sys.version_info[1] < 6) setup( name='apache-libcloud', diff --git a/tox.ini b/tox.ini index 721f54a5e6..8243f8f823 100644 --- a/tox.ini +++ b/tox.ini @@ -5,6 +5,7 @@ envlist = py25,py26,py27,pypy,py32,py33 [testenv] deps = mock lockfile + paramiko commands = python setup.py test [testenv:py25] @@ -12,6 +13,7 @@ deps = mock lockfile ssl simplejson + paramiko [testenv:py32] deps = mock From be6d5e6925406a4f24c30159acaa111578c9fd77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 28 Jan 2013 05:02:10 +0000 Subject: [PATCH 010/143] Backport changes from 
trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439253 13f79535-47bb-0310-9956-ffa450edef68 --- setup.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/setup.py b/setup.py index 3e57feefd8..61629f20ca 100644 --- a/setup.py +++ b/setup.py @@ -131,9 +131,6 @@ def _run_tests(self): testfiles = [] for test_path in TEST_PATHS: for t in glob(pjoin(self._dir, test_path, 'test_*.py')): - if pre_python26 and 'test_ssh_client' in t: - # TODO: Need to update mock library on buildslave - continue testfiles.append('.'.join( [test_path.replace('/', '.'), splitext(basename(t))[0]])) From f15e5613c616f2cc27ddd2d8bb815cbb03fed832 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 28 Jan 2013 05:14:28 +0000 Subject: [PATCH 011/143] Backport commit r1439254 from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1439256 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/dns/drivers/route53.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/libcloud/dns/drivers/route53.py b/libcloud/dns/drivers/route53.py index 26d9a59d8c..64918ade6f 100644 --- a/libcloud/dns/drivers/route53.py +++ b/libcloud/dns/drivers/route53.py @@ -134,8 +134,16 @@ def get_zone(self, zone_id): def get_record(self, zone_id, record_id): zone = self.get_zone(zone_id=zone_id) record_type, name = record_id.split(':', 1) + if name: + full_name = ".".join((name, zone.domain)) + else: + full_name = zone.domain self.connection.set_context({'zone_id': zone_id}) - params = urlencode({'name': name, 'type': record_type}) + params = urlencode({ + 'name': full_name, + 'type': record_type, + 'maxitems': '1' + }) uri = API_ROOT + 'hostedzone/' + zone_id + '/rrset?' + params data = self.connection.request(uri).object @@ -232,7 +240,7 @@ def _post_changeset(self, zone, changes_list): rrs = ET.SubElement(change, 'ResourceRecordSet') ET.SubElement(rrs, 'Name').text = name + "." 
+ zone.domain ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type_] - ET.SubElement(rrs, 'TTL').text = extra.get('ttl', '0') + ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0')) rrecs = ET.SubElement(rrs, 'ResourceRecords') rrec = ET.SubElement(rrecs, 'ResourceRecord') From 575908c166191650ee892917dc10d81175347ade Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 29 Jan 2013 21:14:50 +0000 Subject: [PATCH 012/143] Backport commit r1426126 from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1440135 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 6 +++--- libcloud/compute/drivers/elasticstack.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGES b/CHANGES index 3d5377ff44..44d397aa05 100644 --- a/CHANGES +++ b/CHANGES @@ -67,9 +67,9 @@ Changes with Apache Libcloud 0.12.0: (LIBCLOUD-253) [L. Schaub] - - Modify ElasticStack driver class to not pass 'vnc:ip auto' argument to the - API when creating a server. It looks like this argument is not supported - anymore. + - Modify ElasticStack driver class to pass 'vnc auto' instead of + 'vnc:ip auto' argument to the API when creating a server. + It looks like 'vnc:ip' has been replaced with 'vnc'. 
[Rick Copeland, Tomaz Muraus] - Add new EC2 instance type - High Storage Eight Extra Large Instance diff --git a/libcloud/compute/drivers/elasticstack.py b/libcloud/compute/drivers/elasticstack.py index a5ea190c25..7058dbd285 100644 --- a/libcloud/compute/drivers/elasticstack.py +++ b/libcloud/compute/drivers/elasticstack.py @@ -308,7 +308,7 @@ def create_node(self, **kwargs): node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'}) if vnc_password: - node_data.update({'vnc:password': vnc_password}) + node_data.update({'vnc': 'auto', 'vnc:password': vnc_password}) response = self.connection.request( action='/servers/create', data=json.dumps(node_data), From fb892ec43bf366ac3c4ddf59a9210fc8e6fe662c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Wed, 30 Jan 2013 07:14:35 +0000 Subject: [PATCH 013/143] Backport commit r1440289 from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1440296 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 7 +++ libcloud/httplib_ssl.py | 2 +- libcloud/security.py | 18 ++++++++ libcloud/test/test_httplib_ssl.py | 76 ++++++++++++++++++++++++++++--- libcloud/utils/py3.py | 11 ++++- 5 files changed, 106 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index 44d397aa05..e2add7a89f 100644 --- a/CHANGES +++ b/CHANGES @@ -20,6 +20,13 @@ Changes with Apache Libcloud 0.12.0: set_driver method. (LIBCLOUD-255) [Mahendra M] + - Allow user to explicitly specify which CA file is used for verifying + the server certificate by setting 'SSL_CERT_FILE' environment variable. + + Note: When this variable is specified, the specified path is the only + CA file which is used to verifying the server certificate. (LIBCLOUD-283) + [Tomaz Muraus, Erinn Looney-Triggs] + *) Compute - Fix string interpolation bug in __repr__ methods in the IBM SCE driver. 
diff --git a/libcloud/httplib_ssl.py b/libcloud/httplib_ssl.py index 1e713b263b..4709f27cda 100644 --- a/libcloud/httplib_ssl.py +++ b/libcloud/httplib_ssl.py @@ -68,7 +68,7 @@ def _setup_ca_cert(self): ca_certs_available = [cert for cert in libcloud.security.CA_CERTS_PATH - if os.path.exists(cert)] + if os.path.exists(cert) and os.path.isfile(cert)] if ca_certs_available: # use first available certificate self.ca_cert = ca_certs_available[0] diff --git a/libcloud/security.py b/libcloud/security.py index 8436248c05..72d532882b 100644 --- a/libcloud/security.py +++ b/libcloud/security.py @@ -23,6 +23,8 @@ libcloud.security.CA_CERTS_PATH.append("/path/to/cacert.txt") """ +import os + VERIFY_SSL_CERT = True VERIFY_SSL_CERT_STRICT = True @@ -42,6 +44,22 @@ '/opt/local/share/curl/curl-ca-bundle.crt', ] +# Allow user to explicitly specify which CA bundle to use, using an environment +# variable +environment_cert_file = os.getenv('SSL_CERT_FILE', None) +if environment_cert_file is not None: + # Make sure the file exists + if not os.path.exists(environment_cert_file): + raise ValueError('Certificate file %s doesn\'t exist' % + (environment_cert_file)) + + if not os.path.isfile(environment_cert_file): + raise ValueError('Certificate file can\'t be a directory') + + # If a provided file exists we ignore other common paths because we + # don't want to fall-back to a potentially less restrictive bundle + CA_CERTS_PATH = [environment_cert_file] + CA_CERTS_UNAVAILABLE_WARNING_MSG = ( 'Warning: No CA Certificates were found in CA_CERTS_PATH. ' 'Toggling VERIFY_SSL_CERT to False.' diff --git a/libcloud/test/test_httplib_ssl.py b/libcloud/test/test_httplib_ssl.py index 3a236b12b7..548d21fae1 100644 --- a/libcloud/test/test_httplib_ssl.py +++ b/libcloud/test/test_httplib_ssl.py @@ -13,19 +13,64 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os import sys import unittest import os.path +import warnings + +from mock import patch import libcloud.security + +from libcloud.utils.py3 import reload from libcloud.httplib_ssl import LibcloudHTTPSConnection +ORIGINAL_CA_CERS_PATH = libcloud.security.CA_CERTS_PATH + class TestHttpLibSSLTests(unittest.TestCase): def setUp(self): libcloud.security.VERIFY_SSL_CERT = False + libcloud.security.CA_CERTS_PATH = ORIGINAL_CA_CERS_PATH self.httplib_object = LibcloudHTTPSConnection('foo.bar') + def test_custom_ca_path_using_env_var_doesnt_exist(self): + os.environ['SSL_CERT_FILE'] = '/foo/doesnt/exist' + + try: + reload(libcloud.security) + except ValueError: + e = sys.exc_info()[1] + msg = 'Certificate file /foo/doesnt/exist doesn\'t exist' + self.assertEqual(str(e), msg) + else: + self.fail('Exception was not thrown') + + def test_custom_ca_path_using_env_var_is_directory(self): + file_path = os.path.dirname(os.path.abspath(__file__)) + os.environ['SSL_CERT_FILE'] = file_path + + try: + reload(libcloud.security) + except ValueError: + e = sys.exc_info()[1] + msg = 'Certificate file can\'t be a directory' + self.assertEqual(str(e), msg) + else: + self.fail('Exception was not thrown') + + def test_custom_ca_path_using_env_var_exist(self): + # When setting a path we don't actually check that a valid CA file is + # provied. 
+ # This happens later in the code in httplib_ssl.connect method + file_path = os.path.abspath(__file__) + os.environ['SSL_CERT_FILE'] = file_path + + reload(libcloud.security) + + self.assertEqual(libcloud.security.CA_CERTS_PATH, [file_path]) + def test_verify_hostname(self): cert1 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', 'subject': ((('countryName', 'US'),), @@ -142,19 +187,28 @@ def test_get_common_name(self): self.assertEqual(self.httplib_object._get_common_name({}), None) - def test_setup_verify(self): - # @TODO: catch warnings - # non-strict mode,s hould just emit a warning + @patch('warnings.warn') + def test_setup_verify(self, _): + libcloud.security.CA_CERTS_PATH = [] + + # non-strict mode should just emit a warning libcloud.security.VERIFY_SSL_CERT = True libcloud.security.VERIFY_SSL_CERT_STRICT = False self.httplib_object._setup_verify() + warnings.warn.assert_called_once_with( + libcloud.security.CA_CERTS_UNAVAILABLE_WARNING_MSG) + # strict mode, should throw a runtime error libcloud.security.VERIFY_SSL_CERT = True libcloud.security.VERIFY_SSL_CERT_STRICT = True + try: self.httplib_object._setup_verify() - except: + except RuntimeError: + e = sys.exc_info()[1] + msg = libcloud.security.CA_CERTS_UNAVAILABLE_ERROR_MSG + self.assertEqual(str(e), msg) pass else: self.fail('Exception not thrown') @@ -163,24 +217,34 @@ def test_setup_verify(self): libcloud.security.VERIFY_SSL_CERT_STRICT = False self.httplib_object._setup_verify() - def test_setup_ca_cert(self): - # @TODO: catch warnings + @patch('warnings.warn') + def test_setup_ca_cert(self, _): + # verify = False, _setup_ca_cert should be a no-op self.httplib_object.verify = False self.httplib_object.strict = False self.httplib_object._setup_ca_cert() self.assertEqual(self.httplib_object.ca_cert, None) + # verify = True, a valid path is provided, self.ca_cert should be set to + # a valid path self.httplib_object.verify = True libcloud.security.CA_CERTS_PATH = [os.path.abspath(__file__)] 
self.httplib_object._setup_ca_cert() + self.assertTrue(self.httplib_object.ca_cert is not None) + # verify = True, no CA certs are available, warning should be emitted libcloud.security.CA_CERTS_PATH = [] self.httplib_object._setup_ca_cert() + + warnings.warn.assert_called_once_with( + libcloud.security.CA_CERTS_UNAVAILABLE_WARNING_MSG) + self.assertFalse(self.httplib_object.ca_cert) self.assertFalse(self.httplib_object.verify) + if __name__ == '__main__': sys.exit(unittest.main()) diff --git a/libcloud/utils/py3.py b/libcloud/utils/py3.py index 648704e532..4537f7a486 100644 --- a/libcloud/utils/py3.py +++ b/libcloud/utils/py3.py @@ -24,9 +24,10 @@ import types from xml.etree import ElementTree as ET -PY3 = False PY2 = False PY25 = False +PY3 = False +PY32 = False if sys.version_info >= (2, 0) and sys.version_info < (3, 0): PY2 = True @@ -37,6 +38,9 @@ if sys.version_info >= (3, 0): PY3 = True +if sys.version_info >= (3, 2) and sys.version_info < (3, 3): + PY32 = True + if PY3: import http.client as httplib from io import StringIO @@ -118,3 +122,8 @@ def relpath(path, start=posixpath.curdir): if not rel_list: return posixpath.curdir return posixpath.join(*rel_list) + +if PY32: + from imp import reload +else: + from __builtin__ import reload From a806885d6cb1b161b76a6badd8e67c0a7d442b8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Thu, 31 Jan 2013 02:44:27 +0000 Subject: [PATCH 014/143] Backport commit 1440817 from trunk: Add new DNS driver for Gandi.net provider. Also perform pep8 cleanup on the existing compute driver tests. Contributed by John Carr, part of LIBCLOUD-281. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1440820 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 3 + libcloud/common/gandi.py | 26 +- libcloud/dns/drivers/gandi.py | 264 ++++++++++++++++ libcloud/dns/providers.py | 2 + libcloud/dns/types.py | 5 +- libcloud/test/common/test_gandi.py | 33 ++ libcloud/test/compute/test_gandi.py | 237 +++++++------- .../test/dns/fixtures/gandi/create_record.xml | 31 ++ .../test/dns/fixtures/gandi/create_zone.xml | 43 +++ .../test/dns/fixtures/gandi/delete_record.xml | 8 + .../gandi/delete_record_doesnotexist.xml | 8 + .../test/dns/fixtures/gandi/delete_zone.xml | 8 + .../dns/fixtures/gandi/delete_zone_fail.xml | 8 + libcloud/test/dns/fixtures/gandi/get_zone.xml | 43 +++ .../test/dns/fixtures/gandi/list_records.xml | 88 ++++++ .../dns/fixtures/gandi/list_records_empty.xml | 14 + .../test/dns/fixtures/gandi/list_zones.xml | 138 +++++++++ .../test/dns/fixtures/gandi/new_version.xml | 8 + libcloud/test/dns/test_gandi.py | 289 ++++++++++++++++++ libcloud/test/secrets.py-dist | 1 + 20 files changed, 1127 insertions(+), 130 deletions(-) create mode 100644 libcloud/dns/drivers/gandi.py create mode 100644 libcloud/test/common/test_gandi.py create mode 100644 libcloud/test/dns/fixtures/gandi/create_record.xml create mode 100644 libcloud/test/dns/fixtures/gandi/create_zone.xml create mode 100644 libcloud/test/dns/fixtures/gandi/delete_record.xml create mode 100644 libcloud/test/dns/fixtures/gandi/delete_record_doesnotexist.xml create mode 100644 libcloud/test/dns/fixtures/gandi/delete_zone.xml create mode 100644 libcloud/test/dns/fixtures/gandi/delete_zone_fail.xml create mode 100644 libcloud/test/dns/fixtures/gandi/get_zone.xml create mode 100644 libcloud/test/dns/fixtures/gandi/list_records.xml create mode 100644 libcloud/test/dns/fixtures/gandi/list_records_empty.xml create mode 100644 libcloud/test/dns/fixtures/gandi/list_zones.xml create mode 100644 libcloud/test/dns/fixtures/gandi/new_version.xml create mode 
100644 libcloud/test/dns/test_gandi.py diff --git a/CHANGES b/CHANGES index e2add7a89f..d169ca106b 100644 --- a/CHANGES +++ b/CHANGES @@ -154,6 +154,9 @@ Changes with Apache Libcloud 0.12.0: - Finish Amazon Route53 driver. (LIBCLOUD-132) [John Carr] + - Add new driver for Gandi provider (https://www.gandi.net). (LIBCLOUD-281) + [John Carr] + *) Load-Balancer - Add new driver for AWS Elastic Load Balancing service. (LIBCLOUD-169) diff --git a/libcloud/common/gandi.py b/libcloud/common/gandi.py index 3eed567c13..d27ce3e616 100644 --- a/libcloud/common/gandi.py +++ b/libcloud/common/gandi.py @@ -93,7 +93,15 @@ def request(self, method, *args): return getattr(self._proxy, method)(self.key, *args) except xmlrpclib.Fault: e = sys.exc_info()[1] - raise GandiException(1001, e) + self.parse_error(e.faultCode, e.faultString) + raise GandiException(1001, e.faultString) + + def parse_error(self, code, message): + """ + This hook allows you to inspect any xmlrpclib errors and + potentially raise a more useful and specific exception. + """ + pass class BaseGandiDriver(object): @@ -121,8 +129,8 @@ def __init__(self, key, secret=None, secure=False): self.connection.driver = self # Specific methods for gandi - def _wait_operation(self, id, \ - timeout=DEFAULT_TIMEOUT, check_interval=DEFAULT_INTERVAL): + def _wait_operation(self, id, timeout=DEFAULT_TIMEOUT, + check_interval=DEFAULT_INTERVAL): """ Wait for an operation to succeed""" for i in range(0, timeout, check_interval): @@ -131,7 +139,7 @@ def _wait_operation(self, id, \ if op['step'] == 'DONE': return True - if op['step'] in ['ERROR', 'CANCEL']: + if op['step'] in ['ERROR', 'CANCEL']: return False except (KeyError, IndexError): pass @@ -172,8 +180,9 @@ def get_uuid(self): Note, for example, that this example will always produce the same UUID! 
""" - return hashlib.sha1(b("%s:%s:%s" % \ - (self.uuid_prefix, self.id, self.driver.type))).hexdigest() + hashstring = "%s:%s:%s" % \ + (self.uuid_prefix, self.id, self.driver.type) + return hashlib.sha1(b(hashstring)).hexdigest() class IPAddress(BaseObject): @@ -202,7 +211,7 @@ class NetworkInterface(BaseObject): uuid_prefix = 'if:' def __init__(self, id, state, mac_address, driver, - ips=None, node_id=None, extra=None): + ips=None, node_id=None, extra=None): super(NetworkInterface, self).__init__(id, state, driver) self.mac = mac_address self.ips = ips or {} @@ -225,5 +234,6 @@ def __init__(self, id, state, name, driver, size, extra=None): self.extra = extra or {} def __repr__(self): - return (('') + return ( + ('') % (self.id, self.name, self.state, self.size, self.driver.name)) diff --git a/libcloud/dns/drivers/gandi.py b/libcloud/dns/drivers/gandi.py new file mode 100644 index 0000000000..703ce3ce9a --- /dev/null +++ b/libcloud/dns/drivers/gandi.py @@ -0,0 +1,264 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + 'GandiDNSDriver' +] + +from libcloud.common.gandi import BaseGandiDriver, GandiConnection +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import RecordError +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record + + +TTL_MIN = 30 +TTL_MAX = 2592000 # 30 days + + +class NewZoneVersion(object): + """ + Changes to a zone in the Gandi DNS service need to be wrapped in a new + version object. The changes are made to the new version, then that + version is made active. + + In effect, this is a transaction. + + Any calls made inside this context manager will be applied to a new version + id. If your changes are succesful (and only if they are successful) they + are activated. + """ + + def __init__(self, driver, zone): + self.driver = driver + self.connection = driver.connection + self.zone = zone + + def __enter__(self): + zid = int(self.zone.id) + self.connection.set_context({'zone_id': self.zone.id}) + vid = self.connection.request('domain.zone.version.new', zid) + self.vid = vid + return vid + + def __exit__(self, type, value, traceback): + if not traceback: + zid = int(self.zone.id) + con = self.connection + con.set_context({'zone_id': self.zone.id}) + con.request('domain.zone.version.set', zid, self.vid) + + +class GandiDNSConnection(GandiConnection): + + def parse_error(self, code, message): + if code == 581042: + zone_id = str(self.context.get('zone_id', None)) + raise ZoneDoesNotExistError(value='', driver=self.driver, + zone_id=zone_id) + + +class GandiDNSDriver(BaseGandiDriver, DNSDriver): + """ + API reference can be found at: + + http://doc.rpc.gandi.net/domain/reference.html + """ + + type = Provider.GANDI + name = 'Gandi DNS' + website = 'http://www.gandi.net/domain' + + connectionCls = GandiDNSConnection + + RECORD_TYPE_MAP = { + RecordType.NS: 'NS', + RecordType.MX: 'MX', + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + 
RecordType.CNAME: 'CNAME', + RecordType.TXT: 'TXT', + RecordType.SRV: 'SRV', + RecordType.SPF: 'SPF', + RecordType.WKS: 'WKS', + RecordType.LOC: 'LOC', + } + + def _to_zone(self, zone): + return Zone( + id=zone['id'], + domain=zone['name'], + type='master', + ttl=0, + driver=self, + extra={} + ) + + def _to_zones(self, zones): + ret = [] + for z in zones: + ret.append(self._to_zone(z)) + return ret + + def list_zones(self): + zones = self.connection.request('domain.zone.list') + return self._to_zones(zones) + + def get_zone(self, zone_id): + zid = int(zone_id) + self.connection.set_context({'zone_id': zid}) + zone = self.connection.request('domain.zone.info', zid) + return self._to_zone(zone) + + def create_zone(self, domain, type='master', ttl=None, extra=None): + params = {'name': domain} + info = self.connection.request('domain.zone.create', params) + return self._to_zone(info) + + def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): + zid = int(zone.id) + params = {'name': domain} + self.connection.set_context({'zone_id': zid}) + zone = self.connection.request('domain.zone.update', zid, params) + return self._to_zone(zone) + + def delete_zone(self, zone): + zid = int(zone.id) + self.connection.set_context({'zone_id': zid}) + res = self.connection.request('domain.zone.delete', zid) + return res + + def _to_record(self, record, zone): + return Record( + id='%s:%s' % (record['type'], record['name']), + name=record['name'], + type=self._string_to_record_type(record['type']), + data=record['value'], + zone=zone, + driver=self, + extra={'ttl': record['ttl']} + ) + + def _to_records(self, records, zone): + retval = [] + for r in records: + retval.append(self._to_record(r, zone)) + return retval + + def list_records(self, zone): + zid = int(zone.id) + self.connection.set_context({'zone_id': zid}) + records = self.connection.request('domain.zone.record.list', zid, 0) + return self._to_records(records, zone) + + def get_record(self, zone_id, 
record_id): + zid = int(zone_id) + record_type, name = record_id.split(':', 1) + filter_opts = { + 'name': name, + 'type': record_type + } + self.connection.set_context({'zone_id': zid}) + records = self.connection.request('domain.zone.record.list', + zid, 0, filter_opts) + + if len(records) == 0: + raise RecordDoesNotExistError(value='', driver=self, + record_id=record_id) + + return self._to_record(records[0], self.get_zone(zone_id)) + + def _validate_record(self, record_id, name, record_type, data, extra): + if len(data) > 1024: + raise RecordError('Record data must be <= 1024 characters', + driver=self, record_id=record_id) + if extra and 'ttl' in extra: + if extra['ttl'] < TTL_MIN: + raise RecordError('TTL must be at least 30 seconds', + driver=self, record_id=record_id) + if extra['ttl'] > TTL_MAX: + raise RecordError('TTL must not excdeed 30 days', + driver=self, record_id=record_id) + + def create_record(self, name, zone, type, data, extra=None): + self._validate_record(None, name, type, data, extra) + + zid = int(zone.id) + + create = { + 'name': name, + 'type': self.RECORD_TYPE_MAP[type], + 'value': data + } + + if 'ttl' in extra: + create['ttl'] = extra['ttl'] + + with NewZoneVersion(self, zone) as vid: + con = self.connection + con.set_context({'zone_id': zid}) + rec = con.request('domain.zone.record.add', + zid, vid, create) + + return self._to_record(rec, zone) + + def update_record(self, record, name, type, data, extra): + self._validate_record(record.id, name, type, data, extra) + + filter_opts = { + 'name': record.name, + 'type': self.RECORD_TYPE_MAP[record.type] + } + + update = { + 'name': name, + 'type': self.RECORD_TYPE_MAP[type], + 'value': data + } + + if 'ttl' in extra: + update['ttl'] = extra['ttl'] + + zid = int(record.zone.id) + + with NewZoneVersion(self, record.zone) as vid: + con = self.connection + con.set_context({'zone_id': zid}) + con.request('domain.zone.record.delete', + zid, vid, filter_opts) + res = 
con.request('domain.zone.record.add', + zid, vid, update) + + return self._to_record(res, record.zone) + + def delete_record(self, record): + zid = int(record.zone.id) + + filter_opts = { + 'name': record.name, + 'type': self.RECORD_TYPE_MAP[record.type] + } + + with NewZoneVersion(self, record.zone) as vid: + con = self.connection + con.set_context({'zone_id': zid}) + count = con.request('domain.zone.record.delete', + zid, vid, filter_opts) + + if count == 1: + return True + + raise RecordDoesNotExistError(value='No such record', driver=self, + record_id=record.id) diff --git a/libcloud/dns/providers.py b/libcloud/dns/providers.py index 6376269b94..bdd704e093 100644 --- a/libcloud/dns/providers.py +++ b/libcloud/dns/providers.py @@ -32,6 +32,8 @@ ('libcloud.dns.drivers.hostvirtual', 'HostVirtualDNSDriver'), Provider.ROUTE53: ('libcloud.dns.drivers.route53', 'Route53DNSDriver'), + Provider.GANDI: + ('libcloud.dns.drivers.gandi', 'GandiDNSDriver') } diff --git a/libcloud/dns/types.py b/libcloud/dns/types.py index 091a1c2b9a..7acb1e47ce 100644 --- a/libcloud/dns/types.py +++ b/libcloud/dns/types.py @@ -35,6 +35,7 @@ class Provider(object): RACKSPACE_UK = 'rackspace_uk' ROUTE53 = 'route53' HOSTVIRTUAL = 'hostvirtual' + GANDI = 'gandi' class RecordType(object): @@ -57,6 +58,8 @@ class RecordType(object): REDIRECT = 13 GEO = 14 URL = 15 + WKS = 16 + LOC = 17 @classmethod def __repr__(self, value): @@ -67,7 +70,7 @@ def __repr__(self, value): class ZoneError(LibcloudError): error_type = 'ZoneError' kwargs = ('zone_id', ) - + def __init__(self, value, driver, zone_id): self.zone_id = zone_id super(ZoneError, self).__init__(value=value, driver=driver) diff --git a/libcloud/test/common/test_gandi.py b/libcloud/test/common/test_gandi.py new file mode 100644 index 0000000000..635266086b --- /dev/null +++ b/libcloud/test/common/test_gandi.py @@ -0,0 +1,33 @@ +import sys +import unittest + +from xml.etree import ElementTree as ET + +from libcloud.utils.py3 import xmlrpclib + + 
+class MockGandiTransport(xmlrpclib.Transport): + + def request(self, host, handler, request_body, verbose=0): + self.verbose = 0 + method = ET.XML(request_body).find('methodName').text + mock = self.mockCls(host, 80) + mock.request('POST', '%s/%s' % (handler, method)) + resp = mock.getresponse() + + if sys.version[0] == '2' and sys.version[2] == '7': + response = self.parse_response(resp) + else: + response = self.parse_response(resp.body) + return response + + +class BaseGandiTests(unittest.TestCase): + + def setUp(self): + d = self.driverCls + t = self.transportCls + t.mockCls.type = None + d.connectionCls.proxyCls.transportCls = \ + [t, t] + self.driver = d(*self.params) diff --git a/libcloud/test/compute/test_gandi.py b/libcloud/test/compute/test_gandi.py index 91f567b09c..ff0e037d99 100644 --- a/libcloud/test/compute/test_gandi.py +++ b/libcloud/test/compute/test_gandi.py @@ -21,7 +21,7 @@ from libcloud.utils.py3 import httplib from libcloud.utils.py3 import xmlrpclib -from libcloud.compute.drivers.gandi import GandiNodeDriver as Gandi +from libcloud.compute.drivers.gandi import GandiNodeDriver from libcloud.compute.base import StorageVolume from libcloud.common.gandi import GandiException from libcloud.compute.types import NodeState @@ -30,32 +30,113 @@ from libcloud.test import MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import GANDI_PARAMS +from libcloud.test.common.test_gandi import MockGandiTransport, BaseGandiTests -class MockGandiTransport(xmlrpclib.Transport): +class GandiMockHttp(MockHttp): - def request(self, host, handler, request_body, verbose=0): - self.verbose = 0 - method = ET.XML(request_body).find('methodName').text - mock = GandiMockHttp(host, 80) - mock.request('POST', "%s/%s" % (handler, method)) - resp = mock.getresponse() + fixtures = ComputeFileFixtures('gandi') - if sys.version[0] == '2' and sys.version[2] == '7': - response = self.parse_response(resp) - else: - response = 
self.parse_response(resp.body) - return response + def _xmlrpc__datacenter_list(self, method, url, body, headers): + body = self.fixtures.load('datacenter_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _xmlrpc__image_list(self, method, url, body, headers): + body = self.fixtures.load('image_list_dc0.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) -class GandiTests(unittest.TestCase): + def _xmlrpc__vm_list(self, method, url, body, headers): + body = self.fixtures.load('vm_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - node_name = 'test2' + def _xmlrpc__ip_list(self, method, url, body, headers): + body = self.fixtures.load('ip_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__account_info(self, method, url, body, headers): + body = self.fixtures.load('account_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_info(self, method, url, body, headers): + body = self.fixtures.load('vm_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_delete(self, method, url, body, headers): + body = self.fixtures.load('vm_delete.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__operation_info(self, method, url, body, headers): + body = self.fixtures.load('operation_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_create_from(self, method, url, body, headers): + body = self.fixtures.load('vm_create_from.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_reboot(self, method, url, body, headers): + body = self.fixtures.load('vm_reboot.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_stop(self, method, url, body, headers): + body = self.fixtures.load('vm_stop.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) 
+ + def _xmlrpc__iface_list(self, method, url, body, headers): + body = self.fixtures.load('iface_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def setUp(self): - Gandi.connectionCls.proxyCls.transportCls = \ - [MockGandiTransport, MockGandiTransport] - self.driver = Gandi(*GANDI_PARAMS) + def _xmlrpc__disk_list(self, method, url, body, headers): + body = self.fixtures.load('disk_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_iface_attach(self, method, url, body, headers): + body = self.fixtures.load('iface_attach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_iface_detach(self, method, url, body, headers): + body = self.fixtures.load('iface_detach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_disk_attach(self, method, url, body, headers): + body = self.fixtures.load('disk_attach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_disk_detach(self, method, url, body, headers): + body = self.fixtures.load('disk_detach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_create(self, method, url, body, headers): + body = self.fixtures.load('disk_create.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_create_from(self, method, url, body, headers): + body = self.fixtures.load('disk_create_from.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_info(self, method, url, body, headers): + body = self.fixtures.load('disk_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_update(self, method, url, body, headers): + body = self.fixtures.load('disk_update.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_delete(self, method, url, body, headers): + body = 
self.fixtures.load('disk_delete.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +class DummyTransport(MockGandiTransport): + mockCls = GandiMockHttp + + +class GandiTests(BaseGandiTests): + + driverCls = GandiNodeDriver + transportCls = DummyTransport + params = GANDI_PARAMS + + node_name = 'test2' def test_list_nodes(self): nodes = self.driver.list_nodes() @@ -64,12 +145,12 @@ def test_list_nodes(self): def test_list_locations(self): loc = list(filter(lambda x: 'france' in x.country.lower(), - self.driver.list_locations()))[0] + self.driver.list_locations()))[0] self.assertEqual(loc.country, 'France') def test_list_images(self): loc = list(filter(lambda x: 'france' in x.country.lower(), - self.driver.list_locations()))[0] + self.driver.list_locations()))[0] images = self.driver.list_images(loc) self.assertTrue(len(images) > 2) @@ -79,7 +160,8 @@ def test_list_sizes(self): def test_destroy_node_running(self): nodes = self.driver.list_nodes() - test_node = list(filter(lambda x: x.state == NodeState.RUNNING, nodes))[0] + test_node = list(filter(lambda x: x.state == NodeState.RUNNING, + nodes))[0] self.assertTrue(self.driver.destroy_node(test_node)) def test_destroy_node_halted(self): @@ -90,29 +172,34 @@ def test_destroy_node_halted(self): def test_reboot_node(self): nodes = self.driver.list_nodes() - test_node = list(filter(lambda x: x.state == NodeState.RUNNING, nodes))[0] + test_node = list(filter(lambda x: x.state == NodeState.RUNNING, + nodes))[0] self.assertTrue(self.driver.reboot_node(test_node)) def test_create_node(self): login = 'libcloud' passwd = ''.join(random.choice(string.ascii_letters) - for i in range(10)) + for i in range(10)) + # Get france datacenter loc = list(filter(lambda x: 'france' in x.country.lower(), - self.driver.list_locations()))[0] + self.driver.list_locations()))[0] + # Get a debian image images = self.driver.list_images(loc) images = [x for x in images if x.name.lower().startswith('debian')] img = 
list(filter(lambda x: '5' in x.name, images))[0] + # Get a configuration size size = self.driver.list_sizes()[0] node = self.driver.create_node(name=self.node_name, login=login, - password=passwd, image=img, location=loc, size=size) + password=passwd, image=img, + location=loc, size=size) self.assertEqual(node.name, self.node_name) def test_create_volume(self): loc = list(filter(lambda x: 'france' in x.country.lower(), - self.driver.list_locations()))[0] + self.driver.list_locations()))[0] volume = self.driver.create_volume( size=1024, name='libcloud', location=loc) self.assertEqual(volume.name, 'libcloud') @@ -125,7 +212,7 @@ def test_list_volumes(self): def test_destroy_volume(self): volumes = self.driver.list_volumes() test_vol = list(filter(lambda x: x.name == 'test_disk', - volumes))[0] + volumes))[0] self.assertTrue(self.driver.destroy_volume(test_vol)) def test_attach_volume(self): @@ -160,104 +247,12 @@ def test_ex_snapshot_disk(self): disks = self.driver.list_volumes() self.assertTrue(self.driver.ex_snapshot_disk(disks[2])) self.assertRaises(GandiException, - self.driver.ex_snapshot_disk, disks[0]) + self.driver.ex_snapshot_disk, disks[0]) def test_ex_update_disk(self): disks = self.driver.list_volumes() self.assertTrue(self.driver.ex_update_disk(disks[0], new_size=4096)) -class GandiMockHttp(MockHttp): - - fixtures = ComputeFileFixtures('gandi') - - def _xmlrpc__datacenter_list(self, method, url, body, headers): - body = self.fixtures.load('datacenter_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__image_list(self, method, url, body, headers): - body = self.fixtures.load('image_list_dc0.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_list(self, method, url, body, headers): - body = self.fixtures.load('vm_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__ip_list(self, method, url, body, headers): - body = 
self.fixtures.load('ip_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__account_info(self, method, url, body, headers): - body = self.fixtures.load('account_info.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_info(self, method, url, body, headers): - body = self.fixtures.load('vm_info.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_delete(self, method, url, body, headers): - body = self.fixtures.load('vm_delete.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__operation_info(self, method, url, body, headers): - body = self.fixtures.load('operation_info.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_create_from(self, method, url, body, headers): - body = self.fixtures.load('vm_create_from.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_reboot(self, method, url, body, headers): - body = self.fixtures.load('vm_reboot.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_stop(self, method, url, body, headers): - body = self.fixtures.load('vm_stop.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__iface_list(self, method, url, body, headers): - body = self.fixtures.load('iface_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_list(self, method, url, body, headers): - body = self.fixtures.load('disk_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_iface_attach(self, method, url, body, headers): - body = self.fixtures.load('iface_attach.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_iface_detach(self, method, url, body, headers): - body = self.fixtures.load('iface_detach.xml') - return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) - - def _xmlrpc__vm_disk_attach(self, method, url, body, headers): - body = self.fixtures.load('disk_attach.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_disk_detach(self, method, url, body, headers): - body = self.fixtures.load('disk_detach.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_create(self, method, url, body, headers): - body = self.fixtures.load('disk_create.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_create_from(self, method, url, body, headers): - body = self.fixtures.load('disk_create_from.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_info(self, method, url, body, headers): - body = self.fixtures.load('disk_info.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_update(self, method, url, body, headers): - body = self.fixtures.load('disk_update.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_delete(self, method, url, body, headers): - body = self.fixtures.load('disk_delete.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - if __name__ == '__main__': sys.exit(unittest.main()) diff --git a/libcloud/test/dns/fixtures/gandi/create_record.xml b/libcloud/test/dns/fixtures/gandi/create_record.xml new file mode 100644 index 0000000000..13b6cbe62c --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/create_record.xml @@ -0,0 +1,31 @@ + + + + + + + + id + 47234 + + + name + www + + + ttl + 0 + + + type + A + + + value + 127.0.0.1 + + + + + + diff --git a/libcloud/test/dns/fixtures/gandi/create_zone.xml b/libcloud/test/dns/fixtures/gandi/create_zone.xml new file mode 100644 index 0000000000..e8b7a1e58a --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/create_zone.xml @@ -0,0 +1,43 @@ + + + + + + + + date_updated + 20101028T12:38:17 + + + domains + 0 + + 
+ id + 47234 + + + name + t.com + + + owner + AB3917-GANDI + + + public + 0 + + + version + 1 + + + versions + + + + + + + diff --git a/libcloud/test/dns/fixtures/gandi/delete_record.xml b/libcloud/test/dns/fixtures/gandi/delete_record.xml new file mode 100644 index 0000000000..32aaf65d11 --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/delete_record.xml @@ -0,0 +1,8 @@ + + + + + 1 + + + diff --git a/libcloud/test/dns/fixtures/gandi/delete_record_doesnotexist.xml b/libcloud/test/dns/fixtures/gandi/delete_record_doesnotexist.xml new file mode 100644 index 0000000000..aaa5471235 --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/delete_record_doesnotexist.xml @@ -0,0 +1,8 @@ + + + + + 0 + + + diff --git a/libcloud/test/dns/fixtures/gandi/delete_zone.xml b/libcloud/test/dns/fixtures/gandi/delete_zone.xml new file mode 100644 index 0000000000..efe775c400 --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/delete_zone.xml @@ -0,0 +1,8 @@ + + + + + 1 + + + diff --git a/libcloud/test/dns/fixtures/gandi/delete_zone_fail.xml b/libcloud/test/dns/fixtures/gandi/delete_zone_fail.xml new file mode 100644 index 0000000000..3601450683 --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/delete_zone_fail.xml @@ -0,0 +1,8 @@ + + + + + 0 + + + diff --git a/libcloud/test/dns/fixtures/gandi/get_zone.xml b/libcloud/test/dns/fixtures/gandi/get_zone.xml new file mode 100644 index 0000000000..e8b7a1e58a --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/get_zone.xml @@ -0,0 +1,43 @@ + + + + + + + + date_updated + 20101028T12:38:17 + + + domains + 0 + + + id + 47234 + + + name + t.com + + + owner + AB3917-GANDI + + + public + 0 + + + version + 1 + + + versions + + + + + + + diff --git a/libcloud/test/dns/fixtures/gandi/list_records.xml b/libcloud/test/dns/fixtures/gandi/list_records.xml new file mode 100644 index 0000000000..e91c565448 --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/list_records.xml @@ -0,0 +1,88 @@ + + + + + + + + + + + id + 47234 + + + name + wibble + + + 
ttl + 86400 + + + type + CNAME + + + value + t.com + + + + + + + + id + 47234 + + + name + www + + + ttl + 86400 + + + type + A + + + value + 208.111.35.173 + + + + + + + + id + 47234 + + + name + blahblah + + + ttl + 86400 + + + type + A + + + value + 208.111.35.173 + + + + + + + + + + diff --git a/libcloud/test/dns/fixtures/gandi/list_records_empty.xml b/libcloud/test/dns/fixtures/gandi/list_records_empty.xml new file mode 100644 index 0000000000..528b45c587 --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/list_records_empty.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/libcloud/test/dns/fixtures/gandi/list_zones.xml b/libcloud/test/dns/fixtures/gandi/list_zones.xml new file mode 100644 index 0000000000..09258a9a35 --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/list_zones.xml @@ -0,0 +1,138 @@ + + + + + + + + + + + date_updated + 20101028T12:38:17 + + + id + 47234 + + + name + t.com + + + public + 0 + + + version + 1 + + + + + + + + date_updated + 20101028T12:38:17 + + + id + 48170 + + + name + newbug.net + + + public + 0 + + + version + 1 + + + + + + + + date_updated + 20101028T12:38:17 + + + id + 48017 + + + name + newblah.com + + + public + 0 + + + version + 1 + + + + + + + + date_updated + 20101028T12:38:17 + + + id + 47288 + + + name + fromapi.com + + + public + 0 + + + version + 1 + + + + + + + + date_updated + 20101028T12:38:17 + + + id + 48008 + + + name + blahnew.com + + + public + 0 + + + version + 1 + + + + + + + + + + diff --git a/libcloud/test/dns/fixtures/gandi/new_version.xml b/libcloud/test/dns/fixtures/gandi/new_version.xml new file mode 100644 index 0000000000..32aaf65d11 --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/new_version.xml @@ -0,0 +1,8 @@ + + + + + 1 + + + diff --git a/libcloud/test/dns/test_gandi.py b/libcloud/test/dns/test_gandi.py new file mode 100644 index 0000000000..03b755974e --- /dev/null +++ b/libcloud/test/dns/test_gandi.py @@ -0,0 +1,289 @@ +# Licensed to the Apache Software Foundation (ASF) 
under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import xmlrpclib +from libcloud.dns.types import RecordType, ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.gandi import GandiDNSDriver +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import DNSFileFixtures +from libcloud.test.secrets import DNS_GANDI +from libcloud.test.common.test_gandi import MockGandiTransport, BaseGandiTests + +Fault = xmlrpclib.Fault + +class GandiMockHttp(MockHttp): + fixtures = DNSFileFixtures('gandi') + + def _xmlrpc__domain_zone_create(self, method, url, body, headers): + body = self.fixtures.load('create_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_update(self, method, url, body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_list(self, method, url, body, headers): + body = self.fixtures.load('list_zones.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_list(self, method, url, body, headers): + body = 
self.fixtures.load('list_records.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_add(self, method, url, body, headers): + body = self.fixtures.load('create_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_info(self, method, url, body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_update(self, method, url, body, headers): + body = self.fixtures.load('create_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_new(self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_set(self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_list_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): + raise Fault(581042, "Zone does not exist") + + def _xmlrpc__domain_zone_info_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): + raise Fault(581042, "Zone does not exist") + + def _xmlrpc__domain_zone_list_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): + raise Fault(581042, "Zone does not exist") + + def _xmlrpc__domain_zone_delete_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): + raise Fault(581042, "Zone does not exist") + + def 
_xmlrpc__domain_zone_record_list_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + body = self.fixtures.load('list_records_empty.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_info_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + body = self.fixtures.load('list_zones.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_delete_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + body = self.fixtures.load('delete_record_doesnotexist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_new_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_set_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +class DummyTransport(MockGandiTransport): + mockCls = GandiMockHttp + + +class GandiTests(BaseGandiTests): + + driverCls = GandiDNSDriver + transportCls = DummyTransport + params = DNS_GANDI + + def test_list_record_types(self): + record_types = self.driver.list_record_types() + self.assertEqual(len(record_types), 10) + self.assertTrue(RecordType.A in record_types) + + def test_list_zones(self): + zones = self.driver.list_zones() + self.assertEqual(len(zones), 5) + + zone = zones[0] + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 't.com') + + def test_list_records(self): + zone = self.driver.list_zones()[0] + records = self.driver.list_records(zone=zone) + self.assertEqual(len(records), 3) + + record = records[1] + self.assertEqual(record.name, 'www') + self.assertEqual(record.id, 'A:www') + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, 
'208.111.35.173') + + def test_get_zone(self): + zone = self.driver.get_zone(zone_id='47234') + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 't.com') + + def test_get_record(self): + record = self.driver.get_record(zone_id='47234', + record_id='CNAME:t.com') + self.assertEqual(record.name, 'wibble') + self.assertEqual(record.type, RecordType.CNAME) + self.assertEqual(record.data, 't.com') + + def test_list_records_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + GandiMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.list_records(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_get_zone_does_not_exist(self): + GandiMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_zone(zone_id='47234') + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, '47234') + else: + self.fail('Exception was not thrown') + + def test_get_record_zone_does_not_exist(self): + GandiMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='4444', record_id='CNAME:t.com') + except ZoneDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_record_record_does_not_exist(self): + GandiMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='47234', + record_id='CNAME:t.com') + except RecordDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_zone(self): + zone = self.driver.create_zone(domain='t.com', type='master', + ttl=None, extra=None) + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.domain, 't.com') + + def test_update_zone(self): + zone = self.driver.get_zone(zone_id='47234') + zone = self.driver.update_zone(zone, domain='t.com') + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.type, 'master') + 
self.assertEqual(zone.domain, 't.com') + + def test_create_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.create_record( + name='www', zone=zone, + type=RecordType.A, data='127.0.0.1', + extra={'ttl': 30} + ) + + self.assertEqual(record.id, 'A:www') + self.assertEqual(record.name, 'www') + self.assertEqual(record.zone, zone) + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '127.0.0.1') + + def test_update_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[1] + + params = { + 'record': record, + 'name': 'www', + 'type': RecordType.A, + 'data': '127.0.0.1', + 'extra': {'ttl': 30}} + updated_record = self.driver.update_record(**params) + + self.assertEqual(record.data, '208.111.35.173') + + self.assertEqual(updated_record.id, 'A:www') + self.assertEqual(updated_record.name, 'www') + self.assertEqual(updated_record.zone, record.zone) + self.assertEqual(updated_record.type, RecordType.A) + self.assertEqual(updated_record.data, '127.0.0.1') + + def test_delete_zone(self): + zone = self.driver.list_zones()[0] + status = self.driver.delete_zone(zone=zone) + self.assertTrue(status) + + def test_delete_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + GandiMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.delete_zone(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_delete_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + status = self.driver.delete_record(record=record) + self.assertTrue(status) + + def test_delete_record_does_not_exist(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + GandiMockHttp.type = 'RECORD_DOES_NOT_EXIST' + try: + self.driver.delete_record(record=record) + except RecordDoesNotExistError: + e = 
sys.exc_info()[1] + self.assertEqual(e.record_id, record.id) + else: + self.fail('Exception was not thrown') + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/libcloud/test/secrets.py-dist b/libcloud/test/secrets.py-dist index ef9916b25f..3ba7dc5715 100644 --- a/libcloud/test/secrets.py-dist +++ b/libcloud/test/secrets.py-dist @@ -54,3 +54,4 @@ DNS_PARAMS_ZERIGO = ('email', 'api token') DNS_PARAMS_RACKSPACE = ('user', 'key') DNS_PARAMS_HOSTVIRTUAL = ('key',) DNS_PARAMS_ROUTE53 = ('access_id', 'secret') +DNS_GANDI = ('user', ) From 309c569947a5d67a5b0001da4769ea4e0842681c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Thu, 31 Jan 2013 02:53:32 +0000 Subject: [PATCH 015/143] Backport commit 1440821 from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1440824 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/dns/drivers/gandi.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libcloud/dns/drivers/gandi.py b/libcloud/dns/drivers/gandi.py index 703ce3ce9a..83f2933ef2 100644 --- a/libcloud/dns/drivers/gandi.py +++ b/libcloud/dns/drivers/gandi.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import with_statement + __all__ = [ 'GandiDNSDriver' ] From e6dd0b79cb75e624d6a8cd111d649ec1b6143090 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 5 Feb 2013 19:50:18 +0000 Subject: [PATCH 016/143] Backport changes from trunk. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1442714 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 26 + libcloud/common/gandi.py | 88 +-- libcloud/common/hostvirtual.py | 6 +- libcloud/common/xmlrpc.py | 108 ++++ libcloud/compute/drivers/ec2.py | 10 + libcloud/compute/drivers/gandi.py | 58 +- libcloud/compute/drivers/hostvirtual.py | 109 +++- libcloud/compute/drivers/softlayer.py | 473 +++++++--------- libcloud/compute/drivers/vcl.py | 97 +--- libcloud/data/pricing.json | 24 +- libcloud/dns/drivers/gandi.py | 60 +- libcloud/dns/drivers/hostvirtual.py | 2 - libcloud/test/common/test_gandi.py | 38 +- .../fixtures/hostvirtual/get_node.json | 16 + .../fixtures/hostvirtual/list_nodes.json | 134 ++--- .../fixtures/hostvirtual/list_sizes.json | 66 ++- .../fixtures/softlayer/SoftLayer_Account.xml | 17 + .../test/compute/fixtures/softlayer/empty.xml | 2 + .../test/compute/fixtures/softlayer/fail.xml | 17 + .../v3_SoftLayer_Account_getVirtualGuests.xml | 5 +- ...yer_Location_Datacenter_getDatacenters.xml | 26 +- ...__SoftLayer_Virtual_Guest_createObject.xml | 142 +++++ ...r_Virtual_Guest_getCreateObjectOptions.xml | 67 +++ .../v3__SoftLayer_Virtual_Guest_getObject.xml | 519 ++++++++++++++++++ libcloud/test/compute/test_ec2.py | 4 +- libcloud/test/compute/test_gandi.py | 208 ++++--- libcloud/test/compute/test_hostvirtual.py | 41 +- libcloud/test/compute/test_softlayer.py | 151 ++++- libcloud/test/compute/test_vcl.py | 58 +- .../dns/fixtures/gandi/zone_doesnt_exist.xml | 17 + libcloud/test/dns/test_gandi.py | 191 +++---- 31 files changed, 1892 insertions(+), 888 deletions(-) create mode 100644 libcloud/common/xmlrpc.py create mode 100644 libcloud/test/compute/fixtures/hostvirtual/get_node.json create mode 100644 libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml create mode 100644 libcloud/test/compute/fixtures/softlayer/empty.xml create mode 100644 libcloud/test/compute/fixtures/softlayer/fail.xml create mode 100644 
libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xml create mode 100644 libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml create mode 100644 libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xml create mode 100644 libcloud/test/dns/fixtures/gandi/zone_doesnt_exist.xml diff --git a/CHANGES b/CHANGES index d169ca106b..ce0bd1823b 100644 --- a/CHANGES +++ b/CHANGES @@ -27,6 +27,17 @@ Changes with Apache Libcloud 0.12.0: CA file which is used to verifying the server certificate. (LIBCLOUD-283) [Tomaz Muraus, Erinn Looney-Triggs] + - Add a common module (libcloud.common.xmlrpc) for handling XML-RPC + requests using Libcloud http layer. + + Also refactor existing drivers which use xmlrpclib directly (VCL, Gandi, + Softlayer) to use this module. + + This change allows drivers to support LIBCLOUD_DEBUG and SSL certificate + validation functionality. Previously they have bypassed Libcloud http + layer so this functionality was not available. (LIBCLOUD-288) + [John Carr] + *) Compute - Fix string interpolation bug in __repr__ methods in the IBM SCE driver. @@ -112,6 +123,21 @@ Changes with Apache Libcloud 0.12.0: relative path for the script argument. (LIBCLOUD-278) [Jaume Devesa] + - Fix Softlayer driver and make sure all the code is up to date and works + with the latest version of the actual Softlayer deployment (v3). + (LIBCLOUD-287) + [Kevin McDonald] + + - Update EC2 driver, m3 instance types are now available in all the regions + except Brazil. + + Also update pricing to reflect new (decreased) prices. + [Tomaz Muraus] + + - Minor improvements in the HostVirtual driver and add new ex_get_node and + ex_build_node extension method. (LIBCLOUD-249) + [Dinesh Bhoopathy] + *) Storage - Add a new local storage driver. 
diff --git a/libcloud/common/gandi.py b/libcloud/common/gandi.py index d27ce3e616..17a9193b0e 100644 --- a/libcloud/common/gandi.py +++ b/libcloud/common/gandi.py @@ -20,15 +20,13 @@ import hashlib import sys -from libcloud.utils.py3 import xmlrpclib from libcloud.utils.py3 import b from libcloud.common.base import ConnectionKey +from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection # Global constants -API_URL = "https://rpc.gandi.net/xmlrpc/" - DEFAULT_TIMEOUT = 600 # operation pooling max seconds DEFAULT_INTERVAL = 20 # seconds between 2 operation.info @@ -38,70 +36,30 @@ class GandiException(Exception): Exception class for Gandi driver """ def __str__(self): - return "(%u) %s" % (self.args[0], self.args[1]) + return '(%u) %s' % (self.args[0], self.args[1]) def __repr__(self): - return "" % (self.args[0], self.args[1]) - - -class GandiSafeTransport(xmlrpclib.SafeTransport): - pass - - -class GandiTransport(xmlrpclib.Transport): - pass + return '' % (self.args[0], self.args[1]) -class GandiProxy(xmlrpclib.ServerProxy): - transportCls = (GandiTransport, GandiSafeTransport) - - def __init__(self, user_agent, verbose=0): - cls = self.transportCls[0] - if API_URL.startswith("https://"): - cls = self.transportCls[1] - t = cls(use_datetime=0) - t.user_agent = user_agent - xmlrpclib.ServerProxy.__init__( - self, - uri=API_URL, - transport=t, - verbose=verbose, - allow_none=True - ) +class GandiResponse(XMLRPCResponse): + """ + A Base Gandi Response class to derive from. 
+ """ -class GandiConnection(ConnectionKey): +class GandiConnection(XMLRPCConnection, ConnectionKey): """ Connection class for the Gandi driver """ - proxyCls = GandiProxy - - def __init__(self, key, password=None): - super(GandiConnection, self).__init__(key) - self.driver = BaseGandiDriver - - try: - self._proxy = self.proxyCls(self._user_agent()) - except xmlrpclib.Fault: - e = sys.exc_info()[1] - raise GandiException(1000, e) + responseCls = GandiResponse + host = 'rpc.gandi.net' + endpoint = '/xmlrpc/' def request(self, method, *args): - """ Request xmlrpc method with given args""" - try: - return getattr(self._proxy, method)(self.key, *args) - except xmlrpclib.Fault: - e = sys.exc_info()[1] - self.parse_error(e.faultCode, e.faultString) - raise GandiException(1001, e.faultString) - - def parse_error(self, code, message): - """ - This hook allows you to inspect any xmlrpclib errors and - potentially raise a more useful and specific exception. - """ - pass + args = (self.key, ) + args + return super(GandiConnection, self).request(method, *args) class BaseGandiDriver(object): @@ -112,22 +70,6 @@ class BaseGandiDriver(object): connectionCls = GandiConnection name = 'Gandi' - def __init__(self, key, secret=None, secure=False): - """ - @param key: API key or username to used (required) - @type key: C{str} - - @param secret: Secret password to be used (required) - @type secret: C{str} - - @param secure: Weither to use HTTPS or HTTP. 
- @type secure: C{bool} - """ - self.key = key - self.secret = secret - self.connection = self.connectionCls(key, secret) - self.connection.driver = self - # Specific methods for gandi def _wait_operation(self, id, timeout=DEFAULT_TIMEOUT, check_interval=DEFAULT_INTERVAL): @@ -135,7 +77,7 @@ def _wait_operation(self, id, timeout=DEFAULT_TIMEOUT, for i in range(0, timeout, check_interval): try: - op = self.connection.request('operation.info', int(id)) + op = self.connection.request('operation.info', int(id)).object if op['step'] == 'DONE': return True @@ -180,7 +122,7 @@ def get_uuid(self): Note, for example, that this example will always produce the same UUID! """ - hashstring = "%s:%s:%s" % \ + hashstring = '%s:%s:%s' % \ (self.uuid_prefix, self.id, self.driver.type) return hashlib.sha1(b(hashstring)).hexdigest() diff --git a/libcloud/common/hostvirtual.py b/libcloud/common/hostvirtual.py index 2c7f06a739..1c48308b98 100644 --- a/libcloud/common/hostvirtual.py +++ b/libcloud/common/hostvirtual.py @@ -19,6 +19,7 @@ from libcloud.utils.py3 import httplib from libcloud.common.base import ConnectionKey, JsonResponse +from libcloud.compute.types import InvalidCredsError from libcloud.common.types import LibcloudError API_HOST = 'www.vr.org' @@ -57,13 +58,10 @@ def parse_body(self): return data def parse_error(self): - context = self.connection.context data = self.parse_body() - status = int(self.status) if self.status == httplib.UNAUTHORIZED: - raise InvalidCredsError( - data['error']['code'] + ': ' + data['error']['message']) + raise InvalidCredsError('%(code)s:%(message)s' % (data['error'])) elif self.status == httplib.PRECONDITION_FAILED: raise HostVirtualException( data['error']['code'], data['error']['message']) diff --git a/libcloud/common/xmlrpc.py b/libcloud/common/xmlrpc.py new file mode 100644 index 0000000000..eba7a7c622 --- /dev/null +++ b/libcloud/common/xmlrpc.py @@ -0,0 +1,108 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# 
contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Base classes for working with xmlrpc APIs +""" + +import sys + +from libcloud.utils.py3 import xmlrpclib +from libcloud.utils.py3 import httplib +from libcloud.common.base import Response, Connection + + +class ProtocolError(Exception): + pass + + +class ErrorCodeMixin(object): + """ + This is a helper for API's that have a well defined collection of error + codes that are easily parsed out of error messages. It acts as a factory: + it finds the right exception for the error code, fetches any parameters it + needs from the context and raises it. 
+ """ + + exceptions = {} + + def raise_exception_for_error(self, error_code, message): + exceptionCls = self.exceptions.get(error_code, None) + if exceptionCls is None: + return + context = self.connection.context + driver = self.connection.driver + params = {} + if hasattr(exceptionCls, 'kwargs'): + for key in exceptionCls.kwargs: + if key in context: + params[key] = context[key] + raise exceptionCls(value=message, driver=driver, **params) + + +class XMLRPCResponse(ErrorCodeMixin, Response): + + defaultExceptionCls = Exception + + def success(self): + return self.status == httplib.OK + + def parse_body(self): + try: + params, methodname = xmlrpclib.loads(self.body) + if len(params) == 1: + params = params[0] + return params + except xmlrpclib.Fault: + e = sys.exc_info()[1] + self.raise_exception_for_error(e.faultCode, e.faultString) + error_string = '%s: %s' % (e.faultCode, e.faultString) + raise self.defaultExceptionCls(error_string) + + def parse_error(self): + msg = 'Server returned an invalid xmlrpc response (%d)' % (self.status) + raise ProtocolError(msg) + + +class XMLRPCConnection(Connection): + """ + Connection class which can call XMLRPC based API's. + + This class uses the xmlrpclib marshalling and demarshalling code but uses + the http transports provided by libcloud giving it better certificate + validation and debugging helpers than the core client library. + """ + + responseCls = XMLRPCResponse + + def add_default_headers(self, headers): + headers['Content-Type'] = 'text/xml' + return headers + + def request(self, method_name, *args, **kwargs): + """ + Call a given `method_name`. + + @type method_name: C{str} + @param method_name: A method exposed by the xmlrpc endpoint that you + are connecting to. + + @type args: C{tuple} + @param args: Arguments to invoke with method with. 
+ """ + endpoint = kwargs.get('endpoint', self.endpoint) + data = xmlrpclib.dumps(args, methodname=method_name, allow_none=True) + return super(XMLRPCConnection, self).request(endpoint, + data=data, + method='POST') diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 06620cd38b..829f20a4f8 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -204,6 +204,8 @@ 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', + 'm3.xlarge', + 'm3.2xlarge', 'c1.medium', 'c1.xlarge' ] @@ -239,6 +241,8 @@ 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', + 'm3.xlarge', + 'm3.2xlarge', 'c1.medium', 'c1.xlarge', 'cc2.8xlarge' @@ -257,6 +261,8 @@ 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', + 'm3.xlarge', + 'm3.2xlarge', 'c1.medium', 'c1.xlarge' ] @@ -274,6 +280,8 @@ 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', + 'm3.xlarge', + 'm3.2xlarge', 'c1.medium', 'c1.xlarge' ] @@ -309,6 +317,8 @@ 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', + 'm3.xlarge', + 'm3.2xlarge', 'c1.medium', 'c1.xlarge' ] diff --git a/libcloud/compute/drivers/gandi.py b/libcloud/compute/drivers/gandi.py index a38e2116c7..d9baceab69 100644 --- a/libcloud/compute/drivers/gandi.py +++ b/libcloud/compute/drivers/gandi.py @@ -62,7 +62,7 @@ def __init__(self, *args, **kwargs): def _resource_info(self, type, id): try: obj = self.connection.request('%s.info' % type, int(id)) - return obj + return obj.object except Exception: e = sys.exc_info()[1] raise GandiException(1003, e) @@ -109,8 +109,8 @@ def _to_volumes(self, disks): return [self._to_volume(d) for d in disks] def list_nodes(self): - vms = self.connection.request('vm.list') - ips = self.connection.request('ip.list') + vms = self.connection.request('vm.list').object + ips = self.connection.request('ip.list').object for vm in vms: vm['ips'] = [] for ip in ips: @@ -124,7 +124,7 @@ def list_nodes(self): def reboot_node(self, node): op = self.connection.request('vm.reboot', int(node.id)) - self._wait_operation(op['id']) + 
self._wait_operation(op.object['id']) vm = self._node_info(int(node.id)) if vm['state'] == 'running': return True @@ -135,11 +135,11 @@ def destroy_node(self, node): if vm['state'] == 'running': # Send vm_stop and wait for accomplish op_stop = self.connection.request('vm.stop', int(node.id)) - if not self._wait_operation(op_stop['id']): + if not self._wait_operation(op_stop.object['id']): raise GandiException(1010, 'vm.stop failed') # Delete op = self.connection.request('vm.delete', int(node.id)) - if self._wait_operation(op['id']): + if self._wait_operation(op.object['id']): return True return False @@ -221,7 +221,7 @@ def create_node(self, **kwargs): (op_disk, op_iface, op_vm) = self.connection.request( 'vm.create_from', vm_spec, disk_spec, src_disk_id - ) + ).object # We wait for vm_create to finish if self._wait_operation(op_vm['id']): @@ -251,7 +251,7 @@ def list_images(self, location=None): else: filtering = {} images = self.connection.request('image.list', filtering) - return [self._to_image(i) for i in images] + return [self._to_image(i) for i in images.object] except Exception: e = sys.exc_info()[1] raise GandiException(1011, e) @@ -268,7 +268,7 @@ def _to_size(self, id, size): ) def list_sizes(self, location=None): - account = self.connection.request('account.info') + account = self.connection.request('account.info').object # Look for available shares, and return a list of share_definition available_res = account['resources']['available'] @@ -306,8 +306,8 @@ def _to_loc(self, loc): ) def list_locations(self): - res = self.connection.request("datacenter.list") - return [self._to_loc(l) for l in res] + res = self.connection.request('datacenter.list') + return [self._to_loc(l) for l in res.object] def list_volumes(self): """ @@ -315,7 +315,7 @@ def list_volumes(self): @rtype: C{list} of L{StorageVolume} """ res = self.connection.request('disk.list', {}) - return self._to_volumes(res) + return self._to_volumes(res.object) def create_volume(self, size, name, 
location=None, snapshot=None): disk_param = { @@ -328,15 +328,15 @@ def create_volume(self, size, name, location=None, snapshot=None): disk_param, int(snapshot.id)) else: op = self.connection.request('disk.create', disk_param) - if self._wait_operation(op['id']): - disk = self._volume_info(op['disk_id']) + if self._wait_operation(op.object['id']): + disk = self._volume_info(op.object['disk_id']) return self._to_volume(disk) return None def attach_volume(self, node, volume, device=None): op = self.connection.request('vm.disk_attach', int(node.id), int(volume.id)) - if self._wait_operation(op['id']): + if self._wait_operation(op.object['id']): return True return False @@ -354,13 +354,13 @@ def detach_volume(self, node, volume): """ op = self.connection.request('vm.disk_detach', int(node.id), int(volume.id)) - if self._wait_operation(op['id']): + if self._wait_operation(op.object['id']): return True return False def destroy_volume(self, volume): op = self.connection.request('disk.delete', int(volume.id)) - if self._wait_operation(op['id']): + if self._wait_operation(op.object['id']): return True return False @@ -401,8 +401,8 @@ def ex_list_interfaces(self): @rtype: C{list} of L{GandiNetworkInterface} """ - ifaces = self.connection.request('iface.list') - ips = self.connection.request('ip.list') + ifaces = self.connection.request('iface.list').object + ips = self.connection.request('ip.list').object for iface in ifaces: iface['ips'] = list( filter(lambda i: i['iface_id'] == iface['id'], ips)) @@ -432,7 +432,7 @@ def ex_list_disks(self): @rtype: C{list} of L{GandiDisk} """ res = self.connection.request('disk.list', {}) - return self._to_disks(res) + return self._to_disks(res.object) def ex_node_attach_disk(self, node, disk): """ @@ -448,7 +448,7 @@ def ex_node_attach_disk(self, node, disk): """ op = self.connection.request('vm.disk_attach', int(node.id), int(disk.id)) - if self._wait_operation(op['id']): + if self._wait_operation(op.object['id']): return True return 
False @@ -466,7 +466,7 @@ def ex_node_detach_disk(self, node, disk): """ op = self.connection.request('vm.disk_detach', int(node.id), int(disk.id)) - if self._wait_operation(op['id']): + if self._wait_operation(op.object['id']): return True return False @@ -485,7 +485,7 @@ def ex_node_attach_interface(self, node, iface): """ op = self.connection.request('vm.iface_attach', int(node.id), int(iface.id)) - if self._wait_operation(op['id']): + if self._wait_operation(op.object['id']): return True return False @@ -504,7 +504,7 @@ def ex_node_detach_interface(self, node, iface): """ op = self.connection.request('vm.iface_detach', int(node.id), int(iface.id)) - if self._wait_operation(op['id']): + if self._wait_operation(op.object['id']): return True return False @@ -521,16 +521,16 @@ def ex_snapshot_disk(self, disk, name=None): @rtype: C{bool} """ if not disk.extra.get('can_snapshot'): - raise GandiException(1021, "Disk %s can't snapshot" % disk.id) + raise GandiException(1021, 'Disk %s can\'t snapshot' % disk.id) if not name: - suffix = datetime.today().strftime("%Y%m%d") - name = "snap_%s" % (suffix) + suffix = datetime.today().strftime('%Y%m%d') + name = 'snap_%s' % (suffix) op = self.connection.request( 'disk.create_from', {'name': name, 'type': 'snapshot', }, int(disk.id), ) - if self._wait_operation(op['id']): + if self._wait_operation(op.object['id']): return True return False @@ -557,6 +557,6 @@ def ex_update_disk(self, disk, new_size=None, new_name=None): op = self.connection.request('disk.update', int(disk.id), params) - if self._wait_operation(op['id']): + if self._wait_operation(op.object['id']): return True return False diff --git a/libcloud/compute/drivers/hostvirtual.py b/libcloud/compute/drivers/hostvirtual.py index 6a8408413d..088f4a7743 100644 --- a/libcloud/compute/drivers/hostvirtual.py +++ b/libcloud/compute/drivers/hostvirtual.py @@ -29,7 +29,7 @@ from libcloud.common.hostvirtual import HostVirtualConnection from libcloud.common.hostvirtual import 
HostVirtualException from libcloud.compute.providers import Provider -from libcloud.compute.types import NodeState, InvalidCredsError +from libcloud.compute.types import NodeState from libcloud.compute.base import Node, NodeDriver from libcloud.compute.base import NodeImage, NodeSize, NodeLocation from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword @@ -72,6 +72,13 @@ def _to_node(self, data): private_ips = [] extra = {} + if 'plan_id' in data: + extra['size'] = data['plan_id'] + if 'os_id' in data: + extra['image'] = data['os_id'] + if 'location_id' in data: + extra['location'] = data['location_id'] + public_ips.append(data['ip']) node = Node(id=data['mbpkgid'], name=data['fqdn'], state=state, @@ -99,7 +106,7 @@ def list_sizes(self, location=None): data=json.dumps(params)).object sizes = [] for size in result: - n = NodeSize(id=size['plan'], + n = NodeSize(id=size['plan_id'], name=size['plan'], ram=size['ram'], disk=size['disk'], @@ -116,9 +123,9 @@ def list_images(self): i = NodeImage(id=image["id"], name=image["os"], driver=self.connection.driver, - extra={ - 'hypervisor': image['tech'], - 'arch': image['bits']}) + extra=image) + del i.extra['id'] + del i.extra['os'] images.append(i) return images @@ -143,7 +150,7 @@ def create_node(self, **kwargs): dc = '3' params = {'fqdn': name, - 'plan': size.id, + 'plan': size.name, 'image': image.id, 'location': dc } @@ -158,10 +165,7 @@ def create_node(self, **kwargs): params['password'] = password if not ssh_key and not password: - raise HostVirtualException(500, "Need SSH key or root password") - - if password is None: - raise HostVirtualException(500, "Root password cannot be empty") + raise HostVirtualException(500, "Need SSH key or Root password") result = self.connection.request(API_ROOT + '/cloud/buy_build', data=json.dumps(params), @@ -185,6 +189,22 @@ def destroy_node(self, node): return bool(result) + def ex_get_node(self, node_id): + """ + Get a single node. 
+ + @param node_id: id of the node that we need the node object for + @type node_id: C{str} + + @rtype: L{Node} + """ + + params = {'mbpkgid': node_id} + result = self.connection.request( + API_ROOT + '/cloud/server', params=params).object + node = self._to_node(result) + return node + def ex_stop_node(self, node): """ Stop a node. @@ -218,3 +238,72 @@ def ex_start_node(self, node): method='POST').object return bool(result) + + def ex_build_node(self, **kwargs): + """ + Build a server on a VR package and get it booted + + @keyword node: node which should be used + @type node: L{Node} + + @keyword image: The distribution to deploy on your server (mandatory) + @type image: L{NodeImage} + + @keyword auth: an SSH key or root password (mandatory) + @type auth: L{NodeAuthSSHKey} or L{NodeAuthPassword} + + @keyword location: which datacenter to create the server in + @type location: L{NodeLocation} + + @rtype: C{bool} + """ + + node = kwargs['node'] + + if 'image' in kwargs: + image = kwargs['image'] + else: + image = node.extra['image'] + + params = { + 'mbpkgid': node.id, + 'image': image, + 'fqdn': node.name, + 'location': node.extra['location'], + } + + auth = kwargs['auth'] + + ssh_key = None + password = None + if isinstance(auth, NodeAuthSSHKey): + ssh_key = auth.pubkey + params['ssh_key'] = ssh_key + elif isinstance(auth, NodeAuthPassword): + password = auth.password + params['password'] = password + + if not ssh_key and not password: + raise HostVirtualException(500, "Need SSH key or Root password") + + result = self.connection.request(API_ROOT + '/cloud/server/build', + data=json.dumps(params), + method='POST').object + return bool(result) + + def ex_delete_node(self, node): + """ + Delete a node. 
+ + @param node: Node which should be used + @type node: L{Node} + + @rtype: C{bool} + """ + + params = {'mbpkgid': node.id} + result = self.connection.request( + API_ROOT + '/cloud/server/delete', data=json.dumps(params), + method='POST').object + + return bool(result) diff --git a/libcloud/compute/drivers/softlayer.py b/libcloud/compute/drivers/softlayer.py index 6f87a0d810..babc89c866 100644 --- a/libcloud/compute/drivers/softlayer.py +++ b/libcloud/compute/drivers/softlayer.py @@ -23,15 +23,26 @@ from libcloud.utils.py3 import xmlrpclib +from libcloud.common.base import ConnectionUserAndKey +from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, \ NodeImage DATACENTERS = { + 'hou02': {'country': 'US'}, 'sea01': {'country': 'US'}, 'wdc01': {'country': 'US'}, - 'dal01': {'country': 'US'} + 'dal01': {'country': 'US'}, + 'dal02': {'country': 'US'}, + 'dal04': {'country': 'US'}, + 'dal05': {'country': 'US'}, + 'dal06': {'country': 'US'}, + 'dal07': {'country': 'US'}, + 'sjc01': {'country': 'US'}, + 'sng01': {'country': 'SG'}, + 'ams01': {'country': 'NL'}, } NODE_STATE_MAP = { @@ -40,113 +51,45 @@ 'PAUSED': NodeState.TERMINATED, } -DEFAULT_PACKAGE = 46 - -SL_IMAGES = [ - {'id': 1684, 'name': 'CentOS 5 - Minimal Install (32 bit)'}, - {'id': 1685, 'name': 'CentOS 5 - Minimal Install (64 bit)'}, - {'id': 1686, 'name': 'CentOS 5 - LAMP Install (32 bit)'}, - {'id': 1687, 'name': 'CentOS 5 - LAMP Install (64 bit)'}, - {'id': 1688, - 'name': 'Red Hat Enterprise Linux 5 - Minimal Install (32 bit)'}, - {'id': 1689, - 'name': 'Red Hat Enterprise Linux 5 - Minimal Install (64 bit)'}, - {'id': 1690, 'name': 'Red Hat Enterprise Linux 5 - LAMP Install (32 bit)'}, - {'id': 1691, 'name': 'Red Hat Enterprise Linux 5 - LAMP Install (64 bit)'}, - {'id': 1692, - 'name': 'Ubuntu 
Linux 8 LTS Hardy Heron - Minimal Install (32 bit)'}, - {'id': 1693, - 'name': 'Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (64 bit)'}, - {'id': 1694, - 'name': 'Ubuntu Linux 8 LTS Hardy Heron - LAMP Install (32 bit)'}, - {'id': 1695, - 'name': 'Ubuntu Linux 8 LTS Hardy Heron - LAMP Install (64 bit)'}, - {'id': 1696, - 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (32 bit)'}, - {'id': 1697, - 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (64 bit)'}, - {'id': 1698, - 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - LAMP Install (32 bit)'}, - {'id': 1699, - 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - LAMP Install (64 bit)'}, - {'id': 1700, 'name': 'Windows Server 2003 Standard SP2 with R2 (32 bit)'}, - {'id': 1701, 'name': 'Windows Server 2003 Standard SP2 with R2 (64 bit)'}, - {'id': 1703, - 'name': 'Windows Server 2003 Enterprise SP2 with R2 (64 bit)'}, - {'id': 1705, 'name': 'Windows Server 2008 Standard Edition (64bit)'}, - {'id': 1715, 'name': 'Windows Server 2003 Datacenter SP2 (64 bit)'}, - {'id': 1716, 'name': 'Windows Server 2003 Datacenter SP2 (32 bit)'}, - {'id': 1742, 'name': 'Windows Server 2008 Standard Edition SP2 (32bit)'}, - {'id': 1752, 'name': 'Windows Server 2008 Standard Edition SP2 (64bit)'}, - {'id': 1756, 'name': 'Windows Server 2008 Enterprise Edition SP2 (32bit)'}, - {'id': 1761, 'name': 'Windows Server 2008 Enterprise Edition SP2 (64bit)'}, - {'id': 1766, 'name': 'Windows Server 2008 Datacenter Edition SP2 (32bit)'}, - {'id': 1770, 'name': 'Windows Server 2008 Datacenter Edition SP2 (64bit)'}, - {'id': 1857, 'name': 'Windows Server 2008 R2 Standard Edition (64bit)'}, - {'id': 1860, 'name': 'Windows Server 2008 R2 Enterprise Edition (64bit)'}, - {'id': 1863, 'name': 'Windows Server 2008 R2 Datacenter Edition (64bit)'}, -] - -""" -The following code snippet will print out all available "prices" - mask = { 'items': '' } - res = self.connection.request( - "SoftLayer_Product_Package", - "getObject", - res, - 
id=46, - object_mask=mask - ) - - from pprint import pprint; pprint(res) -""" -SL_TEMPLATES = { - 'sl1': { - 'imagedata': { - 'name': '2 x 2.0 GHz, 1GB ram, 100GB', - 'ram': 1024, - 'disk': 100, - 'bandwidth': None - }, - 'prices': [ - {'id': 1644}, # 1 GB - {'id': 1639}, # 100 GB (SAN) - {'id': 1963}, # Private 2 x 2.0 GHz Cores - {'id': 21}, # 1 IP Address - {'id': 55}, # Host Ping - {'id': 58}, # Automated Notification - {'id': 1800}, # 0 GB Bandwidth - {'id': 57}, # Email and Ticket - {'id': 274}, # 1000 Mbps Public & Private Networks - {'id': 905}, # Reboot / Remote Console - {'id': 418}, # Nessus Vulnerability Assessment & Reporting - {'id': 420}, # Unlimited SSL VPN Users & 1 PPTP VPN User per account - ], - }, - 'sl2': { - 'imagedata': { - 'name': '2 x 2.0 GHz, 4GB ram, 350GB', - 'ram': 4096, - 'disk': 350, - 'bandwidth': None - }, - 'prices': [ - {'id': 1646}, # 4 GB - {'id': 1639}, # 100 GB (SAN) - This is the only available "First Disk" - {'id': 1638}, # 250 GB (SAN) - {'id': 1963}, # Private 2 x 2.0 GHz Cores - {'id': 21}, # 1 IP Address - {'id': 55}, # Host Ping - {'id': 58}, # Automated Notification - {'id': 1800}, # 0 GB Bandwidth - {'id': 57}, # Email and Ticket - {'id': 274}, # 1000 Mbps Public & Private Networks - {'id': 905}, # Reboot / Remote Console - {'id': 418}, # Nessus Vulnerability Assessment & Reporting - {'id': 420}, # Unlimited SSL VPN Users & 1 PPTP VPN User per account - ], - } -} +SL_BASE_TEMPLATES = [ + { + 'name': '1 CPU, 1GB ram, 25GB', + 'ram': 1024, + 'disk': 25, + 'cpus': 1, + }, { + 'name': '1 CPU, 1GB ram, 100GB', + 'ram': 1024, + 'disk': 100, + 'cpus': 1, + }, { + 'name': '2 CPU, 2GB ram, 100GB', + 'ram': 4 * 1024, + 'disk': 100, + 'cpus': 2, + }, { + 'name': '4 CPU, 4GB ram, 100GB', + 'ram': 4 * 1024, + 'disk': 100, + 'cpus': 4, + }, { + 'name': '8 CPU, 8GB ram, 100GB', + 'ram': 8 * 1024, + 'disk': 100, + 'cpus': 8, + }] + +SL_TEMPLATES = {} +for i, template in enumerate(SL_BASE_TEMPLATES): + # Add local disk templates + 
local = template.copy() + local['local_disk'] = True + SL_TEMPLATES['sl%s_local_disk' % (i + 1,)] = local + + # Add san disk templates + san = template.copy() + san['local_disk'] = False + SL_TEMPLATES['sl%s_san_disk' % (i + 1,)] = san class SoftLayerException(LibcloudError): @@ -156,75 +99,37 @@ class SoftLayerException(LibcloudError): pass -class SoftLayerSafeTransport(xmlrpclib.SafeTransport): - pass - - -class SoftLayerTransport(xmlrpclib.Transport): - pass - - -class SoftLayerProxy(xmlrpclib.ServerProxy): - transportCls = (SoftLayerTransport, SoftLayerSafeTransport) - API_PREFIX = 'https://api.softlayer.com/xmlrpc/v3/' - - def __init__(self, service, user_agent, verbose=0): - cls = self.transportCls[0] - if SoftLayerProxy.API_PREFIX[:8] == "https://": - cls = self.transportCls[1] - t = cls(use_datetime=0) - t.user_agent = user_agent - xmlrpclib.ServerProxy.__init__( - self, - uri="%s/%s" % (SoftLayerProxy.API_PREFIX, service), - transport=t, - verbose=verbose - ) - - -class SoftLayerConnection(object): - """ - Connection class for the SoftLayer driver - """ +class SoftLayerResponse(XMLRPCResponse): + defaultExceptionCls = SoftLayerException + exceptions = { + 'SoftLayer_Account': InvalidCredsError, + } - proxyCls = SoftLayerProxy - driver = None - def __init__(self, user, key): - self.user = user - self.key = key - self.ua = [] +class SoftLayerConnection(XMLRPCConnection, ConnectionUserAndKey): + responseCls = SoftLayerResponse + endpoint = '/xmlrpc/v3/' def request(self, service, method, *args, **kwargs): - sl = self.proxyCls(service, self._user_agent()) - headers = {} headers.update(self._get_auth_headers()) headers.update(self._get_init_params(service, kwargs.get('id'))) headers.update( self._get_object_mask(service, kwargs.get('object_mask'))) - params = [{'headers': headers}] + list(args) - - try: - return getattr(sl, method)(*params) - except xmlrpclib.Fault: - e = sys.exc_info()[1] - if e.faultCode == "SoftLayer_Account": - raise 
InvalidCredsError(e.faultString) - raise SoftLayerException(e) + headers.update( + self._get_object_mask(service, kwargs.get('object_mask'))) - def _user_agent(self): - return 'libcloud/%s (%s)%s' % (libcloud.__version__, - self.driver.name, - "".join([" (%s)" % x for x in self.ua])) + args = ({'headers': headers}, ) + args + endpoint = '%s/%s' % (self.endpoint, service) - def user_agent_append(self, s): - self.ua.append(s) + return super(SoftLayerConnection, self).request(method, *args, + **{'endpoint': + endpoint}) def _get_auth_headers(self): return { 'authenticate': { - 'username': self.user, + 'username': self.user_id, 'apiKey': self.key } } @@ -262,30 +167,12 @@ class SoftLayerNodeDriver(NodeDriver): website = 'http://www.softlayer.com/' type = Provider.SOFTLAYER - features = {"create_node": ["generates_password"]} - - def __init__(self, key, secret=None, secure=False): - """ - @param key: API key or username to used (required) - @type key: C{str} - - @param secret: Secret password to be used (required) - @type secret: C{str} - - @param secure: Weither to use HTTPS or HTTP. 
- @type secure: C{bool} - - @rtype: C{None} - """ - self.key = key - self.secret = secret - self.connection = self.connectionCls(key, secret) - self.connection.driver = self + features = {'create_node': ['generates_password']} def _to_node(self, host): try: password = \ - host['softwareComponents'][0]['passwords'][0]['password'] + host['operatingSystem']['passwords'][0]['password'] except (IndexError, KeyError): password = None @@ -293,13 +180,13 @@ def _to_node(self, host): 'hourlyRecurringFee', 0) recurringFee = host.get('billingItem', {}).get('recurringFee', 0) recurringMonths = host.get('billingItem', {}).get('recurringMonths', 0) + createDate = host.get('createDate', None) return Node( id=host['id'], name=host['hostname'], state=NODE_STATE_MAP.get( - host['powerState']['keyName'], - NodeState.UNKNOWN + host['powerState']['keyName'], NodeState.UNKNOWN ), public_ips=[host['primaryIpAddress']], private_ips=[host['primaryBackendIpAddress']], @@ -309,59 +196,44 @@ def _to_node(self, host): 'hourlyRecurringFee': hourlyRecurringFee, 'recurringFee': recurringFee, 'recurringMonths': recurringMonths, + 'created': createDate, } ) - def _to_nodes(self, hosts): - return [self._to_node(h) for h in hosts] - def destroy_node(self, node): - billing_item = self.connection.request( - "SoftLayer_Virtual_Guest", - "getBillingItem", - id=node.id + self.connection.request( + 'SoftLayer_Virtual_Guest', 'deleteObject', id=node.id ) + return True - if billing_item: - res = self.connection.request( - "SoftLayer_Billing_Item", - "cancelService", - id=billing_item['id'] - ) - return res - else: - return False + def reboot_node(self, node): + self.connection.request( + 'SoftLayer_Virtual_Guest', 'rebootSoft', id=node.id + ) + return True - def _get_order_information(self, order_id, timeout=1200, check_interval=5): + def _get_order_information(self, node_id, timeout=1200, check_interval=5): mask = { - 'orderTopLevelItems': { - 'billingItem': { - 'resource': { - 'softwareComponents': 
{'passwords': ''}, - 'powerState': '', - } - }, - } + 'billingItem': '', + 'powerState': '', + 'operatingSystem': {'passwords': ''}, + 'provisionDate': '', } for i in range(0, timeout, check_interval): - try: - res = self.connection.request( - "SoftLayer_Billing_Order", - "getObject", - id=order_id, - object_mask=mask - ) - item = res['orderTopLevelItems'][0]['billingItem']['resource'] - if item['softwareComponents'][0]['passwords']: - return item - - except (KeyError, IndexError): - pass + res = self.connection.request( + 'SoftLayer_Virtual_Guest', + 'getObject', + id=node_id, + object_mask=mask + ).object + + if res.get('provisionDate', None): + return res time.sleep(check_interval) - return None + raise SoftLayerException('Timeout on getting node details') def create_node(self, **kwargs): """Create a new SoftLayer node @@ -370,55 +242,111 @@ def create_node(self, **kwargs): @keyword ex_domain: e.g. libcloud.org @type ex_domain: C{str} + @keyword ex_cpus: e.g. 2 + @type ex_cpus: C{int} + @keyword ex_disk: e.g. 100 + @type ex_disk: C{int} + @keyword ex_ram: e.g. 2048 + @type ex_ram: C{int} + @keyword ex_bandwidth: e.g. 100 + @type ex_bandwidth: C{int} + @keyword ex_local_disk: e.g. True + @type ex_local_disk: C{bool} + @keyword ex_datacenter: e.g. Dal05 + @type ex_datacenter: C{str} + @keyword ex_os: e.g. 
UBUNTU_LATEST + @type ex_os: C{str} """ name = kwargs['name'] - image = kwargs['image'] - size = kwargs['size'] + os = 'DEBIAN_LATEST' + if 'ex_os' in kwargs: + os = kwargs['ex_os'] + elif 'image' in kwargs: + os = kwargs['image'].id + + size = kwargs.get('size', NodeSize(id=None, name='Custom', ram=None, + disk=None, bandwidth=None, + price=None, + driver=self.connection.driver)) + + ex_size_data = SL_TEMPLATES.get(size.id) or {} + cpu_count = kwargs.get('ex_cpus') or ex_size_data.get('cpus') or 1 + ram = kwargs.get('ex_ram') or size.ram or 2048 + bandwidth = kwargs.get('ex_bandwidth') or size.bandwidth or 10 + hourly = 'true' if kwargs.get('ex_hourly', True) else 'false' + + local_disk = 'true' + if ex_size_data.get('local_disk') is False: + local_disk = 'false' + + if kwargs.get('ex_local_disk') is False: + local_disk = 'false' + + disk_size = 100 + if size.disk: + disk_size = size.disk + if kwargs.get('ex_disk'): + disk_size = kwargs.get('ex_disk') + + datacenter = '' + if 'ex_datacenter' in kwargs: + datacenter = kwargs['ex_datacenter'] + elif 'location' in kwargs: + datacenter = kwargs['location'].id + domain = kwargs.get('ex_domain') - location = kwargs['location'] if domain is None: - if name.find(".") != -1: + if name.find('.') != -1: domain = name[name.find('.') + 1:] - if domain is None: # TODO: domain is a required argument for the Sofylayer API, but it # it shouldn't be. 
- domain = "exmaple.com" - - res = {'prices': SL_TEMPLATES[size.id]['prices']} - res['packageId'] = DEFAULT_PACKAGE - res['prices'].append({'id': image.id}) # Add OS to order - res['location'] = location.id - res['complexType'] = 'SoftLayer_Container_Product_Order_Virtual_Guest' - res['quantity'] = 1 - res['useHourlyPricing'] = True - res['virtualGuests'] = [ - { - 'hostname': name, - 'domain': domain - } - ] + domain = 'example.com' + + newCCI = { + 'hostname': name, + 'domain': domain, + 'startCpus': cpu_count, + 'maxMemory': ram, + 'networkComponents': [{'maxSpeed': bandwidth}], + 'hourlyBillingFlag': hourly, + 'operatingSystemReferenceCode': os, + 'localDiskFlag': local_disk, + 'blockDevices': [ + { + 'device': '0', + 'diskImage': { + 'capacity': disk_size, + } + } + ] + + } + + if datacenter: + newCCI['datacenter'] = {'name': datacenter} res = self.connection.request( - "SoftLayer_Product_Order", - "placeOrder", - res - ) + 'SoftLayer_Virtual_Guest', 'createObject', newCCI + ).object - order_id = res['orderId'] - raw_node = self._get_order_information(order_id) + node_id = res['id'] + raw_node = self._get_order_information(node_id) return self._to_node(raw_node) def _to_image(self, img): return NodeImage( - id=img['id'], - name=img['name'], + id=img['template']['operatingSystemReferenceCode'], + name=img['itemPrice']['item']['description'], driver=self.connection.driver ) def list_images(self, location=None): - return [self._to_image(i) for i in SL_IMAGES] + result = self.connection.request( + 'SoftLayer_Virtual_Guest', 'getCreateObjectOptions' + ).object + return [self._to_image(i) for i in result['operatingSystems']] def _to_size(self, id, size): return NodeSize( @@ -426,38 +354,32 @@ def _to_size(self, id, size): name=size['name'], ram=size['ram'], disk=size['disk'], - bandwidth=size['bandwidth'], + bandwidth=size.get('bandwidth'), price=None, driver=self.connection.driver, ) def list_sizes(self, location=None): - return [self._to_size(id, s['imagedata']) 
for id, s in - list(SL_TEMPLATES.items())] + return [self._to_size(id, s) for id, s in SL_TEMPLATES.items()] def _to_loc(self, loc): - return NodeLocation( - id=loc['id'], - name=loc['name'], - country=DATACENTERS[loc['name']]['country'], - driver=self - ) + country = 'UNKNOWN' + if loc['name'] in DATACENTERS: + country = DATACENTERS[loc['name']]['country'] + return NodeLocation(id=loc['name'], name=loc['longName'], + country=country, driver=self) def list_locations(self): res = self.connection.request( - "SoftLayer_Location_Datacenter", - "getDatacenters" - ) - - # checking "in DATACENTERS", because some of the locations returned - # by getDatacenters are not useable. - return [self._to_loc(l) for l in res if l['name'] in DATACENTERS] + 'SoftLayer_Location_Datacenter', 'getDatacenters' + ).object + return [self._to_loc(l) for l in res] def list_nodes(self): mask = { 'virtualGuests': { 'powerState': '', - 'softwareComponents': {'passwords': ''}, + 'operatingSystem': {'passwords': ''}, 'billingItem': '', }, } @@ -465,14 +387,5 @@ def list_nodes(self): "SoftLayer_Account", "getVirtualGuests", object_mask=mask - ) - nodes = self._to_nodes(res) - return nodes - - def reboot_node(self, node): - res = self.connection.request( - "SoftLayer_Virtual_Guest", - "rebootHard", - id=node.id - ) - return res + ).object + return [self._to_node(h) for h in res] diff --git a/libcloud/compute/drivers/vcl.py b/libcloud/compute/drivers/vcl.py index 866d6e44e4..0d4e80f6d5 100644 --- a/libcloud/compute/drivers/vcl.py +++ b/libcloud/compute/drivers/vcl.py @@ -16,89 +16,30 @@ VCL driver """ -import sys import time -from libcloud.utils.py3 import xmlrpclib - +from libcloud.common.base import ConnectionUserAndKey +from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeDriver, Node from libcloud.compute.base import 
NodeSize, NodeImage -class VCLSafeTransport(xmlrpclib.SafeTransport): - def __init__(self, datetime, user, passwd, host): - - self._pass = passwd - self._use_datetime = datetime - self._connection = (None, None) - self._extra_headers = [] - - def send_content(self, connection, request_body): - connection.putheader('Content-Type', 'text/xml') - connection.putheader('X-APIVERSION', '2') - connection.putheader('X-User', self._user) - connection.putheader('X-Pass', self._pass) - connection.putheader('Content-Length', str(len(request_body))) - connection.endheaders(request_body) - - -class VCLProxy(xmlrpclib.ServerProxy): - API_POSTFIX = '/index.php?mode=xmlrpccall' - transportCls = VCLSafeTransport - - def __init__(self, user, key, secure, host, port, driver, verbose=False): - url = '' - cls = self.transportCls - - if secure: - url = 'https://' - port = port or 443 - else: - url = 'http://' - port = port or 80 - - url += host + ':' + str(port) - url += VCLProxy.API_POSTFIX - - self.API = url - t = cls(0, user, key, self.API) - - xmlrpclib.ServerProxy.__init__( - self, - uri=self.API, - transport=t, - verbose=verbose - ) - - -class VCLConnection(object): - """ - Connection class for the VCL driver - """ - - proxyCls = VCLProxy - driver = None +class VCLResponse(XMLRPCResponse): + exceptions = { + 'VCL_Account': InvalidCredsError, + } - def __init__(self, user, key, secure, host, port): - self.user = user - self.key = key - self.secure = secure - self.host = host - self.port = port - def request(self, method, *args, **kwargs): - sl = self.proxyCls(user=self.user, key=self.key, secure=self.secure, - host=self.host, port=self.port, driver=self.driver) +class VCLConnection(XMLRPCConnection, ConnectionUserAndKey): + endpoint = '/index.php?mode=xmlrpccall' - try: - return getattr(sl, method)(*args) - except xmlrpclib.Fault: - e = sys.exc_info()[1] - if e.faultCode == 'VCL_Account': - raise InvalidCredsError(e.faultString) - raise LibcloudError(e, driver=self.driver) + def 
add_default_headers(self, headers): + headers['X-APIVERSION'] = '2' + headers['X-User'] = self.user_id + headers['X-Pass'] = self.key + return headers class VCLNodeDriver(NodeDriver): @@ -151,17 +92,15 @@ def __init__(self, key, secret, secure=True, host=None, port=None, *args, raise Exception('When instantiating VCL driver directly ' + 'you also need to provide host') - self.key = key - self.host = host - self.secret = secret - self.connection = self.connectionCls(key, secret, secure, host, port) - self.connection.driver = self + super(VCLNodeDriver, self).__init__(key, secret, secure=True, + host=None, port=None, *args, + **kwargs) def _vcl_request(self, method, *args): res = self.connection.request( method, *args - ) + ).object if(res['status'] == 'error'): raise LibcloudError(res['errormsg'], driver=self) return res @@ -237,7 +176,7 @@ def list_images(self, location=None): """ res = self.connection.request( "XMLRPCgetImages" - ) + ).object return [self._to_image(i) for i in res] def list_sizes(self, location=None): diff --git a/libcloud/data/pricing.json b/libcloud/data/pricing.json index 090498ed11..6bbbcdcc13 100644 --- a/libcloud/data/pricing.json +++ b/libcloud/data/pricing.json @@ -57,8 +57,8 @@ "m2.xlarge": 0.50, "m2.2xlarge": 1.0, "m2.4xlarge": 2.0, - "m3.xlarge": 0.58, - "m3.2xlarge": 1.16, + "m3.xlarge": 0.50, + "m3.2xlarge": 1.00, "cg1.4xlarge": 2.1, "cc1.4xlarge": 1.3, "cc2.8xlarge": 2.4, @@ -76,7 +76,9 @@ "c1.xlarge": 0.76, "m2.xlarge": 0.57, "m2.2xlarge": 1.14, - "m2.4xlarge": 2.28 + "m2.4xlarge": 2.28, + "m3.xlarge": 0.55, + "m3.2xlarge": 1.10 }, "ec2_us_west_oregon": { @@ -90,6 +92,8 @@ "m2.xlarge": 0.50, "m2.2xlarge": 1.0, "m2.4xlarge": 2.0, + "m3.xlarge": 0.50, + "m3.2xlarge": 1.00, "cc2.8xlarge": 2.400 }, @@ -104,6 +108,8 @@ "m2.xlarge": 0.57, "m2.2xlarge": 1.14, "m2.4xlarge": 2.28, + "m3.xlarge": 0.55, + "m3.2xlarge": 1.10, "cc2.8xlarge": 2.7 }, @@ -117,7 +123,9 @@ "c1.xlarge": 0.76, "m2.xlarge": 0.57, "m2.2xlarge": 1.14, - "m2.4xlarge": 2.28 
+ "m2.4xlarge": 2.28, + "m3.xlarge": 0.70, + "m3.2xlarge": 1.40 }, "ec2_ap_northeast": { @@ -130,7 +138,9 @@ "c1.xlarge": 0.80, "m2.xlarge": 0.60, "m2.2xlarge": 1.20, - "m2.4xlarge": 2.39 + "m2.4xlarge": 2.39, + "m3.xlarge": 0.76, + "m3.2xlarge": 1.52 }, "ec2_sa_east": { @@ -156,7 +166,9 @@ "c1.xlarge": 0.744, "m2.xlarge": 0.506, "m2.2xlarge": 1.012, - "m2.4xlarge": 2.024 + "m2.4xlarge": 2.024, + "m3.xlarge": 0.70, + "m3.2xlarge": 1.40 }, "nimbus" : { diff --git a/libcloud/dns/drivers/gandi.py b/libcloud/dns/drivers/gandi.py index 83f2933ef2..8a8f68a0a8 100644 --- a/libcloud/dns/drivers/gandi.py +++ b/libcloud/dns/drivers/gandi.py @@ -20,6 +20,7 @@ ] from libcloud.common.gandi import BaseGandiDriver, GandiConnection +from libcloud.common.gandi import GandiResponse from libcloud.dns.types import Provider, RecordType from libcloud.dns.types import RecordError from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError @@ -51,7 +52,7 @@ def __init__(self, driver, zone): def __enter__(self): zid = int(self.zone.id) self.connection.set_context({'zone_id': self.zone.id}) - vid = self.connection.request('domain.zone.version.new', zid) + vid = self.connection.request('domain.zone.version.new', zid).object self.vid = vid return vid @@ -60,16 +61,17 @@ def __exit__(self, type, value, traceback): zid = int(self.zone.id) con = self.connection con.set_context({'zone_id': self.zone.id}) - con.request('domain.zone.version.set', zid, self.vid) + con.request('domain.zone.version.set', zid, self.vid).object -class GandiDNSConnection(GandiConnection): +class GandiDNSResponse(GandiResponse): + exceptions = { + 581042: ZoneDoesNotExistError, + } + - def parse_error(self, code, message): - if code == 581042: - zone_id = str(self.context.get('zone_id', None)) - raise ZoneDoesNotExistError(value='', driver=self.driver, - zone_id=zone_id) +class GandiDNSConnection(GandiConnection): + responseCls = GandiDNSResponse class GandiDNSDriver(BaseGandiDriver, DNSDriver): @@ 
-100,7 +102,7 @@ class GandiDNSDriver(BaseGandiDriver, DNSDriver): def _to_zone(self, zone): return Zone( - id=zone['id'], + id=str(zone['id']), domain=zone['name'], type='master', ttl=0, @@ -116,31 +118,33 @@ def _to_zones(self, zones): def list_zones(self): zones = self.connection.request('domain.zone.list') - return self._to_zones(zones) + return self._to_zones(zones.object) def get_zone(self, zone_id): zid = int(zone_id) - self.connection.set_context({'zone_id': zid}) + self.connection.set_context({'zone_id': zone_id}) zone = self.connection.request('domain.zone.info', zid) - return self._to_zone(zone) + return self._to_zone(zone.object) def create_zone(self, domain, type='master', ttl=None, extra=None): - params = {'name': domain} + params = { + 'name': domain, + } info = self.connection.request('domain.zone.create', params) - return self._to_zone(info) + return self._to_zone(info.object) def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): zid = int(zone.id) params = {'name': domain} - self.connection.set_context({'zone_id': zid}) + self.connection.set_context({'zone_id': zone.id}) zone = self.connection.request('domain.zone.update', zid, params) - return self._to_zone(zone) + return self._to_zone(zone.object) def delete_zone(self, zone): zid = int(zone.id) - self.connection.set_context({'zone_id': zid}) + self.connection.set_context({'zone_id': zone.id}) res = self.connection.request('domain.zone.delete', zid) - return res + return res.object def _to_record(self, record, zone): return Record( @@ -161,9 +165,9 @@ def _to_records(self, records, zone): def list_records(self, zone): zid = int(zone.id) - self.connection.set_context({'zone_id': zid}) + self.connection.set_context({'zone_id': zone.id}) records = self.connection.request('domain.zone.record.list', zid, 0) - return self._to_records(records, zone) + return self._to_records(records.object, zone) def get_record(self, zone_id, record_id): zid = int(zone_id) @@ -172,9 +176,9 @@ def 
get_record(self, zone_id, record_id): 'name': name, 'type': record_type } - self.connection.set_context({'zone_id': zid}) + self.connection.set_context({'zone_id': zone_id}) records = self.connection.request('domain.zone.record.list', - zid, 0, filter_opts) + zid, 0, filter_opts).object if len(records) == 0: raise RecordDoesNotExistError(value='', driver=self, @@ -210,9 +214,9 @@ def create_record(self, name, zone, type, data, extra=None): with NewZoneVersion(self, zone) as vid: con = self.connection - con.set_context({'zone_id': zid}) + con.set_context({'zone_id': zone.id}) rec = con.request('domain.zone.record.add', - zid, vid, create) + zid, vid, create).object return self._to_record(rec, zone) @@ -237,11 +241,11 @@ def update_record(self, record, name, type, data, extra): with NewZoneVersion(self, record.zone) as vid: con = self.connection - con.set_context({'zone_id': zid}) + con.set_context({'zone_id': record.zone.id}) con.request('domain.zone.record.delete', zid, vid, filter_opts) res = con.request('domain.zone.record.add', - zid, vid, update) + zid, vid, update).object return self._to_record(res, record.zone) @@ -255,9 +259,9 @@ def delete_record(self, record): with NewZoneVersion(self, record.zone) as vid: con = self.connection - con.set_context({'zone_id': zid}) + con.set_context({'zone_id': record.zone.id}) count = con.request('domain.zone.record.delete', - zid, vid, filter_opts) + zid, vid, filter_opts).object if count == 1: return True diff --git a/libcloud/dns/drivers/hostvirtual.py b/libcloud/dns/drivers/hostvirtual.py index bab8feb752..c0ac57a01f 100644 --- a/libcloud/dns/drivers/hostvirtual.py +++ b/libcloud/dns/drivers/hostvirtual.py @@ -21,8 +21,6 @@ from libcloud.common.hostvirtual import HostVirtualResponse from libcloud.common.hostvirtual import HostVirtualConnection from libcloud.compute.drivers.hostvirtual import API_ROOT -from libcloud.compute.drivers.hostvirtual import HostVirtualConnection -from libcloud.compute.drivers.hostvirtual import 
HostVirtualResponse from libcloud.dns.types import Provider, RecordType from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError from libcloud.dns.base import DNSDriver, Zone, Record diff --git a/libcloud/test/common/test_gandi.py b/libcloud/test/common/test_gandi.py index 635266086b..962bd29188 100644 --- a/libcloud/test/common/test_gandi.py +++ b/libcloud/test/common/test_gandi.py @@ -1,33 +1,15 @@ -import sys -import unittest - -from xml.etree import ElementTree as ET - from libcloud.utils.py3 import xmlrpclib +from libcloud.test import MockHttp -class MockGandiTransport(xmlrpclib.Transport): - - def request(self, host, handler, request_body, verbose=0): - self.verbose = 0 - method = ET.XML(request_body).find('methodName').text - mock = self.mockCls(host, 80) - mock.request('POST', '%s/%s' % (handler, method)) - resp = mock.getresponse() - - if sys.version[0] == '2' and sys.version[2] == '7': - response = self.parse_response(resp) - else: - response = self.parse_response(resp.body) - return response - +class BaseGandiMockHttp(MockHttp): -class BaseGandiTests(unittest.TestCase): + def _get_method_name(self, type, use_param, qs, path): + return "_xmlrpc" - def setUp(self): - d = self.driverCls - t = self.transportCls - t.mockCls.type = None - d.connectionCls.proxyCls.transportCls = \ - [t, t] - self.driver = d(*self.params) + def _xmlrpc(self, method, url, body, headers): + params, methodName = xmlrpclib.loads(body) + meth_name = '_xmlrpc__' + methodName.replace('.', '_') + if self.type: + meth_name = '%s_%s' % (meth_name, self.type) + return getattr(self, meth_name)(method, url, body, headers) diff --git a/libcloud/test/compute/fixtures/hostvirtual/get_node.json b/libcloud/test/compute/fixtures/hostvirtual/get_node.json new file mode 100644 index 0000000000..5f00d4dfbf --- /dev/null +++ b/libcloud/test/compute/fixtures/hostvirtual/get_node.json @@ -0,0 +1,16 @@ +{ + "host": "659", + "rescue": "0", + "fqdn": "server1.vr-cluster.org", + 
"mbpkgid": "62291", + "locked": "0", + "os_id": "1613", + "os": "Debian 6 i386 PV", + "ip": "208.111.45.250", + "installed": "0", + "state": "DOWN", + "package": "VR512", + "ipv6": "2607:f740:10::f98", + "city": "MAA - Chennai (Madras), India", + "status": "TERMINATED" +} diff --git a/libcloud/test/compute/fixtures/hostvirtual/list_nodes.json b/libcloud/test/compute/fixtures/hostvirtual/list_nodes.json index 75ab83476e..3eb8ba6b00 100644 --- a/libcloud/test/compute/fixtures/hostvirtual/list_nodes.json +++ b/libcloud/test/compute/fixtures/hostvirtual/list_nodes.json @@ -1,62 +1,72 @@ -[ - { - "host": "659", - "rescue": "0", - "fqdn": "server1.vr-cluster.org", - "mbpkgid": "62291", - "locked": "0", - "os": "Debian 6 i386 PV", - "ip": "208.111.45.250", - "installed": "0", - "state": "DOWN", - "package": "VR512", - "ipv6": "2607:f740:10::f98", - "city": "MAA - Chennai (Madras), India", - "status": "TERMINATED" - }, - { - "host": "902", - "rescue": "0", - "fqdn": "newbuild.vr.com", - "mbpkgid": "62327", - "locked": "0", - "os": "CentOS 5.8 x64", - "ip": "208.111.39.118", - "installed": "0", - "state": "DOWN", - "package": "VR512", - "ipv6": "2607:f740:0:3f::f0d", - "city": "SJC - San Jose, CA", - "status": "TERMINATED" - }, - { - "host": "1010", - "rescue": "0", - "fqdn": "3test.build.com", - "mbpkgid": "62300", - "locked": "0", - "os": "CentOS 6.2 x64", - "ip": "208.111.40.179", - "installed": "0", - "state": "DOWN", - "package": "VR512", - "ipv6": "2607:f740:c::f4f", - "city": "LAX3 - Los Angeles, CA", - "status": "TERMINATED" - }, - { - "host": "1028", - "rescue": "0", - "fqdn": "libcloud2.node.com", - "mbpkgid": "74567", - "locked": "0", - "os": "CentOS 5.8 x64", - "ip": "209.177.157.99", - "installed": "1", - "state": "UP", - "package": "VR512", - "ipv6": "2607:f740:b::eff", - "city": "IAD2- Reston, VA", - "status": "RUNNING" - } -] +[{ + "host": "659", + "rescue": "0", + "fqdn": "server1.vr-cluster.org", + "mbpkgid": "62291", + "locked": "0", + "os": "Debian 6 
i386 PV", + "ip": "208.111.45.250", + "installed": "0", + "state": "DOWN", + "package": "VR512", + "ipv6": "2607:f740:10::f98", + "city": "MAA - Chennai (Madras), India", + "status": "TERMINATED", + "os_id" : "1613", + "location_id" : "3", + "plan_id" : "51" +}, +{ + "host": "902", + "rescue": "0", + "fqdn": "newbuild.vr.com", + "mbpkgid": "62327", + "locked": "0", + "os": "CentOS 5.8 x64", + "ip": "208.111.39.118", + "installed": "0", + "state": "DOWN", + "package": "VR512", + "ipv6": "2607:f740:0:3f::f0d", + "city": "SJC - San Jose, CA", + "status": "TERMINATED", + "os_id" : "1613", + "location_id" : "3", + "plan_id" : "51" +}, +{ + "host": "1010", + "rescue": "0", + "fqdn": "3test.build.com", + "mbpkgid": "62300", + "locked": "0", + "os": "CentOS 6.2 x64", + "ip": "208.111.40.179", + "installed": "0", + "state": "DOWN", + "package": "VR512", + "ipv6": "2607:f740:c::f4f", + "city": "LAX3 - Los Angeles, CA", + "status": "TERMINATED", + "os_id" : "1613", + "location_id" : "3", + "plan_id" : "51" +}, +{ + "host": "1028", + "rescue": "0", + "fqdn": "libcloud2.node.com", + "mbpkgid": "74567", + "locked": "0", + "os": "CentOS 5.8 x64", + "ip": "209.177.157.99", + "installed": "1", + "state": "UP", + "package": "VR512", + "ipv6": "2607:f740:b::eff", + "city": "IAD2- Reston, VA", + "status": "RUNNING", + "os_id" : "1613", + "location_id" : "3", + "plan_id" : "51" +}] diff --git a/libcloud/test/compute/fixtures/hostvirtual/list_sizes.json b/libcloud/test/compute/fixtures/hostvirtual/list_sizes.json index 410173b011..ec7f9afe53 100644 --- a/libcloud/test/compute/fixtures/hostvirtual/list_sizes.json +++ b/libcloud/test/compute/fixtures/hostvirtual/list_sizes.json @@ -1,82 +1,128 @@ [ { + "plan_id": "31", "plan": "VR256", "ram": "256MB", "disk": "10GB", "transfer": "200GB", "price": "10.00", - "available": "1421" + "available": "1167" }, { + "plan_id": "41", "plan": "VR384", "ram": "384MB", "disk": "15GB", "transfer": "300GB", "price": "15.00", - "available": "939" + 
"available": "768" }, { + "plan_id": "51", "plan": "VR512", "ram": "512MB", "disk": "20GB", "transfer": "400GB", "price": "20.00", - "available": "713" + "available": "620" }, { + "plan_id": "61", "plan": "VR768", "ram": "768MB", "disk": "30GB", "transfer": "600GB", "price": "30.00", - "available": "476" + "available": "403" }, { + "plan_id": "71", "plan": "VR1024", "ram": "1024MB", "disk": "40GB", "transfer": "800GB", "price": "40.00", - "available": "350" + "available": "304" }, { + "plan_id": "81", "plan": "VR1280", "ram": "1280MB", "disk": "50GB", "transfer": "1000GB", "price": "50.00", - "available": "276" + "available": "234" }, { + "plan_id": "91", "plan": "VR1536", "ram": "1536MB", "disk": "60GB", "transfer": "1200GB", "price": "60.00", - "available": "226" + "available": "190" }, { + "plan_id": "101", "plan": "VR2048", "ram": "2048MB", "disk": "80GB", "transfer": "1600GB", "price": "80.00", - "available": "160" + "available": "138" }, { + "plan_id": "128", + "plan": "VRBL1G", + "ram": "1024MB", + "disk": "50GB", + "transfer": "1000GB", + "price": "150.00", + "available": "34" + }, + { + "plan_id": "111", "plan": "VR4048", "ram": "4048MB", "disk": "160GB", "transfer": "3200GB", "price": "160.00", - "available": "69" + "available": "60" + }, + { + "plan_id": "137", + "plan": "VRBL2G", + "ram": "2048MB", + "disk": "100GB", + "transfer": "2000GB", + "price": "200.00", + "available": "16" + }, + { + "plan_id": "146", + "plan": "VRBL4G", + "ram": "4048MB", + "disk": "150GB", + "transfer": "3000GB", + "price": "300.00", + "available": "8" }, { + "plan_id": "119", "plan": "VR8096", "ram": "8096MB", "disk": "320GB", "transfer": "6400GB", "price": "320.00", - "available": "9" + "available": "11" + }, + { + "plan_id": "155", + "plan": "VRBL8G", + "ram": "8096MB", + "disk": "200GB", + "transfer": "5000GB", + "price": "400.00", + "available": "4" } ] diff --git a/libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml 
b/libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml new file mode 100644 index 0000000000..0f38ef053f --- /dev/null +++ b/libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml @@ -0,0 +1,17 @@ + + + + + + + faultCode + SoftLayer_Account + + + faultString + Failed Call + + + + + diff --git a/libcloud/test/compute/fixtures/softlayer/empty.xml b/libcloud/test/compute/fixtures/softlayer/empty.xml new file mode 100644 index 0000000000..1acdcc956a --- /dev/null +++ b/libcloud/test/compute/fixtures/softlayer/empty.xml @@ -0,0 +1,2 @@ + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/softlayer/fail.xml b/libcloud/test/compute/fixtures/softlayer/fail.xml new file mode 100644 index 0000000000..4cd0162f66 --- /dev/null +++ b/libcloud/test/compute/fixtures/softlayer/fail.xml @@ -0,0 +1,17 @@ + + + + + + + faultCode + fail + + + faultString + Failed Call + + + + + diff --git a/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml b/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml index 0452f07a64..ddabcc5ee0 100644 --- a/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml +++ b/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml @@ -116,9 +116,8 @@ - softwareComponents + operatingSystem - @@ -198,7 +197,6 @@ - @@ -1064,3 +1062,4 @@ + diff --git a/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml b/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml index 9b3799dee6..b70419488e 100644 --- a/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml +++ b/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml @@ -15,13 +15,13 @@ name - dal00 + dal05 longName - Corporate HQ + Dallas 5 @@ -92,6 +92,28 @@ + + + + id + + 12345 + + + + name + + newcity01 + + + + 
longName + + New City + + + + diff --git a/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xml b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xml new file mode 100644 index 0000000000..09659cde75 --- /dev/null +++ b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xml @@ -0,0 +1,142 @@ + + + + + + + accountId + + 12345 + + + + createDate + + 2013-01-01T19:31:22-06:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + domain + + domain.com + + + + fullyQualifiedDomainName + + hostname.domain.com + + + + hostname + + hostname + + + + id + + 123456 + + + + lastPowerStateId + + + + + + lastVerifiedDate + + + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 2048 + + + + metricPollDate + + + + + + modifyDate + + + + + + privateNetworkOnlyFlag + + 0 + + + + startCpus + + 2 + + + + statusId + + 1001 + + + + globalIdentifier + + f47ac10b-58cc-4372-a567-0e02b2c3d479 + + + + managedResourceFlag + + 0 + + + + powerState + + + + keyName + + HALTED + + + + name + + Halted + + + + + + + + + diff --git a/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml new file mode 100644 index 0000000000..881915d74c --- /dev/null +++ b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml @@ -0,0 +1,67 @@ + + + + + + + operatingSystems + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + CentOS 6.0 - Minimal Install (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + CENTOS_6_64 + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xml 
b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xml new file mode 100644 index 0000000000..868c49feba --- /dev/null +++ b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xml @@ -0,0 +1,519 @@ + + + + + + + accountId + + 12345 + + + + createDate + + 2013-01-01T19:31:22-06:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + domain + + domain.com + + + + fullyQualifiedDomainName + + hostname.domain.com + + + + hostname + + hostname + + + + id + + 123456 + + + + lastPowerStateId + + 1 + + + + lastVerifiedDate + + + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 2048 + + + + metricPollDate + + + + + + modifyDate + + 2013-01-01T19:36:47-06:00 + + + + privateNetworkOnlyFlag + + 0 + + + + startCpus + + 2 + + + + statusId + + 1001 + + + + uuid + + f47ac10b-58cc-4372-a567-0e02b2c3d479 + + + + billingItem + + + + allowCancellationFlag + + 1 + + + + cancellationDate + + + + + + categoryCode + + guest_core + + + + createDate + + 2013-01-01T19:31:26-06:00 + + + + currentHourlyCharge + + 0 + + + + cycleStartDate + + 2013-01-01T19:38:00-06:00 + + + + description + + 2 x 2.0 GHz Cores + + + + domainName + + domain.com + + + + hostName + + hostname + + + + hourlyRecurringFee + + 0 + + + + hoursUsed + + 1 + + + + id + + 12345678 + + + + laborFee + + 0 + + + + laborFeeTaxRate + + .066 + + + + lastBillDate + + 2013-01-01T19:38:00-06:00 + + + + modifyDate + + 2013-01-01T19:38:00-06:00 + + + + nextBillDate + + 2013-01-19T00:00:00-06:00 + + + + oneTimeFee + + 0 + + + + oneTimeFeeTaxRate + + .066 + + + + orderItemId + + 98765 + + + + parentId + + + + + + recurringFee + + 0 + + + + recurringFeeTaxRate + + .066 + + + + recurringMonths + + 1 + + + + serviceProviderId + + 1 + + + + setupFee + + 0 + + + + setupFeeTaxRate + + .066 + + + + resourceTableId + + 1412553 + + + + + + + globalIdentifier + + f47ac10b-58cc-4372-a567-0e02b2c3d479 + + + + managedResourceFlag + + 0 + + + + operatingSystem + + + + 
hardwareId + + + + + + id + + 12345 + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + createDate + + 2013-01-01T19:33:12-06:00 + + + + id + + 12345 + + + + modifyDate + + 2013-01-01T19:33:12-06:00 + + + + password + + abczyx + + + + port + + + + + + softwareId + + 1809157 + + + + username + + root + + + + + + + + + + softwareLicense + + + + id + + 1104 + + + + softwareDescriptionId + + 1025 + + + + softwareDescription + + + + controlPanel + + 0 + + + + id + + 1025 + + + + manufacturer + + Ubuntu + + + + name + + Ubuntu + + + + operatingSystem + + 1 + + + + referenceCode + + UBUNTU_12_64 + + + + requiredUser + + root + + + + upgradeSoftwareDescriptionId + + + + + + upgradeSwDescId + + + + + + version + + 12.04-64 Minimal for CCI + + + + virtualLicense + + 0 + + + + virtualizationPlatform + + 0 + + + + + + + + + + + + + powerState + + + + keyName + + RUNNING + + + + name + + Running + + + + + + + primaryBackendIpAddress + + 10.80.10.10 + + + + primaryIpAddress + + 173.193.10.10 + + + + provisionDate + + 2013-01-01T19:38:01-06:00 + + + + + + diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 868d5c8859..95f8fd7155 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -231,9 +231,9 @@ def test_list_sizes(self): self.assertTrue('cc2.8xlarge' in ids) self.assertTrue('cr1.8xlarge' in ids) elif region_name == 'eu-west-1': - self.assertEqual(len(sizes), 11) + self.assertEqual(len(sizes), 13) else: - self.assertEqual(len(sizes), 10) + self.assertEqual(len(sizes), 12) self.driver.region_name = region_old diff --git a/libcloud/test/compute/test_gandi.py b/libcloud/test/compute/test_gandi.py index ff0e037d99..f1076825aa 100644 --- a/libcloud/test/compute/test_gandi.py +++ b/libcloud/test/compute/test_gandi.py @@ -19,125 +19,26 @@ import string from libcloud.utils.py3 import httplib -from libcloud.utils.py3 import xmlrpclib from libcloud.compute.drivers.gandi import GandiNodeDriver 
-from libcloud.compute.base import StorageVolume from libcloud.common.gandi import GandiException from libcloud.compute.types import NodeState -from xml.etree import ElementTree as ET -from libcloud.test import MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import GANDI_PARAMS -from libcloud.test.common.test_gandi import MockGandiTransport, BaseGandiTests +from libcloud.test.common.test_gandi import BaseGandiMockHttp -class GandiMockHttp(MockHttp): - - fixtures = ComputeFileFixtures('gandi') - - def _xmlrpc__datacenter_list(self, method, url, body, headers): - body = self.fixtures.load('datacenter_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__image_list(self, method, url, body, headers): - body = self.fixtures.load('image_list_dc0.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_list(self, method, url, body, headers): - body = self.fixtures.load('vm_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__ip_list(self, method, url, body, headers): - body = self.fixtures.load('ip_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__account_info(self, method, url, body, headers): - body = self.fixtures.load('account_info.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_info(self, method, url, body, headers): - body = self.fixtures.load('vm_info.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_delete(self, method, url, body, headers): - body = self.fixtures.load('vm_delete.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__operation_info(self, method, url, body, headers): - body = self.fixtures.load('operation_info.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_create_from(self, method, url, body, headers): - 
body = self.fixtures.load('vm_create_from.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_reboot(self, method, url, body, headers): - body = self.fixtures.load('vm_reboot.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_stop(self, method, url, body, headers): - body = self.fixtures.load('vm_stop.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__iface_list(self, method, url, body, headers): - body = self.fixtures.load('iface_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_list(self, method, url, body, headers): - body = self.fixtures.load('disk_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_iface_attach(self, method, url, body, headers): - body = self.fixtures.load('iface_attach.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_iface_detach(self, method, url, body, headers): - body = self.fixtures.load('iface_detach.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_disk_attach(self, method, url, body, headers): - body = self.fixtures.load('disk_attach.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__vm_disk_detach(self, method, url, body, headers): - body = self.fixtures.load('disk_detach.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_create(self, method, url, body, headers): - body = self.fixtures.load('disk_create.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_create_from(self, method, url, body, headers): - body = self.fixtures.load('disk_create_from.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_info(self, method, url, body, headers): - body = self.fixtures.load('disk_info.xml') - return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) - - def _xmlrpc__disk_update(self, method, url, body, headers): - body = self.fixtures.load('disk_update.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__disk_delete(self, method, url, body, headers): - body = self.fixtures.load('disk_delete.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - -class DummyTransport(MockGandiTransport): - mockCls = GandiMockHttp - - -class GandiTests(BaseGandiTests): - - driverCls = GandiNodeDriver - transportCls = DummyTransport - params = GANDI_PARAMS +class GandiTests(unittest.TestCase): node_name = 'test2' + def setUp(self): + GandiNodeDriver.connectionCls.conn_classes = ( + GandiMockHttp, GandiMockHttp) + GandiMockHttp.type = None + self.driver = GandiNodeDriver(*GANDI_PARAMS) + def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertTrue(len(nodes) > 0) @@ -254,5 +155,98 @@ def test_ex_update_disk(self): self.assertTrue(self.driver.ex_update_disk(disks[0], new_size=4096)) +class GandiMockHttp(BaseGandiMockHttp): + + fixtures = ComputeFileFixtures('gandi') + + def _xmlrpc__datacenter_list(self, method, url, body, headers): + body = self.fixtures.load('datacenter_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__image_list(self, method, url, body, headers): + body = self.fixtures.load('image_list_dc0.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_list(self, method, url, body, headers): + body = self.fixtures.load('vm_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__ip_list(self, method, url, body, headers): + body = self.fixtures.load('ip_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__account_info(self, method, url, body, headers): + body = self.fixtures.load('account_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def 
_xmlrpc__vm_info(self, method, url, body, headers): + body = self.fixtures.load('vm_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_delete(self, method, url, body, headers): + body = self.fixtures.load('vm_delete.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__operation_info(self, method, url, body, headers): + body = self.fixtures.load('operation_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_create_from(self, method, url, body, headers): + body = self.fixtures.load('vm_create_from.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_reboot(self, method, url, body, headers): + body = self.fixtures.load('vm_reboot.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_stop(self, method, url, body, headers): + body = self.fixtures.load('vm_stop.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__iface_list(self, method, url, body, headers): + body = self.fixtures.load('iface_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_list(self, method, url, body, headers): + body = self.fixtures.load('disk_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_iface_attach(self, method, url, body, headers): + body = self.fixtures.load('iface_attach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_iface_detach(self, method, url, body, headers): + body = self.fixtures.load('iface_detach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_disk_attach(self, method, url, body, headers): + body = self.fixtures.load('disk_attach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__vm_disk_detach(self, method, url, body, headers): + body = 
self.fixtures.load('disk_detach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_create(self, method, url, body, headers): + body = self.fixtures.load('disk_create.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_create_from(self, method, url, body, headers): + body = self.fixtures.load('disk_create_from.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_info(self, method, url, body, headers): + body = self.fixtures.load('disk_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_update(self, method, url, body, headers): + body = self.fixtures.load('disk_update.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__disk_delete(self, method, url, body, headers): + body = self.fixtures.load('disk_delete.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if __name__ == '__main__': sys.exit(unittest.main()) diff --git a/libcloud/test/compute/test_hostvirtual.py b/libcloud/test/compute/test_hostvirtual.py index 36ed596944..9a83b1588d 100644 --- a/libcloud/test/compute/test_hostvirtual.py +++ b/libcloud/test/compute/test_hostvirtual.py @@ -45,9 +45,9 @@ def test_list_nodes(self): def test_list_sizes(self): sizes = self.driver.list_sizes() - self.assertEqual(len(sizes), 10) - self.assertEqual(sizes[0].id, 'VR256') - self.assertEqual(sizes[4].id, 'VR1024') + self.assertEqual(len(sizes), 14) + self.assertEqual(sizes[0].id, '31') + self.assertEqual(sizes[4].id, '71') self.assertEqual(sizes[2].ram, '512MB') self.assertEqual(sizes[2].disk, '20GB') self.assertEqual(sizes[3].bandwidth, '600GB') @@ -70,11 +70,18 @@ def test_reboot_node(self): node = self.driver.list_nodes()[0] self.assertTrue(self.driver.reboot_node(node)) - def test_stop_node(self): + def test_ex_get_node(self): + node = self.driver.ex_get_node(node_id='62291') + self.assertEqual(node.id, '62291') 
+ self.assertEqual(node.name, 'server1.vr-cluster.org') + self.assertEqual(node.state, NodeState.TERMINATED) + self.assertTrue('208.111.45.250' in node.public_ips) + + def test_ex_stop_node(self): node = self.driver.list_nodes()[0] self.assertTrue(self.driver.ex_stop_node(node)) - def test_start_node(self): + def test_ex_start_node(self): node = self.driver.list_nodes()[0] self.assertTrue(self.driver.ex_start_node(node)) @@ -82,6 +89,10 @@ def test_destroy_node(self): node = self.driver.list_nodes()[0] self.assertTrue(self.driver.destroy_node(node)) + def test_ex_delete_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.ex_delete_node(node)) + def test_create_node(self): auth = NodeAuthPassword('vr!@#hosted#@!') size = self.driver.list_sizes()[0] @@ -95,6 +106,14 @@ def test_create_node(self): self.assertEqual('76070', node.id) self.assertEqual('test.com', node.name) + def test_ex_build_node(self): + node = self.driver.list_nodes()[0] + auth = NodeAuthPassword('vr!@#hosted#@!') + self.assertTrue(self.driver.ex_build_node( + node=node, + auth=auth + )) + def test_create_node_in_location(self): auth = NodeAuthPassword('vr!@#hosted#@!') size = self.driver.list_sizes()[0] @@ -118,6 +137,10 @@ def _vapi_cloud_servers(self, method, url, body, headers): body = self.fixtures.load('list_nodes.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _vapi_cloud_server(self, method, url, body, headers): + body = self.fixtures.load('get_node.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _vapi_cloud_sizes(self, method, url, body, headers): body = self.fixtures.load('list_sizes.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) @@ -150,6 +173,14 @@ def _vapi_cloud_buy_build(self, method, url, body, headers): body = self.fixtures.load('create_node.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _vapi_cloud_server_build(self, method, url, body, headers): + body = 
self.fixtures.load('create_node.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _vapi_cloud_server_delete(self, method, url, body, headers): + body = self.fixtures.load('node_destroy.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + if __name__ == '__main__': sys.exit(unittest.main()) diff --git a/libcloud/test/compute/test_softlayer.py b/libcloud/test/compute/test_softlayer.py index 77d8e4bfbb..fc58d12e68 100644 --- a/libcloud/test/compute/test_softlayer.py +++ b/libcloud/test/compute/test_softlayer.py @@ -18,37 +18,28 @@ from xml.etree import ElementTree as ET +from libcloud.common.types import InvalidCredsError + from libcloud.utils.py3 import httplib from libcloud.utils.py3 import xmlrpclib from libcloud.utils.py3 import next +from libcloud.utils.py3 import u from libcloud.compute.drivers.softlayer import SoftLayerNodeDriver as SoftLayer +from libcloud.compute.drivers.softlayer import SoftLayerException from libcloud.compute.types import NodeState from libcloud.test import MockHttp # pylint: disable-msg=E0611 -from libcloud.test.file_fixtures import ComputeFileFixtures # pylint: disable-msg=E0611 +from libcloud.test.file_fixtures import ComputeFileFixtures # pylint: disable-msg=E0611 from libcloud.test.secrets import SOFTLAYER_PARAMS -class MockSoftLayerTransport(xmlrpclib.Transport): - - def request(self, host, handler, request_body, verbose=0): - self.verbose = 0 - method = ET.XML(request_body).find('methodName').text - mock = SoftLayerMockHttp(host, 80) - mock.request('POST', "%s/%s" % (handler, method)) - resp = mock.getresponse() - - if sys.version[0] == '2' and sys.version[2] == '7': - response = self.parse_response(resp) - else: - response = self.parse_response(resp.body) - return response class SoftLayerTests(unittest.TestCase): def setUp(self): - SoftLayer.connectionCls.proxyCls.transportCls = [ - MockSoftLayerTransport, MockSoftLayerTransport] + SoftLayer.connectionCls.conn_classes = ( + 
SoftLayerMockHttp, SoftLayerMockHttp) + SoftLayerMockHttp.type = None self.driver = SoftLayer(*SOFTLAYER_PARAMS) def test_list_nodes(self): @@ -59,36 +50,142 @@ def test_list_nodes(self): def test_list_locations(self): locations = self.driver.list_locations() - seattle = next(l for l in locations if l.name == 'sea01') - self.assertEqual(seattle.country, 'US') - self.assertEqual(seattle.id, '18171') + dal = next(l for l in locations if l.id == 'dal05') + self.assertEqual(dal.country, 'US') + self.assertEqual(dal.id, 'dal05') + self.assertEqual(dal.name, 'Dallas 5') def test_list_images(self): images = self.driver.list_images() image = images[0] - self.assertEqual(image.id, '1684') + self.assertEqual(image.id, 'CENTOS_6_64') def test_list_sizes(self): sizes = self.driver.list_sizes() - self.assertEqual(len(sizes), 2) - size = [s for s in sizes if s.id == 'sl1'] + self.assertEqual(len(sizes), 10) + size = [s for s in sizes if s.id == 'sl2_local_disk'] self.assertEqual(len(size), 1) + def test_create_node(self): + self.driver.create_node(name="Test", + location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0]) + + def test_create_fail(self): + SoftLayerMockHttp.type = "SOFTLAYEREXCEPTION" + self.assertRaises( + SoftLayerException, + self.driver.create_node, + name="SOFTLAYEREXCEPTION", + location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0]) + + def test_create_creds_error(self): + SoftLayerMockHttp.type = "INVALIDCREDSERROR" + self.assertRaises( + InvalidCredsError, + self.driver.create_node, + name="INVALIDCREDSERROR", + location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0]) + + def test_create_node_no_location(self): + self.driver.create_node(name="Test", + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0]) + + def test_create_node_no_image(self): + 
self.driver.create_node(name="Test", size=self.driver.list_sizes()[0]) + + def test_create_node_san(self): + size = [s for s in self.driver.list_sizes() if 'san' in s.id][0] + self.driver.create_node(name="Test", size=size) + + def test_create_node_domain_for_name(self): + size = [s for s in self.driver.list_sizes() if 'san' in s.id][0] + self.driver.create_node(name="libcloud.org") + + def test_create_node_ex_options(self): + self.driver.create_node(name="Test", + location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0], + ex_domain='libcloud.org', + ex_cpus=2, + ex_ram=2048, + ex_disk=100, + ex_bandwidth=10, + ex_local_disk=False, + ex_datacenter='Dal05', + ex_os='UBUNTU_LATEST') + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + self.driver.reboot_node(node) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + self.driver.destroy_node(node) + + class SoftLayerMockHttp(MockHttp): fixtures = ComputeFileFixtures('softlayer') - def _xmlrpc_v3__SoftLayer_Account_getVirtualGuests( - self, method, url, body, headers): + def _get_method_name(self, type, use_param, qs, path): + return "_xmlrpc" + + def _xmlrpc(self, method, url, body, headers): + params, meth_name = xmlrpclib.loads(body) + url = url.replace("/", "_") + meth_name = "%s_%s" % (url, meth_name) + return getattr(self, meth_name)(method, url, body, headers) + + def _xmlrpc_v3__SoftLayer_Virtual_Guest_getCreateObjectOptions( + self, method, url, body, headers): + body = self.fixtures.load( + 'v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _xmlrpc_v3__SoftLayer_Account_getVirtualGuests( + self, method, url, body, headers): body = self.fixtures.load('v3_SoftLayer_Account_getVirtualGuests.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _xmlrpc_v3__SoftLayer_Location_Datacenter_getDatacenters( - self, method, url, body, 
headers): - + self, method, url, body, headers): body = self.fixtures.load( 'v3_SoftLayer_Location_Datacenter_getDatacenters.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _xmlrpc_v3__SoftLayer_Virtual_Guest_createObject( + self, method, url, body, headers): + fixture = { + None: 'v3__SoftLayer_Virtual_Guest_createObject.xml', + 'INVALIDCREDSERROR': 'SoftLayer_Account.xml', + 'SOFTLAYEREXCEPTION': 'fail.xml', + }[self.type] + body = self.fixtures.load(fixture) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3__SoftLayer_Virtual_Guest_getObject( + self, method, url, body, headers): + body = self.fixtures.load( + 'v3__SoftLayer_Virtual_Guest_getObject.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3__SoftLayer_Virtual_Guest_rebootSoft( + self, method, url, body, headers): + body = self.fixtures.load('empty.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3__SoftLayer_Virtual_Guest_deleteObject( + self, method, url, body, headers): + body = self.fixtures.load('empty.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if __name__ == '__main__': sys.exit(unittest.main()) diff --git a/libcloud/test/compute/test_vcl.py b/libcloud/test/compute/test_vcl.py index 40a237c514..6240a9615c 100644 --- a/libcloud/test/compute/test_vcl.py +++ b/libcloud/test/compute/test_vcl.py @@ -29,31 +29,13 @@ from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import VCL_PARAMS -class MockVCLTransport(xmlrpclib.Transport): - - def __init__(self, datetime, user, passwd, host): - self._use_datetime = datetime - self._connection = (None, None) - self._extra_headers = [] - self._use_builtin_types = False - - def request(self, host, handler, request_body, verbose=0): - self.verbose = 0 - method = ET.XML(request_body).find('methodName').text - mock = VCLMockHttp(host, 80) - mock.request('POST', method) - resp = 
mock.getresponse() - - if sys.version[0] == '2' and sys.version[2] == '7': - response = self.parse_response(resp) - else: - response = self.parse_response(resp.body) - return response class VCLTests(unittest.TestCase): def setUp(self): - VCL.connectionCls.proxyCls.transportCls = MockVCLTransport + VCL.connectionCls.conn_classes = ( + VCLMockHttp, VCLMockHttp) + VCLMockHttp.type = None self.driver = VCL(*VCL_PARAMS) def test_list_nodes(self): @@ -98,50 +80,48 @@ def test_ex_get_request_end_time(self): 1334168100 ) + class VCLMockHttp(MockHttp): fixtures = ComputeFileFixtures('vcl') + def _get_method_name(self, type, use_param, qs, path): + return "_xmlrpc" + + def _xmlrpc(self, method, url, body, headers): + params, meth_name = xmlrpclib.loads(body) + if self.type: + meth_name = "%s_%s" % (meth_name, self.type) + return getattr(self, meth_name)(method, url, body, headers) + def XMLRPCgetImages(self, method, url, body, headers): body = self.fixtures.load('XMLRPCgetImages.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def XMLRPCextendRequest( - self, method, url, body, headers): - + def XMLRPCextendRequest(self, method, url, body, headers): body = self.fixtures.load('XMLRPCextendRequest.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def XMLRPCgetRequestIds( - self, method, url, body, headers): - + def XMLRPCgetRequestIds(self, method, url, body, headers): body = self.fixtures.load( 'XMLRPCgetRequestIds.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def XMLRPCgetRequestStatus( - self, method, url, body, headers): - + def XMLRPCgetRequestStatus(self, method, url, body, headers): body = self.fixtures.load( 'XMLRPCgetRequestStatus.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def XMLRPCendRequest( - self, method, url, body, headers): - + def XMLRPCendRequest(self, method, url, body, headers): body = self.fixtures.load( 'XMLRPCendRequest.xml') return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) - def XMLRPCaddRequest( - self, method, url, body, headers): - + def XMLRPCaddRequest(self, method, url, body, headers): body = self.fixtures.load( 'XMLRPCaddRequest.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def XMLRPCgetRequestConnectData( - self, method, url, body, headers): - + def XMLRPCgetRequestConnectData(self, method, url, body, headers): body = self.fixtures.load( 'XMLRPCgetRequestConnectData.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) diff --git a/libcloud/test/dns/fixtures/gandi/zone_doesnt_exist.xml b/libcloud/test/dns/fixtures/gandi/zone_doesnt_exist.xml new file mode 100644 index 0000000000..c9f245d0cf --- /dev/null +++ b/libcloud/test/dns/fixtures/gandi/zone_doesnt_exist.xml @@ -0,0 +1,17 @@ + + + + + + + faultCode + 581042 + + + faultString + Zone does not exist + + + + + diff --git a/libcloud/test/dns/test_gandi.py b/libcloud/test/dns/test_gandi.py index 03b755974e..04060778b2 100644 --- a/libcloud/test/dns/test_gandi.py +++ b/libcloud/test/dns/test_gandi.py @@ -17,106 +17,21 @@ import unittest from libcloud.utils.py3 import httplib -from libcloud.utils.py3 import xmlrpclib from libcloud.dns.types import RecordType, ZoneDoesNotExistError from libcloud.dns.types import RecordDoesNotExistError from libcloud.dns.drivers.gandi import GandiDNSDriver -from libcloud.test import MockHttp from libcloud.test.file_fixtures import DNSFileFixtures from libcloud.test.secrets import DNS_GANDI -from libcloud.test.common.test_gandi import MockGandiTransport, BaseGandiTests +from libcloud.test.common.test_gandi import BaseGandiMockHttp -Fault = xmlrpclib.Fault -class GandiMockHttp(MockHttp): - fixtures = DNSFileFixtures('gandi') - - def _xmlrpc__domain_zone_create(self, method, url, body, headers): - body = self.fixtures.load('create_zone.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_update(self, method, url, body, headers): - body = 
self.fixtures.load('get_zone.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) +class GandiTests(unittest.TestCase): - def _xmlrpc__domain_zone_list(self, method, url, body, headers): - body = self.fixtures.load('list_zones.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_record_list(self, method, url, body, headers): - body = self.fixtures.load('list_records.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_record_add(self, method, url, body, headers): - body = self.fixtures.load('create_record.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_delete(self, method, url, body, headers): - body = self.fixtures.load('delete_zone.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_info(self, method, url, body, headers): - body = self.fixtures.load('get_zone.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_record_delete(self, method, url, body, headers): - body = self.fixtures.load('delete_record.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_record_update(self, method, url, body, headers): - body = self.fixtures.load('create_record.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_version_new(self, method, url, body, headers): - body = self.fixtures.load('new_version.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_version_set(self, method, url, body, headers): - body = self.fixtures.load('new_version.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_record_list_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): - raise Fault(581042, "Zone does not exist") - - def _xmlrpc__domain_zone_info_ZONE_DOES_NOT_EXIST(self, 
method, url, body, headers): - raise Fault(581042, "Zone does not exist") - - def _xmlrpc__domain_zone_list_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): - raise Fault(581042, "Zone does not exist") - - def _xmlrpc__domain_zone_delete_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): - raise Fault(581042, "Zone does not exist") - - def _xmlrpc__domain_zone_record_list_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): - body = self.fixtures.load('list_records_empty.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_info_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): - body = self.fixtures.load('list_zones.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_record_delete_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): - body = self.fixtures.load('delete_record_doesnotexist.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_version_new_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): - body = self.fixtures.load('new_version.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc__domain_zone_version_set_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): - body = self.fixtures.load('new_version.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - -class DummyTransport(MockGandiTransport): - mockCls = GandiMockHttp - - -class GandiTests(BaseGandiTests): - - driverCls = GandiDNSDriver - transportCls = DummyTransport - params = DNS_GANDI + def setUp(self): + GandiDNSDriver.connectionCls.conn_classes = ( + GandiMockHttp, GandiMockHttp) + GandiMockHttp.type = None + self.driver = GandiDNSDriver(*DNS_GANDI) def test_list_record_types(self): record_types = self.driver.list_record_types() @@ -285,5 +200,97 @@ def test_delete_record_does_not_exist(self): self.fail('Exception was not thrown') +class GandiMockHttp(BaseGandiMockHttp): + 
fixtures = DNSFileFixtures('gandi') + + def _xmlrpc__domain_zone_create(self, method, url, body, headers): + body = self.fixtures.load('create_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_update(self, method, url, body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_list(self, method, url, body, headers): + body = self.fixtures.load('list_zones.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_list(self, method, url, body, headers): + body = self.fixtures.load('list_records.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_add(self, method, url, body, headers): + body = self.fixtures.load('create_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_info(self, method, url, body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_update(self, method, url, body, headers): + body = self.fixtures.load('create_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_new(self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_set(self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + 
return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_list_ZONE_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('zone_doesnt_exist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_info_ZONE_DOES_NOT_EXIST(self, method, url, body, + headers): + body = self.fixtures.load('zone_doesnt_exist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_list_ZONE_DOES_NOT_EXIST(self, method, url, body, + headers): + body = self.fixtures.load('zone_doesnt_exist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_delete_ZONE_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('zone_doesnt_exist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_list_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('list_records_empty.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_info_RECORD_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('list_zones.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_delete_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('delete_record_doesnotexist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_new_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_set_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + if __name__ == '__main__': 
sys.exit(unittest.main()) From 1cd8d51052ec4f71beb51ee27a80ce6d22824f43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Wed, 6 Feb 2013 22:44:44 +0000 Subject: [PATCH 017/143] Backport changes from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1443254 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 11 +++++++++++ libcloud/compute/drivers/elastichosts.py | 15 +++++++++++++++ libcloud/compute/drivers/ibm_sce.py | 18 +++++++++++++++++- libcloud/compute/drivers/openstack.py | 7 +++++-- .../compute/fixtures/ibm_sce/destroy_image.xml | 1 + .../v1_slug_servers_no_admin_pass.xml | 12 ++++++++++++ libcloud/test/compute/test_ibm_sce.py | 12 +++++++++++- libcloud/test/compute/test_openstack.py | 14 ++++++++++++++ 8 files changed, 86 insertions(+), 4 deletions(-) create mode 100644 libcloud/test/compute/fixtures/ibm_sce/destroy_image.xml create mode 100644 libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml diff --git a/CHANGES b/CHANGES index ce0bd1823b..67c735956a 100644 --- a/CHANGES +++ b/CHANGES @@ -138,6 +138,17 @@ Changes with Apache Libcloud 0.12.0: ex_build_node extension method. (LIBCLOUD-249) [Dinesh Bhoopathy] + - Add ex_destroy_image method to IBM SCE driver. (LIBCLOUD-291) + [Perry Zou] + + - Add the following new regions to the ElasticHosts driver: sjc-c, syd-v, + hkg-e. (LIBCLOUD-293) + [Tomaz Muraus] + + - Fix create_node in OpenStack driver to work correctly if 'adminPass' + attribute is not present in the response. + [Gavin McCance, Tomaz Muraus] + *) Storage - Add a new local storage driver. 
diff --git a/libcloud/compute/drivers/elastichosts.py b/libcloud/compute/drivers/elastichosts.py index 1b9980ed49..9b69cac2f8 100644 --- a/libcloud/compute/drivers/elastichosts.py +++ b/libcloud/compute/drivers/elastichosts.py @@ -44,10 +44,25 @@ 'country': 'United States', 'host': 'api.lax-p.elastichosts.com' }, + 'us-3': { + 'name': 'San Jose (Silicon Valley)', + 'country': 'United States', + 'host': 'api.sjc-c.elastichosts.com' + }, 'ca-1': { 'name': 'Toronto Peer 1', 'country': 'Canada', 'host': 'api.tor-p.elastichosts.com' + }, + 'au-1': { + 'name': 'Sydney', + 'country': 'Australia', + 'host': 'api.syd-v.elastichosts.com' + }, + 'cn-1': { + 'name': 'Hong Kong', + 'country': 'China', + 'host': 'api.hkg-e.elastichosts.com' } } diff --git a/libcloud/compute/drivers/ibm_sce.py b/libcloud/compute/drivers/ibm_sce.py index f7b57d6842..0ffb8359f6 100644 --- a/libcloud/compute/drivers/ibm_sce.py +++ b/libcloud/compute/drivers/ibm_sce.py @@ -26,6 +26,7 @@ import time from libcloud.utils.py3 import urlencode +from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.common.base import XmlResponse, ConnectionUserAndKey @@ -329,7 +330,7 @@ def destroy_node(self, node): url = REST_BASE + '/instances/%s' % (node.id) status = int(self.connection.request(action=url, method='DELETE').status) - return status == 200 + return status == httplib.OK def destroy_volume(self, volume): """ @@ -341,6 +342,21 @@ def destroy_volume(self, volume): @rtype: C{bool} """ url = REST_BASE + '/storage/%s' % (volume.id) + status = int(self.connection.request(action=url, + method='DELETE').status) + return status == httplib.OK + + def ex_destroy_image(self,image): + """ + Destroys an image. 
+ + @param image: Image to be destroyed + @type image: L{NodeImage} + + @return: C{bool} + """ + + url = REST_BASE + '/offerings/image/%s' % (image.id) status = int(self.connection.request(action=url, method='DELETE').status) return status == 200 diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 91ceb0ad89..25c5545f53 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -1090,7 +1090,10 @@ def create_node(self, **kwargs): server_resp = self.connection.request( '/servers/%s' % create_response['id']) server_object = server_resp.object['server'] - server_object['adminPass'] = create_response['adminPass'] + + # adminPass is not always present + # http://docs.openstack.org/essex/openstack-compute/admin/content/configuring-compute-API.html#d6e1833 + server_object['adminPass'] = create_response.get('adminPass', None) return self._to_node(server_object) @@ -1614,7 +1617,7 @@ def _to_node(self, api_node): uri=next(link['href'] for link in api_node['links'] if link['rel'] == 'self'), metadata=api_node['metadata'], - password=api_node.get('adminPass'), + password=api_node.get('adminPass', None), created=api_node['created'], updated=api_node['updated'], key_name=api_node.get('key_name', None), diff --git a/libcloud/test/compute/fixtures/ibm_sce/destroy_image.xml b/libcloud/test/compute/fixtures/ibm_sce/destroy_image.xml new file mode 100644 index 0000000000..a957946ae0 --- /dev/null +++ b/libcloud/test/compute/fixtures/ibm_sce/destroy_image.xml @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml b/libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml new file mode 100644 index 0000000000..e9b9d35115 --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/libcloud/test/compute/test_ibm_sce.py 
b/libcloud/test/compute/test_ibm_sce.py index 8f8269c583..663512bc5d 100644 --- a/libcloud/test/compute/test_ibm_sce.py +++ b/libcloud/test/compute/test_ibm_sce.py @@ -187,6 +187,12 @@ def test_destroy_volume(self): IBMMockHttp.type = 'DESTROY' ret = self.driver.destroy_volume(vols[0]) self.assertTrue(ret) + + def test_ex_destroy_image(self): + image = self.driver.list_images() + IBMMockHttp.type = 'DESTROY' + ret = self.driver.ex_destroy_image(image[0]) + self.assertTrue(ret) def test_detach_volume(self): nodes = self.driver.list_nodes() @@ -278,7 +284,11 @@ def _computecloud_enterprise_api_rest_20100331_storage_CREATE(self, method, url, def _computecloud_enterprise_api_rest_20100331_storage_39281_DESTROY(self, method, url, body, headers): body = self.fixtures.load('destroy_volume.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - + + def _computecloud_enterprise_api_rest_20100331_offerings_image_2_DESTROY(self, method, url, body, headers): + body = self.fixtures.load('destroy_image.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _computecloud_enterprise_api_rest_20100331_instances_26557_DETACH(self, method, url, body, headers): body = self.fixtures.load('detach_volume.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index bca941a9f4..ba6ac382aa 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -203,6 +203,16 @@ def test_create_node(self): self.assertEqual(node.name, 'racktest') self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') + def test_create_node_without_adminPass(self): + OpenStackMockHttp.type = 'NO_ADMIN_PASS' + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, 
size=size) + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra.get('password'), None) + def test_create_node_ex_shared_ip_group(self): OpenStackMockHttp.type = 'EX_SHARED_IP_GROUP' image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', @@ -440,6 +450,10 @@ def _v1_0_slug_servers(self, method, url, body, headers): body = self.fixtures.load('v1_slug_servers.xml') return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + def _v1_0_slug_servers_NO_ADMIN_PASS(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_no_admin_pass.xml') + return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + def _v1_0_slug_servers_EX_SHARED_IP_GROUP(self, method, url, body, headers): # test_create_node_ex_shared_ip_group # Verify that the body contains sharedIpGroupId XML element From c55f3db8ac827b05d03d2438cc8af19606f45f68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Thu, 7 Feb 2013 15:39:26 +0000 Subject: [PATCH 018/143] Backport changes from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1443563 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 ++++ libcloud/compute/drivers/ec2.py | 18 +++++++++++++++++- libcloud/test/compute/test_ec2.py | 6 ++++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 67c735956a..6699715ae0 100644 --- a/CHANGES +++ b/CHANGES @@ -149,6 +149,10 @@ Changes with Apache Libcloud 0.12.0: attribute is not present in the response. [Gavin McCance, Tomaz Muraus] + - Allow users to filter images returned by the list_images method in the EC2 + driver by providing ex_image_ids argument. (LIBCLOUD-294) + [Chris Psaltis, Joseph Hall] + *) Storage - Add a new local storage driver. 
diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 829f20a4f8..eb99e174b0 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -644,8 +644,24 @@ def list_sizes(self, location=None): sizes.append(NodeSize(driver=self, **attributes)) return sizes - def list_images(self, location=None): + def list_images(self, location=None, ex_image_ids=None): + """ + List all images + + Ex_image_ids parameter is used to filter the list of + images that should be returned. Only the images + with the corresponding image ids will be returned. + + @param ex_image_ids: List of C{NodeImage.id} + @type ex_image_ids: C{list} of C{str} + + @rtype: C{list} of L{NodeImage} + """ params = {'Action': 'DescribeImages'} + + if ex_image_ids: + params.update(self._pathlist('ImageId', ex_image_ids)) + images = self._to_images( self.connection.request(self.path, params=params).object ) diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 95f8fd7155..a91b8ec2f6 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -245,6 +245,12 @@ def test_list_images(self): 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml') self.assertEqual(image.id, 'ami-be3adfd7') + def test_list_images_with_image_ids(self): + images = self.driver.list_images(ex_image_ids=['ami-be3adfd7']) + self.assertEqual(len(images), 1) + self.assertEqual(images[0].name, + 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml') + def test_ex_list_availability_zones(self): availability_zones = self.driver.ex_list_availability_zones() availability_zone = availability_zones[0] From 4a6feed8458ceef4288caba2a05afe65cfe0ab1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Fri, 8 Feb 2013 15:58:47 +0000 Subject: [PATCH 019/143] Backport changes from trunk. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1444088 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 + libcloud/common/azure.py | 186 ++++ libcloud/storage/base.py | 13 +- libcloud/storage/drivers/azure_blobs.py | 981 ++++++++++++++++++ libcloud/storage/providers.py | 2 + libcloud/storage/types.py | 1 + libcloud/test/secrets.py-dist | 3 + .../azure_blobs/list_containers_1.xml | 29 + .../azure_blobs/list_containers_2.xml | 30 + .../azure_blobs/list_containers_empty.xml | 8 + .../fixtures/azure_blobs/list_objects_1.xml | 49 + .../fixtures/azure_blobs/list_objects_2.xml | 43 + .../azure_blobs/list_objects_empty.xml | 6 + libcloud/test/storage/test_azure_blobs.py | 944 +++++++++++++++++ 14 files changed, 2293 insertions(+), 6 deletions(-) create mode 100644 libcloud/common/azure.py create mode 100644 libcloud/storage/drivers/azure_blobs.py create mode 100644 libcloud/test/storage/fixtures/azure_blobs/list_containers_1.xml create mode 100644 libcloud/test/storage/fixtures/azure_blobs/list_containers_2.xml create mode 100644 libcloud/test/storage/fixtures/azure_blobs/list_containers_empty.xml create mode 100644 libcloud/test/storage/fixtures/azure_blobs/list_objects_1.xml create mode 100644 libcloud/test/storage/fixtures/azure_blobs/list_objects_2.xml create mode 100644 libcloud/test/storage/fixtures/azure_blobs/list_objects_empty.xml create mode 100644 libcloud/test/storage/test_azure_blobs.py diff --git a/CHANGES b/CHANGES index 6699715ae0..e36047422f 100644 --- a/CHANGES +++ b/CHANGES @@ -182,6 +182,10 @@ Changes with Apache Libcloud 0.12.0: (LIBCLOUD-269) [Mahendra M] + - Add new driver for Windows Azure Storage with support for block and page + blobs. 
(LIBCLOUD-80) + [Mahendra M] + *) DNS - Update 'if type' checks in the update_record methods to behave correctly diff --git a/libcloud/common/azure.py b/libcloud/common/azure.py new file mode 100644 index 0000000000..1441a073eb --- /dev/null +++ b/libcloud/common/azure.py @@ -0,0 +1,186 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import time +import base64 +import hmac + +from hashlib import sha256 + +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b +from libcloud.utils.xml import fixxpath, findtext +from xml.etree import ElementTree + +from libcloud.common.types import InvalidCredsError +from libcloud.common.types import LibcloudError, MalformedResponseError +from libcloud.common.base import ConnectionUserAndKey, RawResponse +from libcloud.common.base import XmlResponse + +# Azure API version +API_VERSION = '2012-02-12' + +# The time format for headers in Azure requests +AZURE_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT' + + +class AzureResponse(XmlResponse): + + valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT, + httplib.BAD_REQUEST] + + def success(self): + i = int(self.status) + return i >= 200 and i <= 299 or i in self.valid_response_codes + + def parse_error(self, msg=None): + error_msg = 'Unknown error' + + try: + # Azure does give some meaningful errors, but is inconsistent + # Some APIs respond with an XML error. Others just dump HTML + body = self.parse_body() + + if type(body) == ElementTree.Element: + code = body.findtext(fixxpath(xpath='Code')) + message = body.findtext(fixxpath(xpath='Message')) + message = message.split('\n')[0] + error_msg = '%s: %s' % (code, message) + + except MalformedResponseError: + pass + + if msg: + error_msg = '%s - %s' % (msg, error_msg) + + if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]: + raise InvalidCredsError(error_msg) + + raise LibcloudError('%s Status code: %d.' 
% (error_msg, self.status), + driver=self) + + +class AzureRawResponse(RawResponse): + pass + + +class AzureConnection(ConnectionUserAndKey): + """ + Represents a single connection to Azure + """ + + responseCls = AzureResponse + rawResponseCls = AzureRawResponse + + def add_default_params(self, params): + return params + + def pre_connect_hook(self, params, headers): + headers = copy.deepcopy(headers) + + # We have to add a date header in GMT + headers['x-ms-date'] = time.strftime(AZURE_TIME_FORMAT, time.gmtime()) + headers['x-ms-version'] = API_VERSION + + # Add the authorization header + headers['Authorization'] = self._get_azure_auth_signature( + method=self.method, headers=headers, params=params, + account=self.user_id, secret_key=self.key, path=self.action) + + # Azure cribs about this in 'raw' connections + headers.pop('Host', None) + + return params, headers + + def _get_azure_auth_signature(self, method, headers, params, + account, secret_key, path='/'): + """ + Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, + UTF-8-Encoding-Of( StringToSign ) ) ) ); + + StringToSign = HTTP-VERB + "\n" + + Content-Encoding + "\n" + + Content-Language + "\n" + + Content-Length + "\n" + + Content-MD5 + "\n" + + Content-Type + "\n" + + Date + "\n" + + If-Modified-Since + "\n" + + If-Match + "\n" + + If-None-Match + "\n" + + If-Unmodified-Since + "\n" + + Range + "\n" + + CanonicalizedHeaders + + CanonicalizedResource; + """ + special_header_values = [] + xms_header_values = [] + param_list = [] + special_header_keys = ['content-encoding', 'content-language', + 'content-length', 'content-md5', + 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', + 'if-unmodified-since', 'range'] + + # Split the x-ms headers and normal headers and make everything + # lower case + headers_copy = {} + for header, value in headers.items(): + header = header.lower() + value = str(value).strip() + if header.startswith('x-ms-'): + xms_header_values.append((header, 
value)) + else: + headers_copy[header] = value + + # Get the values for the headers in the specific order + for header in special_header_keys: + header = header.lower() # Just for safety + if header in headers_copy: + special_header_values.append(headers_copy[header]) + else: + special_header_values.append('') + + # Prepare the first section of the string to be signed + values_to_sign = [method] + special_header_values + # string_to_sign = '\n'.join([method] + special_header_values) + + # The x-ms-* headers have to be in lower case and sorted + xms_header_values.sort() + + for header, value in xms_header_values: + values_to_sign.append('%s:%s' % (header, value)) + + # Add the canonicalized path + values_to_sign.append('/%s%s' % (account, path)) + + # URL query parameters (sorted and lower case) + for key, value in params.items(): + param_list.append((key.lower(), str(value).strip())) + + param_list.sort() + + for key, value in param_list: + values_to_sign.append('%s:%s' % (key, value)) + + string_to_sign = b('\n'.join(values_to_sign)) + secret_key = b(secret_key) + b64_hmac = base64.b64encode( + hmac.new(secret_key, string_to_sign, digestmod=sha256).digest() + ) + + return 'SharedKey %s:%s' % (self.user_id, b64_hmac.decode('utf-8')) diff --git a/libcloud/storage/base.py b/libcloud/storage/base.py index b45d027230..f0e4e3a852 100644 --- a/libcloud/storage/base.py +++ b/libcloud/storage/base.py @@ -134,13 +134,14 @@ def get_object(self, object_name): return self.driver.get_object(container_name=self.name, object_name=object_name) - def upload_object(self, file_path, object_name, extra=None): + def upload_object(self, file_path, object_name, extra=None, **kwargs): return self.driver.upload_object( - file_path, self, object_name, extra) + file_path, self, object_name, extra=extra, **kwargs) - def upload_object_via_stream(self, iterator, object_name, extra=None): + def upload_object_via_stream(self, iterator, object_name, extra=None, + **kwargs): return 
self.driver.upload_object_via_stream( - iterator, self, object_name, extra) + iterator, self, object_name, extra=extra, **kwargs) def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): @@ -615,7 +616,7 @@ def _upload_object(self, object_name, content_type, upload_func, file_size = os.path.getsize(file_path) upload_func_kwargs['chunked'] = False - if file_size is not None: + if file_size is not None and 'Content-Length' not in headers: headers['Content-Length'] = file_size headers['Content-Type'] = content_type @@ -676,7 +677,7 @@ def _upload_data(self, response, data, calculate_hash=True): return True, data_hash, bytes_transferred def _stream_data(self, response, iterator, chunked=False, - calculate_hash=True, chunk_size=None): + calculate_hash=True, chunk_size=None, data=None): """ Stream a data over an http connection. diff --git a/libcloud/storage/drivers/azure_blobs.py b/libcloud/storage/drivers/azure_blobs.py new file mode 100644 index 0000000000..5512d700a0 --- /dev/null +++ b/libcloud/storage/drivers/azure_blobs.py @@ -0,0 +1,981 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import with_statement + +import time +import base64 +import hmac +import re +import os +import binascii + +from hashlib import sha256 +from xml.etree.ElementTree import Element, SubElement + +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlquote +from libcloud.utils.py3 import tostring +from libcloud.utils.py3 import b + +from libcloud.utils.xml import fixxpath, findtext +from libcloud.utils.files import read_in_chunks +from libcloud.common.types import LibcloudError +from libcloud.common.azure import AzureConnection + +from libcloud.storage.base import Object, Container, StorageDriver +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import ContainerAlreadyExistsError +from libcloud.storage.types import InvalidContainerNameError +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import ObjectHashMismatchError + +if PY3: + from io import FileIO as file + +# Desired number of items in each response inside a paginated request +RESPONSES_PER_REQUEST = 100 + +# As per the Azure documentation, if the upload file size is less than +# 64MB, we can upload it in a single request. However, in real life azure +# servers seem to disconnect randomly after around 5 MB or 200s of upload. +# So, it is better that for file sizes greater than 4MB, we upload it in +# chunks. +# Also, with large sizes, if we use a lease, the lease will timeout after +# 60 seconds, but the upload might still be in progress. This can be +# handled in code, but if we use chunked uploads, the lease renewal will +# happen automatically. 
+AZURE_BLOCK_MAX_SIZE = 4 * 1024 * 1024 + +# Azure block blocks must be maximum 4MB +# Azure page blobs must be aligned in 512 byte boundaries (4MB fits that) +AZURE_CHUNK_SIZE = 4 * 1024 * 1024 + +# Azure page blob must be aligned in 512 byte boundaries +AZURE_PAGE_CHUNK_SIZE = 512 + +# The time period (in seconds) for which a lease must be obtained. +# If set as -1, we get an infinite lease, but that is a bad idea. If +# after getting an infinite lease, there was an issue in releasing the +# lease, the object will remain 'locked' forever, unless the lease is +# released using the lease_id (which is not exposed to the user) +AZURE_LEASE_PERIOD = 60 + + +class AzureBlobLease(object): + """ + A class to help in leasing an azure blob and renewing the lease + """ + def __init__(self, driver, object_path, use_lease): + """ + @param driver: The Azure storage driver that is being used + @type driver: L{AzureStorageDriver} + + @param object_path: The path of the object we need to lease + @type object_path: C{str} + + @param use_lease: Indicates if we must take a lease or not + @type use_lease: C{bool} + """ + self.object_path = object_path + self.driver = driver + self.use_lease = use_lease + self.lease_id = None + self.params = {'comp': 'lease'} + + def renew(self): + """ + Renew the lease if it is older than a predefined time period + """ + if self.lease_id is None: + return + + headers = {'x-ms-lease-action': 'renew', + 'x-ms-lease-id': self.lease_id, + 'x-ms-lease-duration': '60'} + + response = self.driver.connection.request(self.object_path, + headers=headers, + params=self.params, + method='PUT') + + if response.status != httplib.OK: + raise LibcloudError('Unable to obtain lease', driver=self) + + def update_headers(self, headers): + """ + Update the lease id in the headers + """ + if self.lease_id: + headers['x-ms-lease-id'] = self.lease_id + + def __enter__(self): + if not self.use_lease: + return self + + headers = {'x-ms-lease-action': 'acquire', + 
'x-ms-lease-duration': '60'} + + response = self.driver.connection.request(self.object_path, + headers=headers, + params=self.params, + method='PUT') + + if response.status == httplib.NOT_FOUND: + return self + elif response.status != httplib.CREATED: + raise LibcloudError('Unable to obtain lease', driver=self) + + self.lease_id = response.headers['x-ms-lease-id'] + return self + + def __exit__(self, type, value, traceback): + if self.lease_id is None: + return + + headers = {'x-ms-lease-action': 'release', + 'x-ms-lease-id': self.lease_id} + response = self.driver.connection.request(self.object_path, + headers=headers, + params=self.params, + method='PUT') + + if response.status != httplib.OK: + raise LibcloudError('Unable to release lease', driver=self) + + +class AzureBlobsConnection(AzureConnection): + """ + Represents a single connection to Azure Blobs + """ + host = 'blob.core.windows.net' + + +class AzureBlobsStorageDriver(StorageDriver): + name = 'Microsoft Azure (blobs)' + website = 'http://windows.azure.com/' + connectionCls = AzureBlobsConnection + hash_type = 'md5' + supports_chunked_encoding = False + ex_blob_type = 'BlockBlob' + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + **kwargs): + + # The hostname must be 'account.blobs.core.windows.net' + self.connectionCls.host = '%s.%s' % (key, self.connectionCls.host) + + # B64decode() this key and keep it, so that we don't have to do + # so for every request. 
Minor performance improvement + secret = base64.b64decode(b(secret)) + + super(AzureBlobsStorageDriver, self).__init__( + key=key, secret=secret, + secure=secure, host=host, + port=port, **kwargs) + + def _xml_to_container(self, node): + """ + Converts a container XML node to a container instance + + @param node: XML info of the container + @type node: L{xml.etree.ElementTree.Element} + + @return: A container instance + @rtype: L{Container} + """ + + name = node.findtext(fixxpath(xpath='Name')) + props = node.find(fixxpath(xpath='Properties')) + metadata = node.find(fixxpath(xpath='Metadata')) + + extra = { + 'url': node.findtext(fixxpath(xpath='Url')), + 'last_modified': node.findtext(fixxpath(xpath='Last-Modified')), + 'etag': props.findtext(fixxpath(xpath='Etag')), + 'lease': { + 'status': props.findtext(fixxpath(xpath='LeaseStatus')), + 'state': props.findtext(fixxpath(xpath='LeaseState')), + 'duration': props.findtext(fixxpath(xpath='LeaseDuration')), + }, + 'meta_data': {} + } + + for meta in metadata.getchildren(): + extra['meta_data'][meta.tag] = meta.text + + return Container(name=name, extra=extra, driver=self) + + def _response_to_container(self, container_name, response): + """ + Converts a HTTP response to a container instance + + @param container_name: Name of the container + @type container_name: C{str} + + @param response: HTTP Response + @type node: L{} + + @return: A container instance + @rtype: L{Container} + """ + + headers = response.headers + extra = { + 'url': 'http://%s%s' % (response.connection.host, + response.connection.action), + 'etag': headers['etag'], + 'last_modified': headers['last-modified'], + 'lease': { + 'status': headers.get('x-ms-lease-status', None), + 'state': headers.get('x-ms-lease-state', None), + 'duration': headers.get('x-ms-lease-duration', None), + }, + 'meta_data': {} + } + + for key, value in response.headers.items(): + if key.startswith('x-ms-meta-'): + key = key.split('x-ms-meta-')[1] + extra['meta_data'][key] = 
value + + return Container(name=container_name, extra=extra, driver=self) + + def _xml_to_object(self, container, blob): + """ + Converts a BLOB XML node to an object instance + + @param container: Instance of the container holding the blob + @type: L{Container} + + @param blob: XML info of the blob + @type blob: L{} + + @return: An object instance + @rtype: L{Object} + """ + + name = blob.findtext(fixxpath(xpath='Name')) + props = blob.find(fixxpath(xpath='Properties')) + metadata = blob.find(fixxpath(xpath='Metadata')) + etag = props.findtext(fixxpath(xpath='Etag')) + size = int(props.findtext(fixxpath(xpath='Content-Length'))) + + extra = { + 'content_type': props.findtext(fixxpath(xpath='Content-Type')), + 'etag': etag, + 'md5_hash': props.findtext(fixxpath(xpath='Content-MD5')), + 'last_modified': props.findtext(fixxpath(xpath='Last-Modified')), + 'url': blob.findtext(fixxpath(xpath='Url')), + 'hash': props.findtext(fixxpath(xpath='Etag')), + 'lease': { + 'status': props.findtext(fixxpath(xpath='LeaseStatus')), + 'state': props.findtext(fixxpath(xpath='LeaseState')), + 'duration': props.findtext(fixxpath(xpath='LeaseDuration')), + }, + 'content_encoding': props.findtext(fixxpath( + xpath='Content-Encoding')), + 'content_language': props.findtext(fixxpath( + xpath='Content-Language')), + 'blob_type': props.findtext(fixxpath(xpath='BlobType')) + } + + if extra['md5_hash']: + extra['md5_hash'] = binascii.hexlify( + base64.b64decode(b(extra['md5_hash']))) + + meta_data = {} + for meta in metadata.getchildren(): + meta_data[meta.tag] = meta.text + + return Object(name=name, size=size, hash=etag, meta_data=meta_data, + extra=extra, container=container, driver=self) + + def _response_to_object(self, object_name, container, response): + """ + Converts a HTTP response to an object (from headers) + + @param object_name: Name of the object + @type object_name: C{str} + + @param container: Instance of the container holding the blob + @type: L{Container} + + @param 
response: HTTP Response + @type node: L{} + + @return: An object instance + @rtype: L{Object} + """ + + headers = response.headers + size = int(headers['content-length']) + etag = headers['etag'] + + extra = { + 'url': 'http://%s%s' % (response.connection.host, + response.connection.action), + 'etag': etag, + 'md5_hash': headers.get('content-md5', None), + 'content_type': headers.get('content-type', None), + 'content_language': headers.get('content-language', None), + 'content_encoding': headers.get('content-encoding', None), + 'last_modified': headers['last-modified'], + 'lease': { + 'status': headers.get('x-ms-lease-status', None), + 'state': headers.get('x-ms-lease-state', None), + 'duration': headers.get('x-ms-lease-duration', None), + }, + 'blob_type': headers['x-ms-blob-type'] + } + + if extra['md5_hash']: + extra['md5_hash'] = binascii.hexlify( + base64.b64decode(b(extra['md5_hash']))) + + meta_data = {} + for key, value in response.headers.items(): + if key.startswith('x-ms-meta-'): + key = key.split('x-ms-meta-')[1] + meta_data[key] = value + + return Object(name=object_name, size=size, hash=etag, extra=extra, + meta_data=meta_data, container=container, driver=self) + + def iterate_containers(self): + """ + @inherits: L{StorageDriver.iterate_containers} + """ + params = {'comp': 'list', + 'maxresults': RESPONSES_PER_REQUEST, + 'include': 'metadata'} + + while True: + response = self.connection.request('/', params) + if response.status != httplib.OK: + raise LibcloudError('Unexpected status code: %s' % + (response.status), driver=self) + + body = response.parse_body() + containers = body.find(fixxpath(xpath='Containers')) + containers = containers.findall(fixxpath(xpath='Container')) + + for container in containers: + yield self._xml_to_container(container) + + params['marker'] = body.findtext('NextMarker') + if not params['marker']: + break + + def iterate_container_objects(self, container): + """ + @inherits: L{StorageDriver.iterate_container_objects} + 
""" + params = {'restype': 'container', + 'comp': 'list', + 'maxresults': RESPONSES_PER_REQUEST, + 'include': 'metadata'} + + container_path = self._get_container_path(container) + + while True: + response = self.connection.request(container_path, + params=params) + + if response.status == httplib.NOT_FOUND: + raise ContainerDoesNotExistError(value=None, + driver=self, + container_name=container.name) + + elif response.status != httplib.OK: + raise LibcloudError('Unexpected status code: %s' % + (response.status), driver=self) + + body = response.parse_body() + blobs = body.find(fixxpath(xpath='Blobs')) + blobs = blobs.findall(fixxpath(xpath='Blob')) + + for blob in blobs: + yield self._xml_to_object(container, blob) + + params['marker'] = body.findtext('NextMarker') + if not params['marker']: + break + + def get_container(self, container_name): + """ + @inherits: L{StorageDriver.get_container} + """ + params = {'restype': 'container'} + + container_path = '/%s' % (container_name) + + response = self.connection.request(container_path, params=params, + method='HEAD') + + if response.status == httplib.NOT_FOUND: + raise ContainerDoesNotExistError('Container %s does not exist' % + (container_name), driver=self, + container_name=container_name) + elif response.status != httplib.OK: + raise LibcloudError('Unexpected status code: %s' % + (response.status), driver=self) + + return self._response_to_container(container_name, response) + + def get_object(self, container_name, object_name): + """ + @inherits: L{StorageDriver.get_object} + """ + + container = self.get_container(container_name=container_name) + object_path = self._get_object_path(container, object_name) + + response = self.connection.request(object_path, method='HEAD') + + if response.status == httplib.OK: + obj = self._response_to_object(object_name, container, response) + return obj + + raise ObjectDoesNotExistError(value=None, driver=self, + object_name=object_name) + + def _get_container_path(self, 
container): + """ + Return a container path + + @param container: Container instance + @type container: L{Container} + + @return: A path for this container. + @rtype: C{str} + """ + return '/%s' % (container.name) + + def _get_object_path(self, container, object_name): + """ + Return an object's CDN path. + + @param container: Container instance + @type container: L{Container} + + @param object_name: Object name + @type object_name: L{str} + + @return: A path for this object. + @rtype: C{str} + """ + container_url = self._get_container_path(container) + object_name_cleaned = urlquote(object_name) + object_path = '%s/%s' % (container_url, object_name_cleaned) + return object_path + + def create_container(self, container_name): + """ + @inherits: L{StorageDriver.create_container} + """ + params = {'restype': 'container'} + + container_path = '/%s' % (container_name) + response = self.connection.request(container_path, params=params, + method='PUT') + + if response.status == httplib.CREATED: + return self._response_to_container(container_name, response) + elif response.status == httplib.CONFLICT: + raise ContainerAlreadyExistsError( + value='Container with this name already exists. The name must ' + 'be unique among all the containers in the system', + container_name=container_name, driver=self) + elif response.status == httplib.BAD_REQUEST: + raise InvalidContainerNameError(value='Container name contains ' + + 'invalid characters.', + container_name=container_name, + driver=self) + + raise LibcloudError('Unexpected status code: %s' % (response.status), + driver=self) + + def delete_container(self, container): + """ + @inherits: L{StorageDriver.delete_container} + """ + # Azure does not check if the container is empty. 
# Reconstructed methods of AzureBlobsStorageDriver (the enclosing class
# statement precedes this chunk; re-indent by four spaces when splicing
# back into the class body).


def download_object(self, obj, destination_path, overwrite_existing=False,
                    delete_on_failure=True):
    """
    Download a blob to a file on the local disk.

    @inherits: L{StorageDriver.download_object}
    """
    obj_path = self._get_object_path(obj.container, obj.name)
    response = self.connection.request(obj_path, raw=True, data=None)

    return self._get_object(obj=obj, callback=self._save_object,
                            response=response,
                            callback_kwargs={
                                'obj': obj,
                                'response': response.response,
                                'destination_path': destination_path,
                                'overwrite_existing': overwrite_existing,
                                'delete_on_failure': delete_on_failure},
                            success_status_code=httplib.OK)


def download_object_as_stream(self, obj, chunk_size=None):
    """
    Download a blob as a stream of data chunks.

    @inherits: L{StorageDriver.download_object_as_stream}
    """
    obj_path = self._get_object_path(obj.container, obj.name)
    response = self.connection.request(obj_path, raw=True, data=None)

    return self._get_object(obj=obj, callback=read_in_chunks,
                            response=response,
                            callback_kwargs={'iterator': response.response,
                                             'chunk_size': chunk_size},
                            success_status_code=httplib.OK)


def _upload_in_chunks(self, response, data, iterator, object_path,
                      blob_type, lease, calculate_hash=True):
    """
    Uploads data from an iterator in fixed sized chunks to Azure Blob
    storage.

    @param response: Response object from the initial POST request
    @type response: L{RawResponse}

    @param data: Any data from the initial POST request
    @type data: C{str}

    @param iterator: The generator for fetching the upload data
    @type iterator: C{generator}

    @param object_path: The path of the object to which we are uploading
    @type object_path: C{str}

    @param blob_type: The blob type being uploaded
    @type blob_type: C{str}

    @param lease: The lease object to be used for renewal
    @type lease: L{AzureBlobLease}

    @keyword calculate_hash: Indicates if we must calculate the data hash
    @type calculate_hash: C{bool}

    @return: A tuple of (status, checksum, bytes transferred)
    @rtype: C{tuple}
    """

    # The initial request must have succeeded before any chunk is sent
    if response.status != httplib.CREATED:
        raise LibcloudError('Error initializing upload. Code: %d' %
                            (response.status), driver=self)

    data_hash = None
    if calculate_hash:
        data_hash = self._get_hash_function()

    bytes_transferred = 0
    count = 1
    chunks = []
    headers = {}

    lease.update_headers(headers)

    if blob_type == 'BlockBlob':
        params = {'comp': 'block'}
    else:
        params = {'comp': 'page'}

    # Read the input data in chunk sizes suitable for Azure
    for data in read_in_chunks(iterator, AZURE_CHUNK_SIZE):
        data = b(data)
        content_length = len(data)
        offset = bytes_transferred
        bytes_transferred += content_length

        if calculate_hash:
            data_hash.update(data)

        chunk_hash = self._get_hash_function()
        chunk_hash.update(data)
        chunk_hash = base64.b64encode(b(chunk_hash.digest()))

        headers['Content-MD5'] = chunk_hash.decode('utf-8')
        headers['Content-Length'] = content_length

        if blob_type == 'BlockBlob':
            # Block id can be any unique string that is base64 encoded
            # A 10 digit number can hold the max value of 50000 blocks
            # that are allowed for azure
            block_id = base64.b64encode(b('%10d' % (count)))
            block_id = block_id.decode('utf-8')
            params['blockid'] = block_id

            # Keep this data for a later commit
            chunks.append(block_id)
        else:
            headers['x-ms-page-write'] = 'update'
            headers['x-ms-range'] = 'bytes=%d-%d' % \
                (offset, bytes_transferred - 1)

        # Renew lease before updating
        lease.renew()

        resp = self.connection.request(object_path, method='PUT',
                                       data=data, headers=headers,
                                       params=params)

        if resp.status != httplib.CREATED:
            resp.parse_error()
            raise LibcloudError('Error uploading chunk %d. Code: %d' %
                                (count, resp.status), driver=self)

        count += 1

    if calculate_hash:
        data_hash = data_hash.hexdigest()

    if blob_type == 'BlockBlob':
        self._commit_blocks(object_path, chunks, lease)

    # The Azure service does not return a hash immediately for
    # chunked uploads. It takes some time for the data to get synced
    response.headers['content-md5'] = None

    return (True, data_hash, bytes_transferred)


def _commit_blocks(self, object_path, chunks, lease):
    """
    Makes a final commit of the data.

    @param object_path: Server side object path.
    @type object_path: C{str}

    @param chunks: A list of previously uploaded block ids, in upload
        order.
    @type chunks: C{list}

    @param lease: The lease object to be used for renewal
    @type lease: L{AzureBlobLease}
    """

    root = Element('BlockList')

    for block_id in chunks:
        part = SubElement(root, 'Uncommitted')
        part.text = str(block_id)

    data = tostring(root)
    params = {'comp': 'blocklist'}
    headers = {}

    lease.update_headers(headers)
    lease.renew()

    response = self.connection.request(object_path, data=data,
                                       params=params, headers=headers,
                                       method='PUT')

    if response.status != httplib.CREATED:
        raise LibcloudError('Error in blocklist commit', driver=self)


def _check_values(self, blob_type, object_size):
    """
    Checks if extension arguments are valid

    @param blob_type: The blob type that is being uploaded
    @type blob_type: C{str}

    @param object_size: The (max) size of the object being uploaded
    @type object_size: C{int}
    """

    if blob_type not in ['BlockBlob', 'PageBlob']:
        raise LibcloudError('Invalid blob type', driver=self)

    if blob_type == 'PageBlob':
        if not object_size:
            raise LibcloudError('Max blob size is mandatory for page blob',
                                driver=self)

        if object_size % AZURE_PAGE_CHUNK_SIZE:
            raise LibcloudError('Max blob size is not aligned to '
                                'page boundary', driver=self)


def upload_object(self, file_path, container, object_name, extra=None,
                  verify_hash=True, ex_blob_type=None, ex_use_lease=False):
    """
    Upload an object currently located on a disk.

    @inherits: L{StorageDriver.upload_object}

    @param ex_blob_type: Storage class
    @type ex_blob_type: C{str}

    @param ex_use_lease: Indicates if we must take a lease before upload
    @type ex_use_lease: C{bool}
    """

    if ex_blob_type is None:
        ex_blob_type = self.ex_blob_type

    # Get the size of the file
    file_size = os.stat(file_path).st_size

    # The presumed size of the object
    object_size = file_size

    self._check_values(ex_blob_type, file_size)

    # NOTE: open() is used instead of the Python 2 only builtin file()
    # so the driver also works under Python 3.
    with open(file_path, 'rb') as file_handle:
        iterator = iter(file_handle)

        # If size is greater than 64MB or type is Page, upload in chunks
        if ex_blob_type == 'PageBlob' or file_size > AZURE_BLOCK_MAX_SIZE:
            # For chunked upload of block blobs, the initial size must
            # be 0.
            if ex_blob_type == 'BlockBlob':
                object_size = None

            object_path = self._get_object_path(container, object_name)

            upload_func = self._upload_in_chunks
            upload_func_kwargs = {'iterator': iterator,
                                  'object_path': object_path,
                                  'blob_type': ex_blob_type,
                                  'lease': None}
        else:
            upload_func = self._stream_data
            upload_func_kwargs = {'iterator': iterator,
                                  'chunked': False,
                                  'calculate_hash': verify_hash}

        return self._put_object(container=container,
                                object_name=object_name,
                                object_size=object_size,
                                upload_func=upload_func,
                                upload_func_kwargs=upload_func_kwargs,
                                file_path=file_path, extra=extra,
                                verify_hash=verify_hash,
                                blob_type=ex_blob_type,
                                use_lease=ex_use_lease)


def upload_object_via_stream(self, iterator, container, object_name,
                             verify_hash=False, extra=None,
                             ex_use_lease=False, ex_blob_type=None,
                             ex_page_blob_size=None):
    """
    @inherits: L{StorageDriver.upload_object_via_stream}

    @param ex_blob_type: Storage class
    @type ex_blob_type: C{str}

    @param ex_page_blob_size: The maximum size to which the
        page blob can grow to
    @type ex_page_blob_size: C{int}

    @param ex_use_lease: Indicates if we must take a lease before upload
    @type ex_use_lease: C{bool}
    """

    if ex_blob_type is None:
        ex_blob_type = self.ex_blob_type

    self._check_values(ex_blob_type, ex_page_blob_size)

    object_path = self._get_object_path(container, object_name)

    upload_func = self._upload_in_chunks
    upload_func_kwargs = {'iterator': iterator,
                          'object_path': object_path,
                          'blob_type': ex_blob_type,
                          'lease': None}

    return self._put_object(container=container,
                            object_name=object_name,
                            object_size=ex_page_blob_size,
                            upload_func=upload_func,
                            upload_func_kwargs=upload_func_kwargs,
                            extra=extra, verify_hash=verify_hash,
                            blob_type=ex_blob_type,
                            use_lease=ex_use_lease)


def delete_object(self, obj):
    """
    @inherits: L{StorageDriver.delete_object}
    """
    object_path = self._get_object_path(obj.container, obj.name)
    response = self.connection.request(object_path, method='DELETE')

    if response.status == httplib.ACCEPTED:
        return True
    elif response.status == httplib.NOT_FOUND:
        raise ObjectDoesNotExistError(value=None, driver=self,
                                      object_name=obj.name)

    return False


def _update_metadata(self, headers, meta_data):
    """
    Update the given metadata in the headers

    @param headers: The headers dictionary to be updated
    @type headers: C{dict}

    @param meta_data: Metadata key value pairs
    @type meta_data: C{dict}
    """
    for key, value in list(meta_data.items()):
        key = 'x-ms-meta-%s' % (key)
        headers[key] = value


def _prepare_upload_headers(self, object_name, object_size,
                            extra, meta_data, blob_type):
    """
    Prepare headers for uploading an object

    @param object_name: The full name of the object being updated
    @type object_name: C{str}

    @param object_size: The size of the object. In case of PageBlobs,
        this indicates the maximum size the blob can grow to
    @type object_size: C{int}

    @param extra: Extra control data for the upload
    @type extra: C{dict}

    @param meta_data: Metadata key value pairs
    @type meta_data: C{dict}

    @param blob_type: Page or Block blob type
    @type blob_type: C{str}
    """
    headers = {}

    if blob_type is None:
        blob_type = self.ex_blob_type

    headers['x-ms-blob-type'] = blob_type

    self._update_metadata(headers, meta_data)

    if object_size is not None:
        headers['Content-Length'] = object_size

    if blob_type == 'PageBlob':
        # Page blobs are created empty; the maximum size is communicated
        # via the dedicated x-ms-blob-content-length header instead.
        headers['Content-Length'] = 0
        headers['x-ms-blob-content-length'] = object_size

    return headers


def _put_object(self, container, object_name, object_size, upload_func,
                upload_func_kwargs, file_path=None, extra=None,
                verify_hash=True, blob_type=None, use_lease=False):
    """
    Control function that does the real job of uploading data to a blob
    """
    extra = extra or {}
    meta_data = extra.get('meta_data', {})
    content_type = extra.get('content_type', None)

    headers = self._prepare_upload_headers(object_name, object_size,
                                           extra, meta_data, blob_type)

    object_path = self._get_object_path(container, object_name)

    # Get a lease if required and do the operations
    with AzureBlobLease(self, object_path, use_lease) as lease:
        if 'lease' in upload_func_kwargs:
            upload_func_kwargs['lease'] = lease

        lease.update_headers(headers)

        iterator = iter('')
        result_dict = self._upload_object(object_name, content_type,
                                          upload_func, upload_func_kwargs,
                                          object_path, headers=headers,
                                          file_path=file_path,
                                          iterator=iterator)

        response = result_dict['response']
        bytes_transferred = result_dict['bytes_transferred']
        data_hash = result_dict['data_hash']
        headers = response.headers
        response = response.response

        if response.status != httplib.CREATED:
            raise LibcloudError(
                'Unexpected status code, status_code=%s' % (response.status),
                driver=self)

        # Chunked uploads explicitly set this header to None since the
        # service does not return a hash for them immediately.
        server_hash = headers['content-md5']

        if server_hash:
            server_hash = binascii.hexlify(base64.b64decode(b(server_hash)))
            server_hash = server_hash.decode('utf-8')
        else:
            # TODO: HACK - We could poll the object for a while and get
            # the hash
            pass

        if (verify_hash and server_hash and data_hash != server_hash):
            raise ObjectHashMismatchError(
                value='MD5 hash checksum does not match',
                object_name=object_name, driver=self)

        return Object(name=object_name, size=bytes_transferred,
                      hash=headers['etag'], extra=None,
                      meta_data=meta_data, container=container,
                      driver=self)


def ex_set_object_metadata(self, obj, meta_data):
    """
    Set metadata for an object

    @param obj: The blob object
    @type obj: L{Object}

    @param meta_data: Metadata key value pairs
    @type meta_data: C{dict}
    """
    object_path = self._get_object_path(obj.container, obj.name)
    params = {'comp': 'metadata'}
    headers = {}

    self._update_metadata(headers, meta_data)

    response = self.connection.request(object_path, method='PUT',
                                       params=params,
                                       headers=headers)

    if response.status != httplib.OK:
        response.parse_error('Setting metadata')
CLOUDFILES_US = 'cloudfiles_us' CLOUDFILES_UK = 'cloudfiles_uk' diff --git a/libcloud/test/secrets.py-dist b/libcloud/test/secrets.py-dist index 3ba7dc5715..df221020fe 100644 --- a/libcloud/test/secrets.py-dist +++ b/libcloud/test/secrets.py-dist @@ -44,6 +44,9 @@ HOSTVIRTUAL_PARAMS = ('key',) STORAGE_S3_PARAMS = ('key', 'secret') STORAGE_GOOGLE_STORAGE_PARAMS = ('key', 'secret') +# Azure key is b64 encoded and must be decoded before signing requests +STORAGE_AZURE_BLOBS_PARAMS = ('account', 'cGFzc3dvcmQ=') + # Loadbalancer LB_BRIGHTBOX_PARAMS = ('user', 'key') LB_ELB_PARAMS = ('access_id', 'secret', 'region') diff --git a/libcloud/test/storage/fixtures/azure_blobs/list_containers_1.xml b/libcloud/test/storage/fixtures/azure_blobs/list_containers_1.xml new file mode 100644 index 0000000000..2d3678e697 --- /dev/null +++ b/libcloud/test/storage/fixtures/azure_blobs/list_containers_1.xml @@ -0,0 +1,29 @@ + + + 2 + + + container1 + https://account.blob.core.windows.net/container1 + + Mon, 07 Jan 2013 06:31:06 GMT + "0x8CFBAB7B4F23346" + unlocked + available + + + + + container2 + https://account.blob.core.windows.net/container2 + + Mon, 07 Jan 2013 06:31:07 GMT + "0x8CFBAB7B5B82D8E" + unlocked + available + + + + + /account/container3 + diff --git a/libcloud/test/storage/fixtures/azure_blobs/list_containers_2.xml b/libcloud/test/storage/fixtures/azure_blobs/list_containers_2.xml new file mode 100644 index 0000000000..3c0e6c3a87 --- /dev/null +++ b/libcloud/test/storage/fixtures/azure_blobs/list_containers_2.xml @@ -0,0 +1,30 @@ + + + /account/container3 + 2 + + + container3 + https://account.blob.core.windows.net/container3 + + Mon, 07 Jan 2013 06:31:08 GMT + "0x8CFBAB7B6452A71" + unlocked + available + + + + + container4 + https://account.blob.core.windows.net/container4 + + Fri, 04 Jan 2013 08:32:41 GMT + "0x8CFB86D32305484" + unlocked + available + + + + + + diff --git a/libcloud/test/storage/fixtures/azure_blobs/list_containers_empty.xml 
b/libcloud/test/storage/fixtures/azure_blobs/list_containers_empty.xml new file mode 100644 index 0000000000..d79dd65f16 --- /dev/null +++ b/libcloud/test/storage/fixtures/azure_blobs/list_containers_empty.xml @@ -0,0 +1,8 @@ + + + + + 100 + + + diff --git a/libcloud/test/storage/fixtures/azure_blobs/list_objects_1.xml b/libcloud/test/storage/fixtures/azure_blobs/list_objects_1.xml new file mode 100644 index 0000000000..713f8d6baa --- /dev/null +++ b/libcloud/test/storage/fixtures/azure_blobs/list_objects_1.xml @@ -0,0 +1,49 @@ + + + 2 + + + object1.txt + https://account.blob.core.windows.net/test_container/object1.txt + + Fri, 04 Jan 2013 09:48:06 GMT + 0x8CFB877BB56A6FB + 0 + application/octet-stream + + + 1B2M2Y8AsgTpgAmY7PhCfg== + + BlockBlob + unlocked + available + + + value1 + value2 + + + + object2.txt + https://account.blob.core.windows.net/test_container/object2.txt + + Sat, 05 Jan 2013 03:51:42 GMT + 0x8CFB90F1BA8CD8F + 1048576 + application/octet-stream + + + ttgbNgpWctgMJ0MPORU+LA== + + BlockBlob + unlocked + available + + + value1 + value2 + + + + 2!76!MDAwMDExIXNvbWUxMTcudHh0ITAwMDAyOCE5OTk5LTEyLTMxVDIzOjU5OjU5Ljk5OTk5OTlaIQ-- + diff --git a/libcloud/test/storage/fixtures/azure_blobs/list_objects_2.xml b/libcloud/test/storage/fixtures/azure_blobs/list_objects_2.xml new file mode 100644 index 0000000000..973985b665 --- /dev/null +++ b/libcloud/test/storage/fixtures/azure_blobs/list_objects_2.xml @@ -0,0 +1,43 @@ + + + object3.txt + 2 + + + object3.txt + https://account.blob.core.windows.net/test_container/object3.txt + + Sat, 05 Jan 2013 03:52:08 GMT + 0x8CFB90F2B6FC022 + 1048576 + application/octet-stream + + + ttgbNgpWctgMJ0MPORU+LA== + + BlockBlob + unlocked + available + + + + + object4.txt + https://account.blob.core.windows.net/test_container/object4.txt + + Fri, 04 Jan 2013 10:20:14 GMT + 0x8CFB87C38717450 + 0 + application/octet-stream + + 1B2M2Y8AsgTpgAmY7PhCfg== + + BlockBlob + unlocked + available + + + + + + diff --git 
a/libcloud/test/storage/fixtures/azure_blobs/list_objects_empty.xml b/libcloud/test/storage/fixtures/azure_blobs/list_objects_empty.xml new file mode 100644 index 0000000000..2ccbf4f20a --- /dev/null +++ b/libcloud/test/storage/fixtures/azure_blobs/list_objects_empty.xml @@ -0,0 +1,6 @@ + + + 2 + + + diff --git a/libcloud/test/storage/test_azure_blobs.py b/libcloud/test/storage/test_azure_blobs.py new file mode 100644 index 0000000000..eb30cc4cef --- /dev/null +++ b/libcloud/test/storage/test_azure_blobs.py @@ -0,0 +1,944 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import with_statement + +import os +import sys +import unittest +import tempfile + +from xml.etree import ElementTree as ET +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse + +from libcloud.common.types import InvalidCredsError +from libcloud.common.types import LibcloudError +from libcloud.storage.base import Container, Object +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import ContainerAlreadyExistsError +from libcloud.storage.types import InvalidContainerNameError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import ObjectHashMismatchError +from libcloud.storage.drivers.azure_blobs import AzureBlobsStorageDriver +from libcloud.storage.drivers.azure_blobs import AZURE_BLOCK_MAX_SIZE +from libcloud.storage.drivers.azure_blobs import AZURE_PAGE_CHUNK_SIZE +from libcloud.storage.drivers.dummy import DummyIterator + +from libcloud.test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 +from libcloud.test import MockHttpTestCase # pylint: disable-msg=E0611 +from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611 +from libcloud.test.secrets import STORAGE_AZURE_BLOBS_PARAMS + + +try: + parse_qs = urlparse.parse_qs +except AttributeError: + import cgi + parse_qs = cgi.parse_qs + + +class AzureBlobsMockHttp(StorageMockHttp, MockHttpTestCase): + + fixtures = StorageFileFixtures('azure_blobs') + base_headers = {} + + def _UNAUTHORIZED(self, method, url, body, headers): + return (httplib.UNAUTHORIZED, + '', + self.base_headers, + httplib.responses[httplib.UNAUTHORIZED]) + + def _list_containers_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('list_containers_empty.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _list_containers(self, method, url, body, 
headers): + query_string = urlparse.urlsplit(url).query + query = parse_qs(query_string) + + if 'marker' not in query: + body = self.fixtures.load('list_containers_1.xml') + else: + body = self.fixtures.load('list_containers_2.xml') + + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test_container_EMPTY(self, method, url, body, headers): + if method == 'DELETE': + body = '' + return (httplib.ACCEPTED, + body, + self.base_headers, + httplib.responses[httplib.ACCEPTED]) + + else: + body = self.fixtures.load('list_objects_empty.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _new__container_INVALID_NAME(self, method, url, body, headers): + return (httplib.BAD_REQUEST, + body, + self.base_headers, + httplib.responses[httplib.BAD_REQUEST]) + + def _test_container(self, method, url, body, headers): + query_string = urlparse.urlsplit(url).query + query = parse_qs(query_string) + + if 'marker' not in query: + body = self.fixtures.load('list_objects_1.xml') + else: + body = self.fixtures.load('list_objects_2.xml') + + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test_container100(self, method, url, body, headers): + body = '' + + if method != 'HEAD': + return (httplib.BAD_REQUEST, + body, + self.base_headers, + httplib.responses[httplib.BAD_REQUEST]) + + return (httplib.NOT_FOUND, + body, + self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _test_container200(self, method, url, body, headers): + body = '' + + if method != 'HEAD': + return (httplib.BAD_REQUEST, + body, + self.base_headers, + httplib.responses[httplib.BAD_REQUEST]) + + headers = {} + + headers['etag'] = '0x8CFB877BB56A6FB' + headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT' + headers['x-ms-lease-status'] = 'unlocked' + headers['x-ms-lease-state'] = 'available' + headers['x-ms-meta-meta1'] = 'value1' + + return (httplib.OK, + body, + headers, + 
httplib.responses[httplib.OK]) + + def _test_container200_test(self, method, url, body, headers): + body = '' + + if method != 'HEAD': + return (httplib.BAD_REQUEST, + body, + self.base_headers, + httplib.responses[httplib.BAD_REQUEST]) + + headers = {} + + headers['etag'] = '0x8CFB877BB56A6FB' + headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT' + headers['content-length'] = 12345 + headers['content-type'] = 'application/zip' + headers['x-ms-blob-type'] = 'Block' + headers['x-ms-lease-status'] = 'unlocked' + headers['x-ms-lease-state'] = 'available' + headers['x-ms-meta-rabbits'] = 'monkeys' + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _test2_test_list_containers(self, method, url, body, headers): + # test_get_object + body = self.fixtures.load('list_containers.xml') + headers = {'content-type': 'application/zip', + 'etag': '"e31208wqsdoj329jd"', + 'x-amz-meta-rabbits': 'monkeys', + 'content-length': 12345, + 'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT' + } + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container_ALREADY_EXISTS(self, method, url, body, headers): + # test_create_container + return (httplib.CONFLICT, + body, + headers, + httplib.responses[httplib.CONFLICT]) + + def _new_container(self, method, url, body, headers): + # test_create_container, test_delete_container + + headers = {} + + if method == 'PUT': + status = httplib.CREATED + + headers['etag'] = '0x8CFB877BB56A6FB' + headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT' + headers['x-ms-lease-status'] = 'unlocked' + headers['x-ms-lease-state'] = 'available' + headers['x-ms-meta-meta1'] = 'value1' + + elif method == 'DELETE': + status = httplib.NO_CONTENT + + return (status, + body, + headers, + httplib.responses[status]) + + def _new_container_DOESNT_EXIST(self, method, url, body, headers): + # test_delete_container + return (httplib.NOT_FOUND, + body, + headers, + 
httplib.responses[httplib.NOT_FOUND]) + + def _foo_bar_container_NOT_FOUND(self, method, url, body, headers): + # test_delete_container_not_found + return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.NOT_FOUND]) + + def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body, + headers): + # test_delete_object_not_found + return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.NOT_FOUND]) + + def _foo_bar_container_foo_bar_object(self, method, url, body, headers): + # test_delete_object + return (httplib.ACCEPTED, + body, + headers, + httplib.responses[httplib.ACCEPTED]) + + def _foo_bar_container_foo_test_upload(self, method, url, body, headers): + # test_upload_object_success + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd' + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_test_upload_block(self, method, url, + body, headers): + # test_upload_object_success + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_test_upload_page(self, method, url, + body, headers): + # test_upload_object_success + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_test_upload_blocklist(self, method, url, + body, headers): + # test_upload_object_success + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd' + + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_test_upload_lease(self, method, url, + body, headers): + # test_upload_object_success + action = headers['x-ms-lease-action'] + rheaders = 
{'x-ms-lease-id': 'someleaseid'} + body = '' + + if action == 'acquire': + return (httplib.CREATED, + body, + rheaders, + httplib.responses[httplib.CREATED]) + + else: + if headers.get('x-ms-lease-id', None) != 'someleaseid': + return (httplib.BAD_REQUEST, + body, + rheaders, + httplib.responses[httplib.BAD_REQUEST]) + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.CREATED]) + + +class AzureBlobsMockRawResponse(MockRawResponse): + + fixtures = StorageFileFixtures('azure_blobs') + + def _foo_bar_container_foo_test_upload_INVALID_HASH(self, method, url, + body, headers): + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd' + + # test_upload_object_invalid_hash1 + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_test_upload(self, method, url, body, headers): + # test_upload_object_success + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd' + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_bar_object(self, method, url, body, headers): + # test_upload_object_invalid_file_size + body = self._generate_random_data(1000) + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url, + body, headers): + # test_upload_object_invalid_file_size + body = '' + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + +class AzureBlobsTests(unittest.TestCase): + driver_type = AzureBlobsStorageDriver + driver_args = STORAGE_AZURE_BLOBS_PARAMS + mock_response_klass = AzureBlobsMockHttp + mock_raw_response_klass = AzureBlobsMockRawResponse + + @classmethod + def create_driver(self): + return self.driver_type(*self.driver_args) + + def setUp(self): + 
# Reconstructed test methods of AzureBlobsTests (the class statement and
# class attributes precede this chunk; re-indent by four spaces when
# splicing back into the class body).


def setUp(self):
    # Swap the connection classes for the mock ones and reset any
    # per-test response "type" selector.
    self.driver_type.connectionCls.conn_classes = (
        None, self.mock_response_klass)
    self.driver_type.connectionCls.rawResponseCls = \
        self.mock_raw_response_klass
    self.mock_response_klass.type = None
    self.mock_raw_response_klass.type = None
    self.driver = self.create_driver()


def tearDown(self):
    self._remove_test_file()


def _remove_test_file(self):
    # Best-effort cleanup of the temp file some download tests create.
    temp_path = os.path.abspath(__file__) + '.temp'
    try:
        os.unlink(temp_path)
    except OSError:
        pass


def test_invalid_credentials(self):
    self.mock_response_klass.type = 'UNAUTHORIZED'
    try:
        self.driver.list_containers()
    except InvalidCredsError:
        exc = sys.exc_info()[1]
        self.assertEqual(True, isinstance(exc, InvalidCredsError))
    else:
        self.fail('Exception was not thrown')


def test_list_containers_empty(self):
    self.mock_response_klass.type = 'list_containers_EMPTY'
    containers = self.driver.list_containers()
    self.assertEqual(len(containers), 0)


def test_list_containers_success(self):
    self.mock_response_klass.type = 'list_containers'
    AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2
    containers = self.driver.list_containers()
    self.assertEqual(len(containers), 4)

    extra = containers[1].extra
    self.assertTrue('last_modified' in extra)
    self.assertTrue('url' in extra)
    self.assertTrue('etag' in extra)
    self.assertTrue('lease' in extra)
    self.assertTrue('meta_data' in extra)


def test_list_container_objects_empty(self):
    self.mock_response_klass.type = 'EMPTY'
    container = Container(name='test_container', extra={},
                          driver=self.driver)
    objects = self.driver.list_container_objects(container=container)
    self.assertEqual(len(objects), 0)


def test_list_container_objects_success(self):
    self.mock_response_klass.type = None
    AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2

    container = Container(name='test_container', extra={},
                          driver=self.driver)
    objects = self.driver.list_container_objects(container=container)
    self.assertEqual(len(objects), 4)

    obj = objects[1]
    self.assertEqual(obj.name, 'object2.txt')
    self.assertEqual(obj.hash, '0x8CFB90F1BA8CD8F')
    self.assertEqual(obj.size, 1048576)
    self.assertEqual(obj.container.name, 'test_container')
    self.assertTrue('meta1' in obj.meta_data)
    self.assertTrue('meta2' in obj.meta_data)
    self.assertTrue('last_modified' in obj.extra)
    self.assertTrue('content_type' in obj.extra)
    self.assertTrue('content_encoding' in obj.extra)
    self.assertTrue('content_language' in obj.extra)


def test_get_container_doesnt_exist(self):
    self.mock_response_klass.type = None
    try:
        self.driver.get_container(container_name='test_container100')
    except ContainerDoesNotExistError:
        pass
    else:
        self.fail('Exception was not thrown')


def test_get_container_success(self):
    self.mock_response_klass.type = None
    container = self.driver.get_container(
        container_name='test_container200')

    self.assertTrue(container.name, 'test_container200')
    self.assertTrue(container.extra['etag'], '0x8CFB877BB56A6FB')
    self.assertTrue(container.extra['last_modified'],
                    'Fri, 04 Jan 2013 09:48:06 GMT')
    self.assertTrue(container.extra['lease']['status'], 'unlocked')
    self.assertTrue(container.extra['lease']['state'], 'available')
    self.assertTrue(container.extra['meta_data']['meta1'], 'value1')


def test_get_object_container_doesnt_exist(self):
    # This method makes two requests which makes mocking the response a bit
    # trickier
    self.mock_response_klass.type = None
    try:
        self.driver.get_object(container_name='test_container100',
                               object_name='test')
    except ContainerDoesNotExistError:
        pass
    else:
        self.fail('Exception was not thrown')


def test_get_object_success(self):
    # This method makes two requests which makes mocking the response a bit
    # trickier
    self.mock_response_klass.type = None
    obj = self.driver.get_object(container_name='test_container200',
                                 object_name='test')

    self.assertEqual(obj.name, 'test')
    self.assertEqual(obj.container.name, 'test_container200')
    self.assertEqual(obj.size, 12345)
    self.assertEqual(obj.hash, '0x8CFB877BB56A6FB')
    self.assertEqual(obj.extra['last_modified'],
                     'Fri, 04 Jan 2013 09:48:06 GMT')
    self.assertEqual(obj.extra['content_type'], 'application/zip')
    self.assertEqual(obj.meta_data['rabbits'], 'monkeys')


def test_create_container_invalid_name(self):
    # invalid container name
    self.mock_response_klass.type = 'INVALID_NAME'
    try:
        self.driver.create_container(container_name='new--container')
    except InvalidContainerNameError:
        pass
    else:
        self.fail('Exception was not thrown')


def test_create_container_already_exists(self):
    # container with this name already exists
    self.mock_response_klass.type = 'ALREADY_EXISTS'
    try:
        self.driver.create_container(container_name='new-container')
    except ContainerAlreadyExistsError:
        pass
    else:
        self.fail('Exception was not thrown')


def test_create_container_success(self):
    # success
    self.mock_response_klass.type = None
    name = 'new-container'
    container = self.driver.create_container(container_name=name)
    self.assertEqual(container.name, name)


def test_delete_container_doesnt_exist(self):
    container = Container(name='new_container', extra=None,
                          driver=self.driver)
    self.mock_response_klass.type = 'DOESNT_EXIST'
    try:
        self.driver.delete_container(container=container)
    except ContainerDoesNotExistError:
        pass
    else:
        self.fail('Exception was not thrown')


def test_delete_container_not_empty(self):
    self.mock_response_klass.type = None
    AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2

    container = Container(name='test_container', extra={},
                          driver=self.driver)
    try:
        self.driver.delete_container(container=container)
    except ContainerIsNotEmptyError:
        pass
    else:
        self.fail('Exception was not thrown')


def test_delete_container_success(self):
    self.mock_response_klass.type = 'EMPTY'
    AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2

    container = Container(name='test_container', extra={},
                          driver=self.driver)
    self.assertTrue(self.driver.delete_container(container=container))


def test_delete_container_not_found(self):
    self.mock_response_klass.type = 'NOT_FOUND'
    container = Container(name='foo_bar_container', extra={},
                          driver=self.driver)
    try:
        self.driver.delete_container(container=container)
    except ContainerDoesNotExistError:
        pass
    else:
        self.fail('Container does not exist but an exception was not' +
                  'thrown')


def test_download_object_success(self):
    container = Container(name='foo_bar_container', extra={},
                          driver=self.driver)
    obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                 container=container, meta_data=None,
                 driver=self.driver_type)
    destination_path = os.path.abspath(__file__) + '.temp'
    result = self.driver.download_object(obj=obj,
                                         destination_path=destination_path,
                                         overwrite_existing=False,
                                         delete_on_failure=True)
    self.assertTrue(result)


def test_download_object_invalid_file_size(self):
    self.mock_raw_response_klass.type = 'INVALID_SIZE'
    container = Container(name='foo_bar_container', extra={},
                          driver=self.driver)
    obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                 container=container, meta_data=None,
                 driver=self.driver_type)
    destination_path = os.path.abspath(__file__) + '.temp'
    result = self.driver.download_object(obj=obj,
                                         destination_path=destination_path,
                                         overwrite_existing=False,
                                         delete_on_failure=True)
    self.assertFalse(result)


def test_download_object_invalid_file_already_exists(self):
    self.mock_raw_response_klass.type = 'INVALID_SIZE'
    container = Container(name='foo_bar_container', extra={},
                          driver=self.driver)
    obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                 container=container, meta_data=None,
                 driver=self.driver_type)
    # Destination already exists and overwrite_existing=False -> error
    destination_path = os.path.abspath(__file__)
    try:
        self.driver.download_object(obj=obj,
                                    destination_path=destination_path,
                                    overwrite_existing=False,
                                    delete_on_failure=True)
    except LibcloudError:
        pass
    else:
        self.fail('Exception was not thrown')


def test_download_object_as_stream_success(self):
    container = Container(name='foo_bar_container', extra={},
                          driver=self.driver)
    obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                 container=container, meta_data=None,
                 driver=self.driver_type)

    stream = self.driver.download_object_as_stream(obj=obj,
                                                   chunk_size=None)
    self.assertTrue(hasattr(stream, '__iter__'))


def test_upload_object_invalid_ex_blob_type(self):
    # An unknown ex_blob_type value must be rejected with a LibcloudError
    file_path = os.path.abspath(__file__)
    container = Container(name='foo_bar_container', extra={},
                          driver=self.driver)
    object_name = 'foo_test_upload'
    try:
        self.driver.upload_object(file_path=file_path, container=container,
                                  object_name=object_name,
                                  verify_hash=True,
                                  ex_blob_type='invalid-blob')
    except LibcloudError:
        exc = sys.exc_info()[1]
        self.assertTrue(str(exc).lower().find('invalid blob type') != -1)
    else:
        self.fail('Exception was not thrown')


def test_upload_object_invalid_md5(self):
    # Invalid md5 is returned by azure
    self.mock_raw_response_klass.type = 'INVALID_HASH'

    container = Container(name='foo_bar_container', extra={},
                          driver=self.driver)
    object_name = 'foo_test_upload'
    file_path = os.path.abspath(__file__)
    try:
        self.driver.upload_object(file_path=file_path, container=container,
                                  object_name=object_name,
                                  verify_hash=True)
    except ObjectHashMismatchError:
        pass
    else:
        self.fail(
            'Invalid hash was returned but an exception was not thrown')
driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='BlockBlob') + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + + def test_upload_big_block_object_success(self): + file_path = tempfile.mktemp(suffix='.jpg') + file_size = AZURE_BLOCK_MAX_SIZE + 1 + + with open(file_path, 'w') as file_hdl: + file_hdl.write('0' * file_size) + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='BlockBlob') + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + + os.remove(file_path) + + def test_upload_page_object_success(self): + self.mock_response_klass.use_param = None + file_path = tempfile.mktemp(suffix='.jpg') + file_size = AZURE_PAGE_CHUNK_SIZE * 4 + + with open(file_path, 'w') as file_hdl: + file_hdl.write('0' * file_size) + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='PageBlob') + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + + os.remove(file_path) + + def test_upload_page_object_failure(self): + file_path = tempfile.mktemp(suffix='.jpg') + file_size = 
AZURE_PAGE_CHUNK_SIZE * 2 + 1 + + with open(file_path, 'w') as file_hdl: + file_hdl.write('0' * file_size) + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + + try: + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='PageBlob') + except LibcloudError: + e = sys.exc_info()[1] + self.assertTrue(str(e).lower().find('not aligned') != -1) + + os.remove(file_path) + + def test_upload_small_block_object_success_with_lease(self): + self.mock_response_klass.use_param = 'comp' + file_path = os.path.abspath(__file__) + file_size = os.stat(file_path).st_size + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='BlockBlob', + ex_use_lease=True) + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + self.mock_response_klass.use_param = None + + def test_upload_big_block_object_success_with_lease(self): + self.mock_response_klass.use_param = 'comp' + file_path = tempfile.mktemp(suffix='.jpg') + file_size = AZURE_BLOCK_MAX_SIZE * 2 + + with open(file_path, 'w') as file_hdl: + file_hdl.write('0' * file_size) + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='BlockBlob', + ex_use_lease=False) + + self.assertEqual(obj.name, 'foo_test_upload') + 
self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + + os.remove(file_path) + self.mock_response_klass.use_param = None + + def test_upload_page_object_success_with_lease(self): + self.mock_response_klass.use_param = 'comp' + file_path = tempfile.mktemp(suffix='.jpg') + file_size = AZURE_PAGE_CHUNK_SIZE * 4 + + with open(file_path, 'w') as file_hdl: + file_hdl.write('0' * file_size) + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='PageBlob', + ex_use_lease=True) + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + + os.remove(file_path) + self.mock_response_klass.use_param = None + + def test_upload_blob_object_via_stream(self): + self.mock_response_klass.use_param = 'comp' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + object_name = 'foo_test_upload' + iterator = DummyIterator(data=['2', '3', '5']) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra, + ex_blob_type='BlockBlob') + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, 3) + self.mock_response_klass.use_param = None + + def test_upload_blob_object_via_stream_with_lease(self): + self.mock_response_klass.use_param = 'comp' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + object_name = 'foo_test_upload' + iterator = DummyIterator(data=['2', '3', '5']) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + 
extra=extra, + ex_blob_type='BlockBlob', + ex_use_lease=True) + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, 3) + self.mock_response_klass.use_param = None + + def test_upload_page_object_via_stream(self): + self.mock_response_klass.use_param = 'comp' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + object_name = 'foo_test_upload' + blob_size = AZURE_PAGE_CHUNK_SIZE + iterator = DummyIterator(data=['1'] * blob_size) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra, + ex_blob_type='PageBlob', + ex_page_blob_size=blob_size) + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, blob_size) + self.mock_response_klass.use_param = None + + def test_upload_page_object_via_stream_with_lease(self): + self.mock_response_klass.use_param = 'comp' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + object_name = 'foo_test_upload' + blob_size = AZURE_PAGE_CHUNK_SIZE + iterator = DummyIterator(data=['1'] * blob_size) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra, + ex_blob_type='PageBlob', + ex_page_blob_size=blob_size, + ex_use_lease=True) + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, blob_size) + + def test_delete_object_not_found(self): + self.mock_response_klass.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, + meta_data=None, container=container, driver=self.driver) + try: + self.driver.delete_object(obj=obj) + except ObjectDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_delete_object_success(self): + container = 
Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, + meta_data=None, container=container, driver=self.driver) + + result = self.driver.delete_object(obj=obj) + self.assertTrue(result) + +if __name__ == '__main__': + sys.exit(unittest.main()) From 86cf5b8cd5764dc1fd139226ae4b5616105230ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 9 Feb 2013 12:42:52 +0000 Subject: [PATCH 020/143] Fix an issue introduced when removing "datacenter" stuff. Part of LIBCLOUD-290. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1444352 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/compute/drivers/ec2.py | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index eb99e174b0..db2e8db4cf 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1399,6 +1399,10 @@ class EC2NodeDriver(BaseEC2NodeDriver): website = 'http://aws.amazon.com/ec2/' path = '/' + region_name = 'us-east-1' + country = 'USA' + api_name = 'ec2_us_east' + features = {'create_node': ['ssh_key']} NODE_STATE_MAP = { @@ -1408,25 +1412,6 @@ class EC2NodeDriver(BaseEC2NodeDriver): 'terminated': NodeState.TERMINATED } - def __init__(self, key, secret=None, secure=True, host=None, port=None, - datacenter='us-east-1', **kwargs): - if hasattr(self, '_datacenter'): - datacenter = self._datacenter - - if datacenter not in VALID_EC2_DATACENTERS: - raise ValueError('Invalid datacenter: %s' % (datacenter)) - - details = REGION_DETAILS[datacenter] - self.region_name = datacenter - self.api_name = details['api_name'] - self.country = details['country'] - - self.connectionCls.host = details['endpoint'] - - super(EC2NodeDriver, self).__init__(key=key, secret=secret, - secure=secure, host=host, - port=port, **kwargs) - class IdempotentParamError(LibcloudError): """ 
From fbb27bca0acb57e75b0f9f78e93f6ac9058269ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 11 Feb 2013 05:27:57 +0000 Subject: [PATCH 021/143] Backport changes from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1444658 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 3 + libcloud/compute/drivers/opennebula.py | 120 +++++++++++++++--- .../instance_type_collection.xml | 6 + .../opennebula_3_8/instance_type_large.xml | 6 + .../opennebula_3_8/instance_type_medium.xml | 6 + .../opennebula_3_8/instance_type_small.xml | 6 + libcloud/test/compute/test_opennebula.py | 90 +++++++++++++ 7 files changed, 218 insertions(+), 19 deletions(-) create mode 100644 libcloud/test/compute/fixtures/opennebula_3_8/instance_type_collection.xml create mode 100644 libcloud/test/compute/fixtures/opennebula_3_8/instance_type_large.xml create mode 100644 libcloud/test/compute/fixtures/opennebula_3_8/instance_type_medium.xml create mode 100644 libcloud/test/compute/fixtures/opennebula_3_8/instance_type_small.xml diff --git a/CHANGES b/CHANGES index e36047422f..8c20adbe6f 100644 --- a/CHANGES +++ b/CHANGES @@ -153,6 +153,9 @@ Changes with Apache Libcloud 0.12.0: driver by providing ex_image_ids argument. (LIBCLOUD-294) [Chris Psaltis, Joseph Hall] + - Add support for OpenNebula 3.8. (LIBCLOUD-295) + [Guillaume ZITTA] + *) Storage - Add a new local storage driver. 
diff --git a/libcloud/compute/drivers/opennebula.py b/libcloud/compute/drivers/opennebula.py index 54b9898dc9..7700198094 100644 --- a/libcloud/compute/drivers/opennebula.py +++ b/libcloud/compute/drivers/opennebula.py @@ -46,11 +46,13 @@ 'OpenNebula_1_4_NodeDriver', 'OpenNebula_2_0_NodeDriver', 'OpenNebula_3_0_NodeDriver', - 'OpenNebula_3_2_NodeDriver'] + 'OpenNebula_3_2_NodeDriver', + 'OpenNebula_3_8_NodeDriver'] API_HOST = '' API_PORT = (4567, 443) API_SECURE = True +API_PLAIN_AUTH = False DEFAULT_API_VERSION = '3.2' @@ -146,13 +148,20 @@ def parse_error(self): class OpenNebulaConnection(ConnectionUserAndKey): """ Connection class for the OpenNebula.org driver. + with plain_auth support """ host = API_HOST port = API_PORT secure = API_SECURE + plain_auth = API_PLAIN_AUTH responseCls = OpenNebulaResponse + def __init__(self, *args, **kwargs): + if 'plain_auth' in kwargs: + self.plain_auth = kwargs.pop('plain_auth') + super(OpenNebulaConnection, self).__init__(*args, **kwargs) + def add_default_headers(self, headers): """ Add headers required by the OpenNebula.org OCCI interface. @@ -166,10 +175,13 @@ def add_default_headers(self, headers): @rtype: C{dict} @return: Dictionary containing updated headers. 
""" - pass_sha1 = hashlib.sha1(b(self.key)).hexdigest() + if self.plain_auth: + passwd = self.key + else: + passwd = hashlib.sha1(b(self.key)).hexdigest() headers['Authorization'] =\ ('Basic %s' % b64encode(b('%s:%s' % (self.user_id, - pass_sha1))).decode('utf-8')) + passwd))).decode('utf-8')) return headers @@ -289,6 +301,12 @@ def __new__(cls, key, secret=None, api_version=DEFAULT_API_VERSION, cls = OpenNebula_3_0_NodeDriver elif api_version in ['3.2']: cls = OpenNebula_3_2_NodeDriver + elif api_version in ['3.8']: + cls = OpenNebula_3_8_NodeDriver + if 'plain_auth' not in kwargs: + kwargs['plain_auth'] = cls.plain_auth + else: + cls.plain_auth = kwargs['plain_auth'] else: raise NotImplementedError( "No OpenNebulaNodeDriver found for API version %s" % @@ -1019,22 +1037,86 @@ def _to_sizes(self, object): @rtype: C{list} of L{OpenNebulaNodeSize} """ sizes = [] - ids = 1 + size_id = 1 + + attributes = [('name', str, None), ('ram', int, 'MEMORY'), + ('cpu', float, None), ('vcpu', float, None), + ('disk', str, None), ('bandwidth', float, None), + ('price', float, None)] + for element in object.findall('INSTANCE_TYPE'): - sizes.append(OpenNebulaNodeSize(id=ids, - name=element.findtext('NAME'), - ram=int(element.findtext('MEMORY')) - if element.findtext('MEMORY', None) else None, - cpu=float(element.findtext('CPU')) - if element.findtext('CPU', None) else None, - vcpu=int(element.findtext('VCPU')) - if element.findtext('VCPU', None) else None, - disk=element.findtext('DISK', None), - bandwidth=float(element.findtext('BANDWIDTH')) - if element.findtext('BANDWIDTH', None) else None, - price=float(element.findtext('PRICE')) - if element.findtext('PRICE', None) else None, - driver=self)) - ids += 1 + size_kwargs = {'id': size_id, 'driver': self} + values = self._get_attributes_values(attributes=attributes, + element=element) + size_kwargs.update(values) + + size = OpenNebulaNodeSize(**size_kwargs) + sizes.append(size) + size_id += 1 return sizes + + def 
_get_attributes_values(self, attributes, element): + values = {} + + for attribute_name, attribute_type, alias in attributes: + key = alias if alias else attribute_name.upper() + value = element.findtext(key) + + if value is not None: + value = attribute_type(value) + + values[attribute_name] = value + + return values + + +class OpenNebula_3_8_NodeDriver(OpenNebula_3_2_NodeDriver): + """ + OpenNebula.org node driver for OpenNebula.org v3.8. + """ + + plain_auth = API_PLAIN_AUTH + + def _to_sizes(self, object): + """ + Request a list of instance types and convert that list to a list of + OpenNebulaNodeSize objects. + + Request a list of instance types from the OpenNebula web interface, + and issue a request to convert each XML object representation of an + instance type to an OpenNebulaNodeSize object. + + @return: List of instance types. + @rtype: C{list} of L{OpenNebulaNodeSize} + """ + sizes = [] + size_id = 1 + + attributes = [('name', str, None), ('ram', int, 'MEMORY'), + ('cpu', float, None), ('vcpu', float, None), + ('disk', str, None), ('bandwidth', float, None), + ('price', float, None)] + + for element in object.findall('INSTANCE_TYPE'): + element = self.connection.request( + ('/instance_type/%s') % (element.attrib['name'])).object + + size_kwargs = {'id': size_id, 'driver': self} + values = self._get_attributes_values(attributes=attributes, + element=element) + size_kwargs.update(values) + + size = OpenNebulaNodeSize(**size_kwargs) + sizes.append(size) + size_id += 1 + return sizes + + def _ex_connection_class_kwargs(self): + """ + Set plain_auth as an extra L{OpenNebulaConnection_3_8} argument + + @return: C{dict} of L{OpenNebulaConnection_3_8} input arguments + """ + + return {'plain_auth': self.plain_auth} diff --git a/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_collection.xml b/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_collection.xml new file mode 100644 index 0000000000..97523ac41b --- /dev/null +++ 
b/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_collection.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_large.xml b/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_large.xml new file mode 100644 index 0000000000..141d5b666b --- /dev/null +++ b/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_large.xml @@ -0,0 +1,6 @@ + + + large + 8 + 8192 + diff --git a/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_medium.xml b/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_medium.xml new file mode 100644 index 0000000000..97bd72f3ec --- /dev/null +++ b/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_medium.xml @@ -0,0 +1,6 @@ + + + medium + 4 + 4096 + diff --git a/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_small.xml b/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_small.xml new file mode 100644 index 0000000000..346e640f40 --- /dev/null +++ b/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_small.xml @@ -0,0 +1,6 @@ + + + small + 1 + 1024 + diff --git a/libcloud/test/compute/test_opennebula.py b/libcloud/test/compute/test_opennebula.py index bbf366c47f..9cee04d4ee 100644 --- a/libcloud/test/compute/test_opennebula.py +++ b/libcloud/test/compute/test_opennebula.py @@ -631,6 +631,56 @@ def test_list_sizes(self): self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) +class OpenNebula_3_8_Tests(unittest.TestCase, OpenNebulaCaseMixin): + """ + OpenNebula.org test suite for OpenNebula v3.8. + """ + + def setUp(self): + """ + Setup test environment. + """ + OpenNebulaNodeDriver.connectionCls.conn_classes = ( + OpenNebula_3_8_MockHttp, OpenNebula_3_8_MockHttp) + self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.8',)) + + def test_list_sizes(self): + """ + Test ex_list_networks functionality. 
+ """ + sizes = self.driver.list_sizes() + + self.assertEqual(len(sizes), 3) + size = sizes[0] + self.assertEqual(size.id, '1') + self.assertEqual(size.name, 'small') + self.assertEqual(size.ram, 1024) + self.assertEqual(size.cpu, 1) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + + size = sizes[1] + self.assertEqual(size.id, '2') + self.assertEqual(size.name, 'medium') + self.assertEqual(size.ram, 4096) + self.assertEqual(size.cpu, 4) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + + size = sizes[2] + self.assertEqual(size.id, '3') + self.assertEqual(size.name, 'large') + self.assertEqual(size.ram, 8192) + self.assertEqual(size.cpu, 8) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + class OpenNebula_1_4_MockHttp(MockHttp): """ @@ -1019,5 +1069,45 @@ def _instance_type(self, method, url, body, headers): body = self.fixtures_3_2.load('instance_type_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +class OpenNebula_3_8_MockHttp(OpenNebula_3_2_MockHttp): + """ + Mock HTTP server for testing v3.8 of the OpenNebula.org compute driver. + """ + + fixtures_3_8 = ComputeFileFixtures('opennebula_3_8') + + def _instance_type(self, method, url, body, headers): + """ + Instance type pool. + """ + if method == 'GET': + body = self.fixtures_3_8.load('instance_type_collection.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _instance_type_small(self, method, url, body, headers): + """ + Small instance type. 
+ """ + if method == 'GET': + body = self.fixtures_3_8.load('instance_type_small.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _instance_type_medium(self, method, url, body, headers): + """ + Medium instance type pool. + """ + if method == 'GET': + body = self.fixtures_3_8.load('instance_type_medium.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _instance_type_large(self, method, url, body, headers): + """ + Large instance type pool. + """ + if method == 'GET': + body = self.fixtures_3_8.load('instance_type_large.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + if __name__ == '__main__': sys.exit(unittest.main()) From 2572305b2954e901558bd801e331bfd7d105a168 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 11 Feb 2013 10:00:33 +0000 Subject: [PATCH 022/143] Bump verison to 0.12.1. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1444701 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 2 +- libcloud/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 8c20adbe6f..90467fe25c 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,6 @@ -*- coding: utf-8 -*- -Changes with Apache Libcloud 0.12.0: +Changes with Apache Libcloud 0.12.1: *) General diff --git a/libcloud/__init__.py b/libcloud/__init__.py index f0b7fe2491..0c1b23955c 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ['__version__', 'enable_debug'] -__version__ = '0.12.0' +__version__ = '0.12.1' try: import paramiko From 37849e29b53d3817b8fb6a744a7566b8877e9b91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 11 Feb 2013 10:05:13 +0000 Subject: [PATCH 023/143] Correctly prune secrets.py from the release tarball. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1444708 13f79535-47bb-0310-9956-ffa450edef68 --- MANIFEST.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index c7a0554828..e85119108e 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -9,7 +9,6 @@ include RELEASING include README include tox.ini include libcloud/data/pricing.json -prune libcloud/test/secrets.py include demos/* include libcloud/test/*.py include libcloud/test/pricing_test.json @@ -22,3 +21,4 @@ include libcloud/test/compute/fixtures/*/* include libcloud/test/storage/fixtures/*/* include libcloud/test/loadbalancer/fixtures/*/* include libcloud/test/dns/fixtures/*/* +prune libcloud/test/secrets.py From 6083a7ee78d79772e824fb68e06587b024c90cd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 18 Feb 2013 18:06:23 +0000 Subject: [PATCH 024/143] Backport commits from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1447408 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 5 +++++ libcloud/compute/drivers/openstack.py | 19 ++++++++++++++----- .../openstack_v1.1/_servers_detail.json | 2 +- libcloud/test/compute/test_openstack.py | 7 ++++--- 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/CHANGES b/CHANGES index 90467fe25c..1517f31ffc 100644 --- a/CHANGES +++ b/CHANGES @@ -156,6 +156,11 @@ Changes with Apache Libcloud 0.12.1: - Add support for OpenNebula 3.8. (LIBCLOUD-295) [Guillaume ZITTA] + - Improve public and private IP address handling in OpenStack 1.1 driver. + Assume every IP address which doesn't have a label "public" or "internet" + is private. (LIBCLOUD-297) + [Grischa Meyer, Tomaz Muraus] + *) Storage - Add a new local storage driver. 
diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 25c5545f53..abf5bda2bc 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -1596,16 +1596,25 @@ def _to_node_from_obj(self, obj): return self._to_node(obj['server']) def _to_node(self, api_node): + public_networks_labels = ['public', 'internet'] + + public_ips, private_ips = [], [] + + for label, values in api_node['addresses'].items(): + ips = [v['addr'] for v in values] + + if label in public_networks_labels: + public_ips.extend(ips) + else: + private_ips.extend(ips) + return Node( id=api_node['id'], name=api_node['name'], state=self.NODE_STATE_MAP.get(api_node['status'], NodeState.UNKNOWN), - public_ips=[addr_desc['addr'] for addr_desc in - chain(api_node['addresses'].get('public', []), - api_node['addresses'].get('internet', []))], - private_ips=[addr_desc['addr'] for addr_desc in - api_node['addresses'].get('private', [])], + public_ips=public_ips, + private_ips=private_ips, driver=self, extra=dict( hostId=api_node['hostId'], diff --git a/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json b/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json index 990aad9905..91c2f0d290 100644 --- a/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json +++ b/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json @@ -1 +1 @@ -{"servers": [{"status": "BUILD", "updated": "2011-10-11T00:50:04Z", "hostId": "912566d83a13fbb357ea3f13c629363d9f7e1ba3f925b49f3d2ab725", "user_id": "rs-reach", "name": "lc-test-2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12065", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12065", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.35"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe47:788a"}], "private": 
[{"version": 4, "addr": "10.182.64.34"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe60:187d"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:51:39Z", "uuid": "02786501-714e-40af-8342-9c17eccb166d", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 25, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12065, "metadata": {}}, {"status": "ACTIVE", "updated": "2011-10-11T00:44:20Z", "hostId": "a024053a6201e6c6c12660aab3d8fd879e332e663a5e1fdbc02a0307", "user_id": "rs-reach", "name": "lc-test", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12064", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12064", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.30"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe77:32e3"}], "private": [{"version": 4, "addr": "10.182.64.29"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe6e:b7e2"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:45:02Z", "uuid": "ec53630b-e4fb-442a-a748-c376f5c4345b", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 100, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12064, "metadata": {}}]} \ No newline at end of file +{"servers": [{"status": "BUILD", "updated": "2011-10-11T00:50:04Z", "hostId": "912566d83a13fbb357ea3f13c629363d9f7e1ba3f925b49f3d2ab725", "user_id": 
"rs-reach", "name": "lc-test-2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12065", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12065", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.35"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe47:788a"}], "private": [{"version": 4, "addr": "10.182.64.34"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe60:187d"}], "mynetwork": [{"version": 4, "addr": "12.16.18.28"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:51:39Z", "uuid": "02786501-714e-40af-8342-9c17eccb166d", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 25, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12065, "metadata": {}}, {"status": "ACTIVE", "updated": "2011-10-11T00:44:20Z", "hostId": "a024053a6201e6c6c12660aab3d8fd879e332e663a5e1fdbc02a0307", "user_id": "rs-reach", "name": "lc-test", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12064", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12064", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.30"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe77:32e3"}], "private": [{"version": 4, "addr": "10.182.64.29"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe6e:b7e2"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:45:02Z", "uuid": 
"ec53630b-e4fb-442a-a748-c376f5c4345b", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 100, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12064, "metadata": {}}]} diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index ba6ac382aa..a73f1ab22c 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -161,7 +161,7 @@ def test_list_nodes(self): self.assertEqual(len(ret), 1) node = ret[0] self.assertEqual('67.23.21.33', node.public_ips[0]) - self.assertEqual('10.176.168.218', node.private_ips[0]) + self.assertTrue('10.176.168.218' in node.private_ips) self.assertEqual(node.extra.get('flavorId'), '1') self.assertEqual(node.extra.get('imageId'), '11') self.assertEqual(type(node.extra.get('metadata')), type(dict())) @@ -654,8 +654,9 @@ def test_list_nodes(self): self.assertEqual('12065', node.id) self.assertEqual('50.57.94.35', node.public_ips[0]) self.assertEqual('2001:4801:7808:52:16:3eff:fe47:788a', node.public_ips[1]) - self.assertEqual('10.182.64.34', node.private_ips[0]) - self.assertEqual('fec0:4801:7808:52:16:3eff:fe60:187d', node.private_ips[1]) + self.assertTrue('10.182.64.34' in node.private_ips) + self.assertTrue('12.16.18.28' in node.private_ips) + self.assertTrue('fec0:4801:7808:52:16:3eff:fe60:187d' in node.private_ips) self.assertEqual(node.extra.get('flavorId'), '2') self.assertEqual(node.extra.get('imageId'), '7') From c115bd14ebbe3c9cbffac0c69ce0fe9c2eb4c126 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Thu, 21 Feb 2013 02:52:30 +0000 Subject: [PATCH 025/143] Backport commit from trunk. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1448494 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/utils/py3.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/libcloud/utils/py3.py b/libcloud/utils/py3.py index 4537f7a486..3a41bc12eb 100644 --- a/libcloud/utils/py3.py +++ b/libcloud/utils/py3.py @@ -48,17 +48,22 @@ import urllib as urllib2 import urllib.parse as urlparse import xmlrpc.client as xmlrpclib + from urllib.parse import quote as urlquote from urllib.parse import unquote as urlunquote from urllib.parse import urlencode as urlencode from os.path import relpath + from imp import reload + + from builtins import bytes + from builtins import next + basestring = str def method_type(callable, instance, klass): return types.MethodType(callable, instance or klass()) - bytes = __builtins__['bytes'] def b(s): if isinstance(s, str): return s.encode('utf-8') @@ -70,7 +75,7 @@ def byte(n): # assume n is a Latin-1 string of length 1 return ord(n) u = str - next = __builtins__['next'] + def dictvalues(d): return list(d.values()) @@ -87,6 +92,8 @@ def tostring(node): from urllib import unquote as urlunquote from urllib import urlencode as urlencode + from __builtin__ import reload + if not PY25: from os.path import relpath @@ -122,8 +129,3 @@ def relpath(path, start=posixpath.curdir): if not rel_list: return posixpath.curdir return posixpath.join(*rel_list) - -if PY32: - from imp import reload -else: - from __builtin__ import reload From a340ba1742544a662a23f268fdff9979222a602f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 25 Feb 2013 15:27:29 +0000 Subject: [PATCH 026/143] Fix a regression which could cause test to fail under some circumstances because hash ordering is not guaranteed. Part of LIBCLOUD-245. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1449746 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/test/storage/test_cloudfiles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/test/storage/test_cloudfiles.py b/libcloud/test/storage/test_cloudfiles.py index 89e046fc9e..9f239b1337 100644 --- a/libcloud/test/storage/test_cloudfiles.py +++ b/libcloud/test/storage/test_cloudfiles.py @@ -640,7 +640,7 @@ def test_ex_get_object_temp_url(self, time): ret = self.driver.ex_get_object_temp_url(obj, 'GET') temp_url = 'https://storage101.ord1.clouddrive.com/v1/MossoCloudFS/foo_bar_container/foo_bar_object?temp_url_expires=60&temp_url_sig=%s' % (sig) - self.assertEquals(ret, temp_url) + self.assertEquals(''.join(sorted(ret)), ''.join(sorted(temp_url))) def test_ex_get_object_temp_url_no_key_raises_key_error(self): self.driver.ex_get_meta_data = mock.Mock() From fb43deaae1c8c8d27380370a62b785b08f993033 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 25 Feb 2013 19:46:02 +0000 Subject: [PATCH 027/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1449848 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 1517f31ffc..3cd94440de 100644 --- a/CHANGES +++ b/CHANGES @@ -161,6 +161,10 @@ Changes with Apache Libcloud 0.12.1: is private. (LIBCLOUD-297) [Grischa Meyer, Tomaz Muraus] + - Fix create_node in OpenStack driver to work correctly if 'adminPass' + attribute is not present in the response. (LIBCLOUD-292) + [Gavin McCance, Tomaz Muraus] + *) Storage - Add a new local storage driver. From cea5dec72240448d1b2742d5f71d5a8e3c74ac68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 25 Feb 2013 22:22:35 +0000 Subject: [PATCH 028/143] Backport commit from trunk. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1449932 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 3cd94440de..852b05eb2c 100644 --- a/CHANGES +++ b/CHANGES @@ -161,6 +161,10 @@ Changes with Apache Libcloud 0.12.1: is private. (LIBCLOUD-297) [Grischa Meyer, Tomaz Muraus] + - Add missing 'deletd' -> terminated mapping to OpenStack driver. + (LIBCLOUD-276) + [Jayyy V] + - Fix create_node in OpenStack driver to work correctly if 'adminPass' attribute is not present in the response. (LIBCLOUD-292) [Gavin McCance, Tomaz Muraus] From deab48bcc829f119bd6a168cb781a2e44e9c5125 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 26 Feb 2013 23:01:51 +0000 Subject: [PATCH 029/143] Update changes. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1450512 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/CHANGES b/CHANGES index 852b05eb2c..62792fc446 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,14 @@ -*- coding: utf-8 -*- +Changes with Apache Libcloud in development: + + *) Compute + + - Improve public and private IP address handling in OpenStack 1.1 driver. + Assume every IP address which doesn't have a label "public" or "internet" + is private. (LIBCLOUD-297) + [Grischa Meyer, Tomaz Muraus] + Changes with Apache Libcloud 0.12.1: *) General @@ -156,11 +165,6 @@ Changes with Apache Libcloud 0.12.1: - Add support for OpenNebula 3.8. (LIBCLOUD-295) [Guillaume ZITTA] - - Improve public and private IP address handling in OpenStack 1.1 driver. - Assume every IP address which doesn't have a label "public" or "internet" - is private. (LIBCLOUD-297) - [Grischa Meyer, Tomaz Muraus] - - Add missing 'deletd' -> terminated mapping to OpenStack driver. 
(LIBCLOUD-276) [Jayyy V] From e42da62ca359ac939aa7e30dd92ac829070c5ec8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 26 Feb 2013 23:22:40 +0000 Subject: [PATCH 030/143] Backport commits from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1450524 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 6 ++++++ libcloud/dns/drivers/rackspace.py | 13 +++++++------ libcloud/test/dns/test_rackspace.py | 1 + 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/CHANGES b/CHANGES index 62792fc446..24e889cde0 100644 --- a/CHANGES +++ b/CHANGES @@ -9,6 +9,12 @@ Changes with Apache Libcloud in development: is private. (LIBCLOUD-297) [Grischa Meyer, Tomaz Muraus] + *) Compute + + - Allow user to specify 'priority' extra argument when creating a MX or SRV + record. + [Brian Jinwright, Tomaz Muraus] + Changes with Apache Libcloud 0.12.1: *) General diff --git a/libcloud/dns/drivers/rackspace.py b/libcloud/dns/drivers/rackspace.py index 4943e93c1f..dd30171bc7 100644 --- a/libcloud/dns/drivers/rackspace.py +++ b/libcloud/dns/drivers/rackspace.py @@ -34,7 +34,7 @@ from libcloud.dns.base import DNSDriver, Zone, Record VALID_ZONE_EXTRA_PARAMS = ['email', 'comment', 'ns1'] -VALID_RECORD_EXTRA_PARAMS = ['ttl', 'comment'] +VALID_RECORD_EXTRA_PARAMS = ['ttl', 'comment', 'priority'] class RackspaceDNSResponse(OpenStack_1_1_Response): @@ -241,6 +241,9 @@ def create_record(self, name, zone, type, data, extra=None): if 'ttl' in extra: data['ttl'] = int(extra['ttl']) + if 'priority' in extra: + data['priority'] = int(extra['priority']) + payload = {'records': [data]} self.connection.set_context({'resource': 'zone', 'id': zone.id}) response = self.connection.async_request(action='/domains/%s/records' @@ -340,11 +343,9 @@ def _to_record(self, data, zone): record_data = data['data'] extra = {'fqdn': fqdn} - if 'ttl' in data: - extra['ttl'] = data['ttl'] - - if 'comment' in data: - extra['comment'] = data['comment'] + for key in 
VALID_RECORD_EXTRA_PARAMS: + if key in data: + extra[key] = data[key] record = Record(id=str(id), name=name, type=type, data=record_data, zone=zone, driver=self, extra=extra) diff --git a/libcloud/test/dns/test_rackspace.py b/libcloud/test/dns/test_rackspace.py index a2b99fcfda..ab61a2035a 100644 --- a/libcloud/test/dns/test_rackspace.py +++ b/libcloud/test/dns/test_rackspace.py @@ -109,6 +109,7 @@ def test_list_records_success(self): self.assertEqual(records[0].type, RecordType.A) self.assertEqual(records[0].data, '127.7.7.7') self.assertEqual(records[0].extra['ttl'], 777) + self.assertEqual(records[0].extra['comment'], 'lulz') self.assertEqual(records[0].extra['fqdn'], 'test3.%s' % (records[0].zone.domain)) From a95d630c015111db63e592be8d1fff56631362b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Wed, 27 Feb 2013 22:40:27 +0000 Subject: [PATCH 031/143] Update changes. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1451010 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/CHANGES b/CHANGES index 24e889cde0..64d0b36349 100644 --- a/CHANGES +++ b/CHANGES @@ -2,15 +2,21 @@ Changes with Apache Libcloud in development: - *) Compute + *) General - - Improve public and private IP address handling in OpenStack 1.1 driver. - Assume every IP address which doesn't have a label "public" or "internet" - is private. (LIBCLOUD-297) - [Grischa Meyer, Tomaz Muraus] + - Fix Python 3.x related regressions. (LIBCLOUD-245) + Reported by Arfrever Frehtes Taifersar Arahesis. + [Tomaz Muraus] *) Compute + - Improve public and private IP address handling in OpenStack 1.1 driver. + Assume every IP address which doesn't have a label "public" or "internet" + is private. (LIBCLOUD-297) + [Grischa Meyer, Tomaz Muraus] + + *) DNS + - Allow user to specify 'priority' extra argument when creating a MX or SRV record. 
[Brian Jinwright, Tomaz Muraus] From 982cb7cfbaee6c23cba500ab9e9b72ac2343eaab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Wed, 27 Feb 2013 22:55:02 +0000 Subject: [PATCH 032/143] Bump version to 0.12.2. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1451013 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 2 +- libcloud/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 64d0b36349..9be82d5258 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,6 @@ -*- coding: utf-8 -*- -Changes with Apache Libcloud in development: +Changes with Apache Libcloud 0.12.2: *) General diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 0c1b23955c..d39656cde0 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ['__version__', 'enable_debug'] -__version__ = '0.12.1' +__version__ = '0.12.2' try: import paramiko From 3ed5a35bee778c45d1f78bea13d1881089e728df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Thu, 7 Mar 2013 06:36:41 +0000 Subject: [PATCH 033/143] Backport commit r1453705 from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1453708 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 10 ++++++++++ libcloud/compute/deployment.py | 9 +++++++-- libcloud/compute/ssh.py | 6 ++++++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 9be82d5258..cb1e68dbe8 100644 --- a/CHANGES +++ b/CHANGES @@ -8,6 +8,16 @@ Changes with Apache Libcloud 0.12.2: Reported by Arfrever Frehtes Taifersar Arahesis. [Tomaz Muraus] + - Improve deploy code to work correctly if the ssh user doesn't have access + to the /root directory. + + Previously the ScriptDeployment script was stored in /root folder by + default. Now it's stored in users home directory under filename + ~/libcloud_deploymeny_.sh. (LIBCLOUD-302) + + Reported by rotem on #libcloud. 
+ [Tomaz Muraus] + *) Compute - Improve public and private IP address handling in OpenStack 1.1 driver. diff --git a/libcloud/compute/deployment.py b/libcloud/compute/deployment.py index 53bfddb314..8c5bd8edbe 100644 --- a/libcloud/compute/deployment.py +++ b/libcloud/compute/deployment.py @@ -91,7 +91,7 @@ def __init__(self, source, target): @keyword source: Local path of file to be installed @type target: C{str} - @keyword target: Path to install file on node + @keyword target: Path to install file on node """ self.source = source self.target = target @@ -137,8 +137,11 @@ def __init__(self, script, name=None, delete=False): self.exit_status = None self.delete = delete self.name = name + if self.name is None: - self.name = "/root/deployment_%s.sh" % (binascii.hexlify(os.urandom(4))) + # File is put under user's home directory + # (~/libcloud_deployment_.sh) + self.name = 'libcloud_deployment_%s.sh' % (binascii.hexlify(os.urandom(4))) def run(self, node, client): """ @@ -149,8 +152,10 @@ def run(self, node, client): client.put(path=self.name, chmod=int('755', 8), contents=self.script) self.stdout, self.stderr, self.exit_status = client.run(self.name) + if self.delete: client.delete(self.name) + return node diff --git a/libcloud/compute/ssh.py b/libcloud/compute/ssh.py index f171a8fde4..39a037a18b 100644 --- a/libcloud/compute/ssh.py +++ b/libcloud/compute/ssh.py @@ -157,8 +157,13 @@ def put(self, path, contents=None, chmod=None, mode='w'): sftp = self.client.open_sftp() # less than ideal, but we need to mkdir stuff otherwise file() fails head, tail = psplit(path) + if path[0] == "/": sftp.chdir("/") + else: + # Relative path - start from a home directory (~) + sftp.chdir('.') + for part in head.split("/"): if part != "": try: @@ -168,6 +173,7 @@ def put(self, path, contents=None, chmod=None, mode='w'): # catch EEXIST consistently *sigh* pass sftp.chdir(part) + ak = sftp.file(tail, mode=mode) ak.write(contents) if chmod is not None: From 
e7f5df9b2e7c82175bd1997bf30c217f257fbf4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 11 Mar 2013 01:58:13 +0000 Subject: [PATCH 034/143] Backport commits from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1454972 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 13 +- libcloud/compute/deployment.py | 12 +- libcloud/compute/drivers/digitalocean.py | 206 ++++++++++++++++++ libcloud/compute/providers.py | 4 +- libcloud/compute/ssh.py | 146 +++++++++++-- libcloud/compute/types.py | 1 + .../fixtures/digitalocean/create_node.json | 1 + .../fixtures/digitalocean/destroy_node.json | 1 + .../compute/fixtures/digitalocean/error.txt | 1 + .../digitalocean/ex_create_ssh_key.json | 1 + .../digitalocean/ex_destroy_ssh_key.json | 1 + .../digitalocean/ex_list_ssh_keys.json | 1 + .../fixtures/digitalocean/list_images.json | 145 ++++++++++++ .../fixtures/digitalocean/list_locations.json | 1 + .../fixtures/digitalocean/list_nodes.json | 1 + .../digitalocean/list_nodes_empty.json | 1 + .../fixtures/digitalocean/list_sizes.json | 1 + .../fixtures/digitalocean/reboot_node.json | 1 + libcloud/test/compute/test_deployment.py | 20 ++ libcloud/test/compute/test_digitalocean.py | 153 +++++++++++++ libcloud/test/compute/test_ssh_client.py | 96 +++++--- libcloud/test/secrets.py-dist | 1 + 22 files changed, 753 insertions(+), 55 deletions(-) create mode 100644 libcloud/compute/drivers/digitalocean.py create mode 100644 libcloud/test/compute/fixtures/digitalocean/create_node.json create mode 100644 libcloud/test/compute/fixtures/digitalocean/destroy_node.json create mode 100644 libcloud/test/compute/fixtures/digitalocean/error.txt create mode 100644 libcloud/test/compute/fixtures/digitalocean/ex_create_ssh_key.json create mode 100644 libcloud/test/compute/fixtures/digitalocean/ex_destroy_ssh_key.json create mode 100644 libcloud/test/compute/fixtures/digitalocean/ex_list_ssh_keys.json create mode 100644 
libcloud/test/compute/fixtures/digitalocean/list_images.json create mode 100644 libcloud/test/compute/fixtures/digitalocean/list_locations.json create mode 100644 libcloud/test/compute/fixtures/digitalocean/list_nodes.json create mode 100644 libcloud/test/compute/fixtures/digitalocean/list_nodes_empty.json create mode 100644 libcloud/test/compute/fixtures/digitalocean/list_sizes.json create mode 100644 libcloud/test/compute/fixtures/digitalocean/reboot_node.json create mode 100644 libcloud/test/compute/test_digitalocean.py diff --git a/CHANGES b/CHANGES index cb1e68dbe8..211506c1bf 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,6 @@ -*- coding: utf-8 -*- -Changes with Apache Libcloud 0.12.2: +Changes with Apache Libcloud in development: *) General @@ -25,7 +25,16 @@ Changes with Apache Libcloud 0.12.2: is private. (LIBCLOUD-297) [Grischa Meyer, Tomaz Muraus] - *) DNS + - Add new driver for DigitalOcean provider - https://www.digitalocean.com/. + (LIBCLOUD-304) + [Tomaz Muraus] + + - Fix a regression in ParamikoSSHClient.run method which caused this methid + to only work as expected if you passed an absolute or a relative path to + the script to it. (LIBCLOUD-278) + [Tomaz Muraus] + + *) DNS - Allow user to specify 'priority' extra argument when creating a MX or SRV record. 
diff --git a/libcloud/compute/deployment.py b/libcloud/compute/deployment.py index 8c5bd8edbe..6f24a067d2 100644 --- a/libcloud/compute/deployment.py +++ b/libcloud/compute/deployment.py @@ -149,9 +149,17 @@ def run(self, node, client): See also L{Deployment.run} """ + file_path = client.put(path=self.name, chmod=int('755', 8), + contents=self.script) - client.put(path=self.name, chmod=int('755', 8), contents=self.script) - self.stdout, self.stderr, self.exit_status = client.run(self.name) + # Pre-pend cwd if user specified a relative path + if self.name[0] != '/': + base_path = os.path.dirname(file_path) + name = os.path.join(base_path, self.name) + else: + name = self.name + + self.stdout, self.stderr, self.exit_status = client.run(name) if self.delete: client.delete(self.name) diff --git a/libcloud/compute/drivers/digitalocean.py b/libcloud/compute/drivers/digitalocean.py new file mode 100644 index 0000000000..cde0b184e9 --- /dev/null +++ b/libcloud/compute/drivers/digitalocean.py @@ -0,0 +1,206 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Digital Ocean Driver +""" + +from libcloud.utils.py3 import httplib + +from libcloud.common.base import ConnectionUserAndKey, JsonResponse +from libcloud.compute.types import Provider, NodeState, InvalidCredsError +from libcloud.compute.base import NodeDriver +from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation + + +class DigitalOceanResponse(JsonResponse): + def parse_error(self): + if self.status == httplib.FOUND and '/api/error' in self.body: + # Hacky, but DigitalOcean error responses are awful + raise InvalidCredsError(self.body) + + +class SSHKey(object): + def __init__(self, id, name, pub_key): + self.id = id + self.name = name + self.pub_key = pub_key + + def __repr__(self): + return (('') % + (self.id, self.name, self.pub_key)) + + +class DigitalOceanConnection(ConnectionUserAndKey): + """ + Connection class for the DigitalOcean driver. + """ + + host = 'api.digitalocean.com' + responseCls = DigitalOceanResponse + + def add_default_params(self, params): + """ + Add parameters that are necessary for every request + + This method adds C{api_key} and C{api_responseFormat} to + the request. + """ + params['client_id'] = self.user_id + params['api_key'] = self.key + return params + + +class DigitalOceanNodeDriver(NodeDriver): + """ + DigitalOceanNode node driver. 
+ """ + + connectionCls = DigitalOceanConnection + + type = Provider.DIGITAL_OCEAN + name = 'Digital Ocean' + website = 'https://www.digitalocean.com' + features = {'create_node': ['ssh_key']} + + NODE_STATE_MAP = {'new': NodeState.PENDING, + 'off': NodeState.REBOOTING, + 'active': NodeState.RUNNING} + + def list_nodes(self): + data = self.connection.request('/droplets').object['droplets'] + return list(map(self._to_node, data)) + + def list_locations(self): + data = self.connection.request('/regions').object['regions'] + return list(map(self._to_location, data)) + + def list_images(self): + data = self.connection.request('/images').object['images'] + return list(map(self._to_image, data)) + + def list_sizes(self): + data = self.connection.request('/sizes').object['sizes'] + return list(map(self._to_size, data)) + + def create_node(self, name, size, image, location, ex_ssh_key_ids=None, + **kwargs): + """ + Create a node. + + @keyword ex_ssh_key_ids: A list of ssh key ids which will be added to + the server. (optional) + @type ex_ssh_key_ids: C{list} of C{str} + + @return: The newly created node. + @rtype: L{Node} + """ + params = {'name': name, 'size_id': size.id, 'image_id': image.id, + 'region_id': location.id} + + if ex_ssh_key_ids: + params['ssh_key_ids'] = ','.join(ex_ssh_key_ids) + + data = self.connection.request('/droplets/new', params=params).object + return self._to_node(data=data['droplet']) + + def reboot_node(self, node): + res = self.connection.request('/droplets/%s/reboot/' % (node.id)) + return res.status == httplib.OK + + def destroy_node(self, node): + res = self.connection.request('/droplets/%s/destroy/' % (node.id)) + return res.status == httplib.OK + + def ex_list_ssh_keys(self): + """ + List all the available SSH keys. + + @return: Available SSH keys. 
+ @rtype: C{list} of L{SSHKey} + """ + data = self.connection.request('/ssh_keys').object['ssh_keys'] + return list(map(self._to_ssh_key, data)) + + def ex_create_ssh_key(self, name, ssh_key_pub): + """ + Create a new SSH key. + + @param name: Key name (required) + @type name: C{str} + + @param name: Valid public key string (required) + @type name: C{str} + """ + params = {'name': name, 'ssh_pub_key': ssh_key_pub} + data = self.connection.request('/ssh_keys/new/', method='GET', + params=params).object + assert 'ssh_key' in data + return self._to_ssh_key(data=data['ssh_key']) + + def ex_destroy_ssh_key(self, key_id): + """ + Delete an existing SSH key. + + @param key_id: SSH key id (required) + @type key_id: C{str} + """ + res = self.connection.request('/ssh_keys/%s/destroy/' % (key_id)) + return res.status == httplib.OK + + def _to_node(self, data): + extra_keys = ['backups_active', 'region_id'] + if 'status' in data: + state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN) + else: + state = NodeState.UNKNOWN + + if 'ip_address' in data and data['ip_address'] is not None: + public_ips = [data['ip_address']] + else: + public_ips = [] + + extra = {} + for key in extra_keys: + if key in data: + extra[key] = data[key] + + node = Node(id=data['id'], name=data['name'], state=state, + public_ips=public_ips, private_ips=None, extra=extra, + driver=self) + return node + + def _to_image(self, data): + extra = {'distribution': data['distribution']} + return NodeImage(id=data['id'], name=data['name'], extra=extra, + driver=self) + + def _to_location(self, data): + return NodeLocation(id=data['id'], name=data['name'], country=None, + driver=self) + + def _to_size(self, data): + ram = data['name'].lower() + + if 'mb' in ram: + ram = int(ram.replace('mb', '')) + elif 'gb' in ram: + ram = int(ram.replace('gb', '')) * 1024 + + return NodeSize(id=data['id'], name=data['name'], ram=ram, disk=0, + bandwidth=0, price=0, driver=self) + + def _to_ssh_key(self, data): + return 
SSHKey(id=data['id'], name=data['name'], + pub_key=data.get('ssh_pub_key', None)) diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index d1c078c2c8..8e23f4fefa 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -129,7 +129,9 @@ Provider.HOSTVIRTUAL: ('libcloud.compute.drivers.hostvirtual', 'HostVirtualNodeDriver'), Provider.ABIQUO: - ('libcloud.compute.drivers.abiquo', 'AbiquoNodeDriver') + ('libcloud.compute.drivers.abiquo', 'AbiquoNodeDriver'), + Provider.DIGITAL_OCEAN: + ('libcloud.compute.drivers.digitalocean', 'DigitalOceanNodeDriver') } diff --git a/libcloud/compute/ssh.py b/libcloud/compute/ssh.py index 39a037a18b..ea5152edd8 100644 --- a/libcloud/compute/ssh.py +++ b/libcloud/compute/ssh.py @@ -28,6 +28,10 @@ # warning on Python 2.6. # Ref: https://bugs.launchpad.net/paramiko/+bug/392973 +import os +import subprocess +import logging + from os.path import split as psplit from os.path import join as pjoin @@ -66,7 +70,9 @@ def connect(self): """ Connect to the remote node over SSH. - @return: C{bool} + @return: True if the connection has been successfuly established, False + otherwise. + @rtype: C{bool} """ raise NotImplementedError( 'connect not implemented for this ssh client') @@ -86,6 +92,9 @@ def put(self, path, contents=None, chmod=None, mode='w'): @type mode: C{str} @keyword mode: Mode in which the file is opened. + + @return: Full path to the location where a file has been saved. + @rtype: C{str} """ raise NotImplementedError( 'put not implemented for this ssh client') @@ -96,6 +105,10 @@ def delete(self, path): @type path: C{str} @keyword path: File path on the remote node. + + @return: True if the file has been successfuly deleted, False + otherwise. + @rtype: C{bool} """ raise NotImplementedError( 'delete not implemented for this ssh client') @@ -115,6 +128,10 @@ def run(self, cmd): def close(self): """ Shutdown connection to the remote node. 
+ + @return: True if the connection has been successfuly closed, False + otherwise. + @rtype: C{bool} """ raise NotImplementedError( 'close not implemented for this ssh client') @@ -174,6 +191,8 @@ def put(self, path, contents=None, chmod=None, mode='w'): pass sftp.chdir(part) + cwd = sftp.getcwd() + ak = sftp.file(tail, mode=mode) ak.write(contents) if chmod is not None: @@ -181,25 +200,20 @@ def put(self, path, contents=None, chmod=None, mode='w'): ak.close() sftp.close() + if path[0] == '/': + file_path = path + else: + file_path = pjoin(cwd, path) + + return file_path + def delete(self, path): sftp = self.client.open_sftp() sftp.unlink(path) sftp.close() + return True def run(self, cmd): - if cmd[0] != '/': - # If 'cmd' based on relative path, - # set the absoute path joining the HOME path - sftp = self.client.open_sftp() - # Chdir to its own directory is mandatory because otherwise - # the 'getcwd()' method returns None - sftp.chdir('.') - cwd = sftp.getcwd() - sftp.close() - - # Join the command to the current path - cmd = pjoin(cwd, cmd) - # based on exec_command() bufsize = -1 t = self.client.get_transport() @@ -217,12 +231,112 @@ def run(self, cmd): def close(self): self.client.close() + return True class ShellOutSSHClient(BaseSSHClient): - # TODO: write this one + """ + This client shells out to "ssh" binary to run commands on the remote + server. + + Note: This client should not be used in production. 
+ """ + + def __init__(self, hostname, port=22, username='root', password=None, + key=None, timeout=None): + super(ShellOutSSHClient, self).__init__(hostname, port, username, + password, key, timeout) + if self.password: + raise ValueError('ShellOutSSHClient only supports key auth') + + child = subprocess.Popen(['ssh'], stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + child.communicate() + + if child.returncode == 127: + raise ValueError('ssh client is not available') + + self.logger = self._get_and_setup_logger() + + def connect(self): + """ + This client doesn't support persistent connections establish a new + connection every time "run" method is called. + """ + return True + + def run(self, cmd): + return self._run_remote_shell_command([cmd]) + + def put(self, path, contents=None, chmod=None, mode='w'): + if mode == 'w': + redirect = '>' + elif mode == 'a': + redirect = '>>' + else: + raise ValueError('Invalid mode: ' + mode) + + cmd = ['echo "%s" %s %s' % (contents, redirect, path)] + self._run_remote_shell_command(cmd) + return path + + def delete(self, path): + cmd = ['rm', '-rf', path] + self._run_remote_shell_command(cmd) + return True + + def close(self): + return True + + def _get_and_setup_logger(self): + logger = logging.getLogger('libcloud.compute.ssh') + path = os.getenv('LIBCLOUD_DEBUG') + + if path: + handler = logging.FileHandler(path) + logger.addHandler(handler) + logger.setLevel(logging.DEBUG) + + return logger + + def _get_base_ssh_command(self): + cmd = ['ssh'] + + if self.key: + cmd += ['-i', self.key] + + if self.timeout: + cmd += ['-oConnectTimeout=%s' % (self.timeout)] + + cmd += ['%s@%s' % (self.username, self.hostname)] + + return cmd + + def _run_remote_shell_command(self, cmd): + """ + Run a command on a remote server. + + @param cmd: Command to run. + @type cmd: C{list} of C{str} + + @return: Command stdout, stderr and status code. 
+ @rtype: C{tuple} + """ + base_cmd = self._get_base_ssh_command() + full_cmd = base_cmd + [' '.join(cmd)] + + self.logger.debug('Executing command: "%s"' % (' '.join(full_cmd))) + + child = subprocess.Popen(full_cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = child.communicate() + return (stdout, stderr, child.returncode) + + +class MockSSHClient(BaseSSHClient): pass + SSHClient = ParamikoSSHClient if not have_paramiko: - SSHClient = ShellOutSSHClient + SSHClient = MockSSHClient diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index 84081a777b..dabe50e78e 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -110,6 +110,7 @@ class Provider(object): GRIDSPOT = 'gridspot' HOSTVIRTUAL = 'hostvirtual' ABIQUO = 'abiquo' + DIGITAL_OCEAN = 'digitalocean' EC2_US_EAST = 'ec2_us_east' EC2_EU = 'ec2_eu_west' # deprecated name diff --git a/libcloud/test/compute/fixtures/digitalocean/create_node.json b/libcloud/test/compute/fixtures/digitalocean/create_node.json new file mode 100644 index 0000000000..acddc0800b --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/create_node.json @@ -0,0 +1 @@ +{"status":"OK","droplet":{"id":119461,"name":"test-2","image_id":1601,"size_id":66,"event_id":919341}} diff --git a/libcloud/test/compute/fixtures/digitalocean/destroy_node.json b/libcloud/test/compute/fixtures/digitalocean/destroy_node.json new file mode 100644 index 0000000000..bae818d035 --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/destroy_node.json @@ -0,0 +1 @@ +{"status":"OK","event_id":918910} diff --git a/libcloud/test/compute/fixtures/digitalocean/error.txt b/libcloud/test/compute/fixtures/digitalocean/error.txt new file mode 100644 index 0000000000..0e90e519ee --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/error.txt @@ -0,0 +1 @@ +You are being redirected. 
diff --git a/libcloud/test/compute/fixtures/digitalocean/ex_create_ssh_key.json b/libcloud/test/compute/fixtures/digitalocean/ex_create_ssh_key.json new file mode 100644 index 0000000000..029cab266e --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/ex_create_ssh_key.json @@ -0,0 +1 @@ +{"status":"OK","ssh_key":{"id":7717,"name":"test1","ssh_pub_key":"aaq"}} diff --git a/libcloud/test/compute/fixtures/digitalocean/ex_destroy_ssh_key.json b/libcloud/test/compute/fixtures/digitalocean/ex_destroy_ssh_key.json new file mode 100644 index 0000000000..12d1b5927d --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/ex_destroy_ssh_key.json @@ -0,0 +1 @@ +{"status":"OK"} diff --git a/libcloud/test/compute/fixtures/digitalocean/ex_list_ssh_keys.json b/libcloud/test/compute/fixtures/digitalocean/ex_list_ssh_keys.json new file mode 100644 index 0000000000..11119763b2 --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/ex_list_ssh_keys.json @@ -0,0 +1 @@ +{"status":"OK","ssh_keys":[{"id":7717,"name":"test1"}]} diff --git a/libcloud/test/compute/fixtures/digitalocean/list_images.json b/libcloud/test/compute/fixtures/digitalocean/list_images.json new file mode 100644 index 0000000000..007a57d73b --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/list_images.json @@ -0,0 +1,145 @@ +{ + "status": "OK", + "images": [ + { + "id": 1601, + "name": "CentOS 5.8 x64", + "distribution": "CentOS" + }, + { + "id": 1602, + "name": "CentOS 5.8 x32", + "distribution": "CentOS" + }, + { + "id": 1605, + "name": "CentOS 6.0 x32", + "distribution": "CentOS" + }, + { + "id": 1606, + "name": "Fedora 15 x64", + "distribution": "Fedora" + }, + { + "id": 1609, + "name": "Ubuntu 11.10 x32 Server", + "distribution": "Ubuntu" + }, + { + "id": 1611, + "name": "CentOS 6.2 x64", + "distribution": "CentOS" + }, + { + "id": 1615, + "name": "Fedora 16 x64 Server", + "distribution": "Fedora" + }, + { + "id": 1618, + "name": "Fedora 16 x64 Desktop", + "distribution": 
"Fedora" + }, + { + "id": 2676, + "name": "Ubuntu 12.04 x64 Server", + "distribution": "Ubuntu" + }, + { + "id": 12573, + "name": "Debian 6.0 x64", + "distribution": "Debian" + }, + { + "id": 12574, + "name": "CentOS 6.3 x64", + "distribution": "CentOS" + }, + { + "id": 12575, + "name": "Debian 6.0 x32", + "distribution": "Debian" + }, + { + "id": 12578, + "name": "CentOS 6.3 x32", + "distribution": "CentOS" + }, + { + "id": 14097, + "name": "Ubuntu 10.04 x64 Server", + "distribution": "Ubuntu" + }, + { + "id": 14098, + "name": "Ubuntu 10.04 x32 Server", + "distribution": "Ubuntu" + }, + { + "id": 14218, + "name": "Ubuntu 12.04 x64 Desktop", + "distribution": "Ubuntu" + }, + { + "id": 25306, + "name": "Ubuntu 12.10 x32 Server", + "distribution": "Ubuntu" + }, + { + "id": 25485, + "name": "Ubuntu 12.10 x32 Desktop", + "distribution": "Ubuntu" + }, + { + "id": 25489, + "name": "Ubuntu 12.10 x64 Server", + "distribution": "Ubuntu" + }, + { + "id": 25493, + "name": "Ubuntu 12.10 x64 Desktop", + "distribution": "Ubuntu" + }, + { + "id": 32387, + "name": "Fedora 17 x32 Server", + "distribution": "Fedora" + }, + { + "id": 32399, + "name": "Fedora 17 x32 Desktop", + "distribution": "Fedora" + }, + { + "id": 32419, + "name": "Fedora 17 x64 Desktop", + "distribution": "Fedora" + }, + { + "id": 32428, + "name": "Fedora 17 x64 Server", + "distribution": "Fedora" + }, + { + "id": 42735, + "name": "Ubuntu 12.04 x32 Server", + "distribution": "Ubuntu" + }, + { + "id": 43458, + "name": "Ubuntu 11.04x64 Server", + "distribution": "Ubuntu" + }, + { + "id": 43462, + "name": "Ubuntu 11.04x32 Desktop", + "distribution": "Ubuntu" + }, + { + "id": 46964, + "name": "LAMP on Ubuntu 12.04", + "distribution": "Ubuntu" + } + ] +} diff --git a/libcloud/test/compute/fixtures/digitalocean/list_locations.json b/libcloud/test/compute/fixtures/digitalocean/list_locations.json new file mode 100644 index 0000000000..a87b38f2cd --- /dev/null +++ 
b/libcloud/test/compute/fixtures/digitalocean/list_locations.json @@ -0,0 +1 @@ +{"status":"OK","regions":[{"id":1,"name":"New York 1"},{"id":2,"name":"Amsterdam 1"}]} diff --git a/libcloud/test/compute/fixtures/digitalocean/list_nodes.json b/libcloud/test/compute/fixtures/digitalocean/list_nodes.json new file mode 100644 index 0000000000..5fe59491ac --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/list_nodes.json @@ -0,0 +1 @@ +{"status":"OK","droplets":[{"id":119461,"name":"test-2","image_id":1601,"size_id":66,"region_id":1,"backups_active":null,"ip_address":null,"status":"new"}]} diff --git a/libcloud/test/compute/fixtures/digitalocean/list_nodes_empty.json b/libcloud/test/compute/fixtures/digitalocean/list_nodes_empty.json new file mode 100644 index 0000000000..bc62108c5a --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/list_nodes_empty.json @@ -0,0 +1 @@ +{"status":"OK","droplets":[]} diff --git a/libcloud/test/compute/fixtures/digitalocean/list_sizes.json b/libcloud/test/compute/fixtures/digitalocean/list_sizes.json new file mode 100644 index 0000000000..99acc575c6 --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/list_sizes.json @@ -0,0 +1 @@ +{"status":"OK","sizes":[{"id":66,"name":"512MB"},{"id":63,"name":"1GB"},{"id":62,"name":"2GB"},{"id":64,"name":"4GB"},{"id":65,"name":"8GB"},{"id":61,"name":"16GB"},{"id":60,"name":"32GB"},{"id":70,"name":"48GB"},{"id":69,"name":"64GB"},{"id":68,"name":"96GB"}]} diff --git a/libcloud/test/compute/fixtures/digitalocean/reboot_node.json b/libcloud/test/compute/fixtures/digitalocean/reboot_node.json new file mode 100644 index 0000000000..bae818d035 --- /dev/null +++ b/libcloud/test/compute/fixtures/digitalocean/reboot_node.json @@ -0,0 +1 @@ +{"status":"OK","event_id":918910} diff --git a/libcloud/test/compute/test_deployment.py b/libcloud/test/compute/test_deployment.py index b48d98e562..7da638a28d 100644 --- a/libcloud/test/compute/test_deployment.py +++ 
b/libcloud/test/compute/test_deployment.py @@ -111,6 +111,26 @@ def test_script_deployment(self): self.assertEqual(self.node, sd2.run(node=self.node, client=MockClient(hostname='localhost'))) + def test_script_deployment_relative_path(self): + client = Mock() + client.put.return_value = '/home/ubuntu/relative.sh' + client.run.return_value = ('', '', 0) + + sd = ScriptDeployment(script='echo "foo"', name='relative.sh') + sd.run(self.node, client) + + client.run.assert_called_once_with('/home/ubuntu/relative.sh') + + def test_script_deployment_absolute_path(self): + client = Mock() + client.put.return_value = '/home/ubuntu/relative.sh' + client.run.return_value = ('', '', 0) + + sd = ScriptDeployment(script='echo "foo"', name='/root/relative.sh') + sd.run(self.node, client) + + client.run.assert_called_once_with('/root/relative.sh') + def test_script_deployment_and_sshkey_deployment_argument_types(self): class FileObject(object): def __init__(self, name): diff --git a/libcloud/test/compute/test_digitalocean.py b/libcloud/test/compute/test_digitalocean.py new file mode 100644 index 0000000000..e6d8ab35e9 --- /dev/null +++ b/libcloud/test/compute/test_digitalocean.py @@ -0,0 +1,153 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest +import base64 + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b +from libcloud.utils.py3 import u + +from libcloud.common.types import InvalidCredsError +from libcloud.compute.drivers.digitalocean import DigitalOceanNodeDriver +from libcloud.compute.types import NodeState + +from libcloud.test import MockHttp +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.test.secrets import DIGITAL_OCEAN_PARAMS + + +#class DigitalOceanTests(unittest.TestCase, TestCaseMixin): +class DigitalOceanTests(unittest.TestCase): + def setUp(self): + DigitalOceanNodeDriver.connectionCls.conn_classes = \ + (None, DigitalOceanMockHttp) + DigitalOceanMockHttp.type = None + self.driver = DigitalOceanNodeDriver(*DIGITAL_OCEAN_PARAMS) + + def test_authentication(self): + DigitalOceanMockHttp.type = 'UNAUTHORIZED_CLIENT' + self.assertRaises(InvalidCredsError, self.driver.list_nodes) + + def test_list_images_success(self): + images = self.driver.list_images() + self.assertTrue(len(images) >= 1) + + image = images[0] + self.assertTrue(image.id is not None) + self.assertTrue(image.name is not None) + + def test_list_sizes_success(self): + sizes = self.driver.list_sizes() + self.assertTrue(len(sizes) >= 1) + + size = sizes[0] + self.assertTrue(size.id is not None) + self.assertEqual(size.name, '512MB') + self.assertEqual(size.ram, 512) + + size = sizes[4] + self.assertTrue(size.id is not None) + self.assertEqual(size.name, '8GB') + self.assertEqual(size.ram, 8 * 1024) + + def test_list_locations_success(self): + locations = self.driver.list_locations() + self.assertTrue(len(locations) >= 1) + + location = locations[0] + self.assertEqual(location.id, '1') + self.assertEqual(location.name, 'New York 1') + + def test_list_nodes_success(self): + nodes = self.driver.list_nodes() + 
self.assertEqual(len(nodes), 1) + self.assertEqual(nodes[0].name, 'test-2') + self.assertEqual(nodes[0].public_ips, []) + + def test_reboot_node_success(self): + node = self.driver.list_nodes()[0] + result = self.driver.reboot_node(node) + self.assertTrue(result) + + def test_destroy_node_success(self): + node = self.driver.list_nodes()[0] + result = self.driver.destroy_node(node) + self.assertTrue(result) + + def test_ex_list_ssh_keys(self): + keys = self.driver.ex_list_ssh_keys() + self.assertEqual(len(keys), 1) + + self.assertEqual(keys[0].id, 7717) + self.assertEqual(keys[0].name, 'test1') + self.assertEqual(keys[0].pub_key, None) + + def test_ex_destroy_ssh_key(self): + key = self.driver.ex_list_ssh_keys()[0] + result = self.driver.ex_destroy_ssh_key(key.id) + self.assertTrue(result) + + +class DigitalOceanMockHttp(MockHttp): + fixtures = ComputeFileFixtures('digitalocean') + + def _regions(self, method, url, body, headers): + body = self.fixtures.load('list_locations.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _images(self, method, url, body, headers): + body = self.fixtures.load('list_images.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _sizes(self, method, url, body, headers): + body = self.fixtures.load('list_sizes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _droplets(self, method, url, body, headers): + body = self.fixtures.load('list_nodes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _droplets_119461_reboot(self, method, url, body, headers): + # reboot_node + body = self.fixtures.load('reboot_node.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _droplets_119461_destroy(self, method, url, body, headers): + # destroy_node + body = self.fixtures.load('destroy_node.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ssh_keys(self, method, url, body, headers): + body = 
self.fixtures.load('ex_list_ssh_keys.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ssh_keys_7717_destroy(self, method, url, body, headers): + # destroy_ssh_key + body = self.fixtures.load('ex_destroy_ssh_key.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _droplets_UNAUTHORIZED_CLIENT(self, method, url, body, headers): + body = self.fixtures.load('error.txt') + return (httplib.FOUND, body, {}, httplib.responses[httplib.FOUND]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/libcloud/test/compute/test_ssh_client.py b/libcloud/test/compute/test_ssh_client.py index df3e26ece5..b1792a549d 100644 --- a/libcloud/test/compute/test_ssh_client.py +++ b/libcloud/test/compute/test_ssh_client.py @@ -14,10 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import absolute_import + import sys import unittest from libcloud.compute.ssh import ParamikoSSHClient +from libcloud.compute.ssh import ShellOutSSHClient from libcloud.compute.ssh import have_paramiko from mock import patch, Mock @@ -114,40 +117,6 @@ def test_basic_usage_absolute_path(self): mock.close() - def test_run_script_with_relative_path(self): - """ - Execute script with relative path. 
- """ - mock = self.ssh_cli - - # Define behaviour then ask for 'current directory' - mock.client.open_sftp().getcwd.return_value = '/home/ubuntu/' - - # Script without full path - sd = 'random_script.sh' - - # Without assertions because they are the same than the previous - # 'test_basic_usage' method - mock.connect() - - mock_cli = mock.client # The actual mocked object: SSHClient - - mock.put(sd, chmod=600) - # Make assertions over 'put' method - mock_cli.open_sftp().file.assert_called_once_with('random_script.sh', - mode='w') - mock_cli.open_sftp().file().chmod.assert_called_once_with(600) - - mock.run(sd) - # Make assertions over the 'run' method - mock_cli.open_sftp().chdir.assert_called_with(".") - mock_cli.open_sftp().getcwd.assert_called_once() - full_sd = '/home/ubuntu/random_script.sh' - mock_cli.get_transport().open_session().exec_command \ - .assert_called_once_with(full_sd) - - mock.close() - def test_delete_script(self): """ Provide a basic test with 'delete' action. @@ -164,10 +133,69 @@ def test_delete_script(self): mock.close() + if not ParamikoSSHClient: class ParamikoSSHClientTests(unittest.TestCase): pass +class ShellOutSSHClientTests(unittest.TestCase): + def test_password_auth_not_supported(self): + try: + ShellOutSSHClient(hostname='localhost', username='foo', + password='bar') + except ValueError: + e = sys.exc_info()[1] + msg = str(e) + self.assertTrue('ShellOutSSHClient only supports key auth' in msg) + else: + self.fail('Exception was not thrown') + + def test_ssh_executable_not_available(self): + class MockChild(object): + returncode = 127 + + def communicate(*args, **kwargs): + pass + + def mock_popen(*args, **kwargs): + return MockChild() + + with patch('subprocess.Popen', mock_popen): + try: + ShellOutSSHClient(hostname='localhost', username='foo') + except ValueError: + e = sys.exc_info()[1] + msg = str(e) + self.assertTrue('ssh client is not available' in msg) + else: + self.fail('Exception was not thrown') + + def 
test_connect_success(self): + client = ShellOutSSHClient(hostname='localhost', username='root') + self.assertTrue(client.connect()) + + def test_close_success(self): + client = ShellOutSSHClient(hostname='localhost', username='root') + self.assertTrue(client.close()) + + def test_get_base_ssh_command(self): + client1 = ShellOutSSHClient(hostname='localhost', username='root') + client2 = ShellOutSSHClient(hostname='localhost', username='root', + key='/home/my.key') + client3 = ShellOutSSHClient(hostname='localhost', username='root', + key='/home/my.key', timeout=5) + + cmd1 = client1._get_base_ssh_command() + cmd2 = client2._get_base_ssh_command() + cmd3 = client3._get_base_ssh_command() + + self.assertEquals(cmd1, ['ssh', 'root@localhost']) + self.assertEquals(cmd2, ['ssh', '-i', '/home/my.key', + 'root@localhost']) + self.assertEquals(cmd3, ['ssh', '-i', '/home/my.key', + '-oConnectTimeout=5', 'root@localhost']) + + if __name__ == '__main__': sys.exit(unittest.main()) diff --git a/libcloud/test/secrets.py-dist b/libcloud/test/secrets.py-dist index df221020fe..cd6c83b50e 100644 --- a/libcloud/test/secrets.py-dist +++ b/libcloud/test/secrets.py-dist @@ -39,6 +39,7 @@ JOYENT_PARAMS = ('user', 'key') VCL_PARAMS = ('user', 'pass', True, 'foo.bar.com') GRIDSPOT_PARAMS = ('key',) HOSTVIRTUAL_PARAMS = ('key',) +DIGITAL_OCEAN_PARAMS = ('user', 'key') # Storage STORAGE_S3_PARAMS = ('key', 'secret') From 527c6156035757fa2497135fad7df67383545cbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 16 Mar 2013 04:18:30 +0000 Subject: [PATCH 035/143] Backport commits from trunk. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1457201 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 6 ++++++ libcloud/common/gandi.py | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/CHANGES b/CHANGES index 211506c1bf..26d20bef30 100644 --- a/CHANGES +++ b/CHANGES @@ -8,6 +8,12 @@ Changes with Apache Libcloud in development: Reported by Arfrever Frehtes Taifersar Arahesis. [Tomaz Muraus] + - Fix a regression introduced with recent xmlrpiclib changes which broke all + the Gandi.net drivers. (LIBCLOUD-288) + + Reported by Hutson Betts. + [Tomaz Muraus] + - Improve deploy code to work correctly if the ssh user doesn't have access to the /root directory. diff --git a/libcloud/common/gandi.py b/libcloud/common/gandi.py index 17a9193b0e..fc7d5afa7d 100644 --- a/libcloud/common/gandi.py +++ b/libcloud/common/gandi.py @@ -57,6 +57,14 @@ class GandiConnection(XMLRPCConnection, ConnectionKey): host = 'rpc.gandi.net' endpoint = '/xmlrpc/' + def __init__(self, key, secure=True): + # Note: Method resolution order in this case is + # XMLRPCConnection -> Connection and Connection doesn't take key as the + # first argument so we specify a keyword argument instead. + # Previously it was GandiConnection -> ConnectionKey so it worked fine. + super(GandiConnection, self).__init__(key=key, secure=secure) + self.driver = BaseGandiDriver + def request(self, method, *args): args = (self.key, ) + args return super(GandiConnection, self).request(method, *args) From 60ce7d46cd03e49e30f128bd9ffab9b7db10435c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 16 Mar 2013 04:21:02 +0000 Subject: [PATCH 036/143] Bump version. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1457202 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 2 +- libcloud/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 26d20bef30..0ef6106557 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,6 @@ -*- coding: utf-8 -*- -Changes with Apache Libcloud in development: +Changes with Apache Libcloud 0.12.3: *) General diff --git a/libcloud/__init__.py b/libcloud/__init__.py index d39656cde0..be3db46d8f 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ['__version__', 'enable_debug'] -__version__ = '0.12.2' +__version__ = '0.12.3' try: import paramiko From b483f4d72346d0d29b4195e56457e73e2ef8992e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 23 Mar 2013 04:41:17 +0000 Subject: [PATCH 037/143] Backport commit 1460093 from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1460095 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 8 ++++++++ libcloud/compute/drivers/softlayer.py | 3 ++- libcloud/test/compute/test_softlayer.py | 14 +++++++------- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index 0ef6106557..8e2d8a0e9a 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,13 @@ -*- coding: utf-8 -*- +Changes with Apache Libcloud in development: + + *) Compute + + - Fix a regression in Softlayer driver caused by the xmlrpclib changes. 
+ (LIBCLOUD-310) + [Jason Johnson] + Changes with Apache Libcloud 0.12.3: *) General diff --git a/libcloud/compute/drivers/softlayer.py b/libcloud/compute/drivers/softlayer.py index babc89c866..e5611c66e0 100644 --- a/libcloud/compute/drivers/softlayer.py +++ b/libcloud/compute/drivers/softlayer.py @@ -108,7 +108,8 @@ class SoftLayerResponse(XMLRPCResponse): class SoftLayerConnection(XMLRPCConnection, ConnectionUserAndKey): responseCls = SoftLayerResponse - endpoint = '/xmlrpc/v3/' + host = 'api.softlayer.com' + endpoint = '/xmlrpc/v3' def request(self, service, method, *args, **kwargs): headers = {} diff --git a/libcloud/test/compute/test_softlayer.py b/libcloud/test/compute/test_softlayer.py index fc58d12e68..934aaf9e30 100644 --- a/libcloud/test/compute/test_softlayer.py +++ b/libcloud/test/compute/test_softlayer.py @@ -143,24 +143,24 @@ def _xmlrpc(self, method, url, body, headers): meth_name = "%s_%s" % (url, meth_name) return getattr(self, meth_name)(method, url, body, headers) - def _xmlrpc_v3__SoftLayer_Virtual_Guest_getCreateObjectOptions( + def _xmlrpc_v3_SoftLayer_Virtual_Guest_getCreateObjectOptions( self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc_v3__SoftLayer_Account_getVirtualGuests( + def _xmlrpc_v3_SoftLayer_Account_getVirtualGuests( self, method, url, body, headers): body = self.fixtures.load('v3_SoftLayer_Account_getVirtualGuests.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc_v3__SoftLayer_Location_Datacenter_getDatacenters( + def _xmlrpc_v3_SoftLayer_Location_Datacenter_getDatacenters( self, method, url, body, headers): body = self.fixtures.load( 'v3_SoftLayer_Location_Datacenter_getDatacenters.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc_v3__SoftLayer_Virtual_Guest_createObject( + def 
_xmlrpc_v3_SoftLayer_Virtual_Guest_createObject( self, method, url, body, headers): fixture = { None: 'v3__SoftLayer_Virtual_Guest_createObject.xml', @@ -170,18 +170,18 @@ def _xmlrpc_v3__SoftLayer_Virtual_Guest_createObject( body = self.fixtures.load(fixture) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc_v3__SoftLayer_Virtual_Guest_getObject( + def _xmlrpc_v3_SoftLayer_Virtual_Guest_getObject( self, method, url, body, headers): body = self.fixtures.load( 'v3__SoftLayer_Virtual_Guest_getObject.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc_v3__SoftLayer_Virtual_Guest_rebootSoft( + def _xmlrpc_v3_SoftLayer_Virtual_Guest_rebootSoft( self, method, url, body, headers): body = self.fixtures.load('empty.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc_v3__SoftLayer_Virtual_Guest_deleteObject( + def _xmlrpc_v3_SoftLayer_Virtual_Guest_deleteObject( self, method, url, body, headers): body = self.fixtures.load('empty.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) From 59eb758092c83c83c930a7c743daf301379efc85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Wed, 27 Mar 2013 03:05:58 +0000 Subject: [PATCH 038/143] Backport commits from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1461394 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 5 ++ libcloud/compute/base.py | 111 ++++++++++++++++++++++++++------------- 2 files changed, 80 insertions(+), 36 deletions(-) diff --git a/CHANGES b/CHANGES index 8e2d8a0e9a..b1aff7f283 100644 --- a/CHANGES +++ b/CHANGES @@ -8,6 +8,11 @@ Changes with Apache Libcloud in development: (LIBCLOUD-310) [Jason Johnson] + - Allow user to pass alternate ssh usernames to deploy_node + (ssh_alternate_usernames kwarg) which are used for authentication if the + default one doesn't work. 
(LIBCLOUD-309) + [Chris Psaltis, Tomaz Muraus] + Changes with Apache Libcloud 0.12.3: *) General diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 15330ff13d..66be6a5205 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -592,6 +592,11 @@ def deploy_node(self, **kwargs): SSH server (default is root) @type ssh_username: C{str} + @keyword ssh_alternate_usernames: Optional list of ssh usernames to + try to connect with if using the + default one fails + @type ssh_alternate_usernames: C{list} + @keyword ssh_port: Optional SSH server port (default is 22) @type ssh_port: C{int} @@ -647,42 +652,53 @@ def deploy_node(self, **kwargs): if 'generates_password' in self.features['create_node']: password = node.extra.get('password') + ssh_interface = kwargs.get('ssh_interface', 'public_ips') + + # Wait until node is up and running and has IP assigned try: - # Wait until node is up and running and has IP assigned - ssh_interface = kwargs.get('ssh_interface', 'public_ips') node, ip_addresses = self.wait_until_running( nodes=[node], wait_period=3, timeout=NODE_ONLINE_WAIT_TIMEOUT, ssh_interface=ssh_interface)[0] - - if password: - node.extra['password'] = password - - ssh_username = kwargs.get('ssh_username', 'root') - ssh_port = kwargs.get('ssh_port', 22) - ssh_timeout = kwargs.get('ssh_timeout', 10) - ssh_key_file = kwargs.get('ssh_key', None) - timeout = kwargs.get('timeout', SSH_CONNECT_TIMEOUT) - - ssh_client = SSHClient(hostname=ip_addresses[0], - port=ssh_port, username=ssh_username, - password=password, - key=ssh_key_file, - timeout=ssh_timeout) - - # Connect to the SSH server running on the node - ssh_client = self._ssh_client_connect(ssh_client=ssh_client, - timeout=timeout) - - # Execute the deployment task - self._run_deployment_script(task=kwargs['deploy'], - node=node, - ssh_client=ssh_client, - max_tries=max_tries) except Exception: e = sys.exc_info()[1] raise DeploymentError(node=node, original_exception=e, driver=self) + if 
password: + node.extra['password'] = password + + ssh_username = kwargs.get('ssh_username', 'root') + ssh_alternate_usernames = kwargs.get('ssh_alternate_usernames', []) + ssh_port = kwargs.get('ssh_port', 22) + ssh_timeout = kwargs.get('ssh_timeout', 10) + ssh_key_file = kwargs.get('ssh_key', None) + timeout = kwargs.get('timeout', SSH_CONNECT_TIMEOUT) + + deploy_error = None + + for username in ([ssh_username] + ssh_alternate_usernames): + try: + self._connect_and_run_deployment_script( + task=kwargs['deploy'], node=node, + ssh_hostname=ip_addresses[0], ssh_port=ssh_port, + ssh_username=username, ssh_password=password, + ssh_key_file=ssh_key_file, ssh_timeout=ssh_timeout, + timeout=timeout, max_tries=max_tries) + except Exception: + # Try alternate username + # Todo: Need to fix paramiko so we can catch a more specific + # exception + e = sys.exc_info()[1] + deploy_error = e + else: + # Script sucesfully executed, don't try alternate username + deploy_error = None + break + + if deploy_error is not None: + raise DeploymentError(node=node, original_exception=deploy_error, + driver=self) + return node def create_volume(self, size, name, location=None, snapshot=None): @@ -757,14 +773,16 @@ def _wait_until_running(self, node, wait_period=3, timeout=600, ssh_interface='public_ips', force_ipv4=True): # This is here for backward compatibility and will be removed in the # next major release - return wait_until_running(nodes=[node], wait_period=wait_period, - timeout=timeout, ssh_interface=ssh_interface, - force_ipv4=force_ipv4) + return self._wait_until_running(nodes=[node], wait_period=wait_period, + timeout=timeout, + ssh_interface=ssh_interface, + force_ipv4=force_ipv4) def wait_until_running(self, nodes, wait_period=3, timeout=600, ssh_interface='public_ips', force_ipv4=True): """ - Block until the given nodes are fully booted and have an IP address assigned. + Block until the given nodes are fully booted and have an IP address + assigned. 
@keyword nodes: list of node instances. @type nodes: C{List} of L{Node} @@ -817,13 +835,14 @@ def filter_addresses(addresses): if len(nodes) > len(uuids): found_uuids = [n.uuid for n in nodes] - raise LibcloudError(value=('Unable to match specified uuids ' + - '(%s) with existing nodes. Found ' % uuids + - 'multiple nodes with same uuid: (%s)' % found_uuids), - driver=self) + msg = ('Unable to match specified uuids ' + + '(%s) with existing nodes. Found ' % (uuids) + + 'multiple nodes with same uuid: (%s)' % (found_uuids)) + raise LibcloudError(value=msg, driver=self) running_nodes = [n for n in nodes if n.state == NodeState.RUNNING] - addresses = [filter_addresses(getattr(n, ssh_interface)) for n in running_nodes] + addresses = [filter_addresses(getattr(n, ssh_interface)) for n in + running_nodes] if len(running_nodes) == len(uuids) == len(addresses): return list(zip(running_nodes, addresses)) else: @@ -869,6 +888,25 @@ def _ssh_client_connect(self, ssh_client, wait_period=1.5, timeout=300): raise LibcloudError(value='Could not connect to the remote SSH ' + 'server. Giving up.', driver=self) + def _connect_and_run_deployment_script(self, task, node, ssh_hostname, + ssh_port, ssh_username, + ssh_password, ssh_key_file, + ssh_timeout, timeout, max_tries): + ssh_client = SSHClient(hostname=ssh_hostname, + port=ssh_port, username=ssh_username, + password=ssh_password, + key=ssh_key_file, + timeout=ssh_timeout) + + # Connect to the SSH server running on the node + ssh_client = self._ssh_client_connect(ssh_client=ssh_client, + timeout=timeout) + + # Execute the deployment task + self._run_deployment_script(task=task, node=node, + ssh_client=ssh_client, + max_tries=max_tries) + def _run_deployment_script(self, task, node, ssh_client, max_tries=3): """ Run the deployment script on the provided node. 
At this point it is @@ -894,6 +932,7 @@ def _run_deployment_script(self, task, node, ssh_client, max_tries=3): try: node = task.run(node, ssh_client) except Exception: + e = sys.exc_info()[1] tries += 1 if tries >= max_tries: e = sys.exc_info()[1] From d8ab48b5af0e444df2bbc3d3ccce2eb691c269a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Wed, 27 Mar 2013 03:31:51 +0000 Subject: [PATCH 039/143] Fix a bug in EC2 list_locations method - 'name' attribute didn't contain a the right value. Reported by Rudolf Streif on the mailing list. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1461400 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 ++++ libcloud/compute/drivers/ec2.py | 2 +- libcloud/test/compute/test_ec2.py | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index b1aff7f283..293591df84 100644 --- a/CHANGES +++ b/CHANGES @@ -13,6 +13,10 @@ Changes with Apache Libcloud in development: default one doesn't work. (LIBCLOUD-309) [Chris Psaltis, Tomaz Muraus] + - Fix a bug in EC2 list_locations method - 'name' attribute didn't contain a + the right value. 
+ [Tomaz Muraus] + Changes with Apache Libcloud 0.12.3: *) General diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index db2e8db4cf..1434932ddf 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -672,7 +672,7 @@ def list_locations(self): for index, availability_zone in \ enumerate(self.ex_list_availability_zones()): locations.append(EC2NodeLocation( - index, availability_zone, self.country, self, + index, availability_zone.name, self.country, self, availability_zone) ) return locations diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index a91b8ec2f6..25944f3673 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -28,6 +28,7 @@ from libcloud.compute.drivers.ec2 import NimbusNodeDriver, EucNodeDriver from libcloud.compute.drivers.ec2 import IdempotentParamError from libcloud.compute.drivers.ec2 import REGION_DETAILS +from libcloud.compute.drivers.ec2 import ExEC2AvailabilityZone from libcloud.utils.py3 import urlparse from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation from libcloud.compute.base import StorageVolume @@ -151,7 +152,10 @@ def test_list_nodes_with_name_tag(self): def test_list_location(self): locations = self.driver.list_locations() self.assertTrue(len(locations) > 0) + self.assertEqual(locations[0].name, 'eu-west-1a') self.assertTrue(locations[0].availability_zone != None) + self.assertTrue(isinstance(locations[0].availability_zone, + ExEC2AvailabilityZone)) def test_list_security_groups(self): groups = self.driver.ex_list_security_groups() From c7428d336c041f61d9df2bfb12b24dc5462f146f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Wed, 27 Mar 2013 03:39:36 +0000 Subject: [PATCH 040/143] Fix __repr__. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1461401 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/compute/drivers/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 1434932ddf..bb303d86c5 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -348,7 +348,7 @@ def __repr__(self): return (('') % (self.id, self.name, self.country, - self.availability_zone.name, self.driver.name)) + self.availability_zone, self.driver.name)) class EC2Response(AWSBaseResponse): From 8b72056b430cd9b9a55c4703164a7df673c57292 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Fri, 29 Mar 2013 17:05:21 +0000 Subject: [PATCH 041/143] Backport commit 1462534 from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1462539 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 ++++ libcloud/compute/deployment.py | 28 +++++++++++++++++++++++- libcloud/test/compute/test_deployment.py | 10 ++++++++- 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 293591df84..bd467ad7d2 100644 --- a/CHANGES +++ b/CHANGES @@ -17,6 +17,10 @@ Changes with Apache Libcloud in development: the right value. [Tomaz Muraus] + - Add new ScriptFileDeployment deployment class which reads deploy script + from a file. + [Rudolf J Streif] + Changes with Apache Libcloud 0.12.3: *) General diff --git a/libcloud/compute/deployment.py b/libcloud/compute/deployment.py index 6f24a067d2..f00222291a 100644 --- a/libcloud/compute/deployment.py +++ b/libcloud/compute/deployment.py @@ -123,7 +123,8 @@ def __init__(self, script, name=None, delete=False): @keyword script: Contents of the script to run @type name: C{str} - @keyword name: Name of the script to upload it as, if not specified, a random name will be choosen. 
+ @keyword name: Name of the script to upload it as, if not specified, + a random name will be choosen. @type delete: C{bool} @keyword delete: Whether to delete the script on completion. @@ -167,6 +168,31 @@ def run(self, node, client): return node +class ScriptFileDeployment(ScriptDeployment): + """ + Runs an arbitrary Shell Script task from a file. + """ + + def __init__(self, script_file, name=None, delete=False): + """ + @type script_file: C{str} + @keyword script_file: Path to a file containing the script to run + + @type name: C{str} + @keyword name: Name of the script to upload it as, if not specified, + a random name will be choosen. + + @type delete: C{bool} + @keyword delete: Whether to delete the script on completion. + """ + with open(script_file, 'rb') as fp: + content = fp.read() + + super(ScriptFileDeployment, self).__init__(script=content, + name=name, + delete=delete) + + class MultiStepDeployment(Deployment): """ Runs a chain of Deployment steps. diff --git a/libcloud/test/compute/test_deployment.py b/libcloud/test/compute/test_deployment.py index 7da638a28d..903f40da75 100644 --- a/libcloud/test/compute/test_deployment.py +++ b/libcloud/test/compute/test_deployment.py @@ -24,7 +24,7 @@ from libcloud.compute.deployment import MultiStepDeployment, Deployment from libcloud.compute.deployment import SSHKeyDeployment, ScriptDeployment -from libcloud.compute.deployment import FileDeployment +from libcloud.compute.deployment import ScriptFileDeployment, FileDeployment from libcloud.compute.base import Node from libcloud.compute.types import NodeState, DeploymentError, LibcloudError from libcloud.compute.ssh import BaseSSHClient @@ -111,6 +111,14 @@ def test_script_deployment(self): self.assertEqual(self.node, sd2.run(node=self.node, client=MockClient(hostname='localhost'))) + def test_script_file_deployment(self): + file_path = os.path.abspath(__file__) + with open(file_path, 'rb') as fp: + content = fp.read() + + sfd1 = 
ScriptFileDeployment(script_file=file_path) + self.assertEqual(sfd1.script, content) + def test_script_deployment_relative_path(self): client = Mock() client.put.return_value = '/home/ubuntu/relative.sh' From 7838d9feed249d96b0aabaae996d5556501e46d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Fri, 29 Mar 2013 18:00:38 +0000 Subject: [PATCH 042/143] Backport commit 1462561 from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1462562 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/test/compute/test_deployment.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libcloud/test/compute/test_deployment.py b/libcloud/test/compute/test_deployment.py index 903f40da75..c4f106db2f 100644 --- a/libcloud/test/compute/test_deployment.py +++ b/libcloud/test/compute/test_deployment.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import with_statement + import os import sys import time From e2cca0656ecce3537cea00fef206cc71a00d9a64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 13 Apr 2013 20:45:23 +0000 Subject: [PATCH 043/143] Backport commits from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1467703 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 5 +++ libcloud/compute/drivers/vcloud.py | 14 ++++++ libcloud/test/compute/test_linode.py | 5 ++- libcloud/test/compute/test_ssh_client.py | 1 + libcloud/test/compute/test_vcloud.py | 54 ++++++++++++++++++++++++ 5 files changed, 77 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index bd467ad7d2..1491d4fc1d 100644 --- a/CHANGES +++ b/CHANGES @@ -21,6 +21,11 @@ Changes with Apache Libcloud in development: from a file. [Rudolf J Streif] + - Add support for API version 5.1 to the vCloud driver and accept any value + which is a multiple of four for ex_vm_memory kwarg in create_node method. 
+ (LIBCLOUD-314) + [Trevor Powell] + Changes with Apache Libcloud 0.12.3: *) General diff --git a/libcloud/compute/drivers/vcloud.py b/libcloud/compute/drivers/vcloud.py index 86aaa1f98d..735cd7d1a8 100644 --- a/libcloud/compute/drivers/vcloud.py +++ b/libcloud/compute/drivers/vcloud.py @@ -380,6 +380,8 @@ def __new__(cls, key, secret=None, secure=True, host=None, port=None, cls = VCloudNodeDriver elif api_version == '1.5': cls = VCloud_1_5_NodeDriver + elif api_version == '5.1': + cls = VCloud_1_5_NodeDriver else: raise NotImplementedError( "No VCloudNodeDriver found for API version %s" % @@ -1858,3 +1860,15 @@ def get_capacity_values(capacity_elm): cpu=cpu, memory=memory, storage=storage) + + +class VCloud_5_1_NodeDriver(VCloud_1_5_NodeDriver): + + @staticmethod + def _validate_vm_memory(vm_memory): + if vm_memory is None: + return None + elif (vm_memory % 4) != 0: + #The vcd 5.1 virtual machine memory size must be a multiple of 4 MB + raise ValueError( + '%s is not a valid vApp VM memory value' % (vm_memory)) diff --git a/libcloud/test/compute/test_linode.py b/libcloud/test/compute/test_linode.py index 10c03d8ed1..db4f3fc1f1 100644 --- a/libcloud/test/compute/test_linode.py +++ b/libcloud/test/compute/test_linode.py @@ -41,6 +41,7 @@ def test_list_nodes(self): node = nodes[0] self.assertEqual(node.id, "8098") self.assertEqual(node.name, 'api-node3') + self.assertEqual(node.extra['PLANID'], '1') self.assertTrue('75.127.96.245' in node.public_ips) self.assertEqual(node.private_ips, []) @@ -97,7 +98,7 @@ def _avail_datacenters(self, method, url, body, headers): return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _avail_linodeplans(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"avail.linodeplans","DATA":[{"AVAIL":{"2":27,"3":0,"4":0,"6":0},"DISK":16,"PRICE":19.95,"PLANID":1,"LABEL":"Linode 360","RAM":360,"XFER":200},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":24,"PRICE":29.95,"PLANID":2,"LABEL":"Linode 
540","RAM":540,"XFER":300},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":32,"PRICE":39.95,"PLANID":3,"LABEL":"Linode 720","RAM":720,"XFER":400},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":48,"PRICE":59.95,"PLANID":4,"LABEL":"Linode 1080","RAM":1080,"XFER":600},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":64,"PRICE":79.95,"PLANID":5,"LABEL":"Linode 1440","RAM":1440,"XFER":800},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":128,"PRICE":159.95,"PLANID":6,"LABEL":"Linode 2880","RAM":2880,"XFER":1600},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":256,"PRICE":319.95,"PLANID":7,"LABEL":"Linode 5760","RAM":5760,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":384,"PRICE":479.95,"PLANID":8,"LABEL":"Linode 8640","RAM":8640,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":512,"PRICE":639.95,"PLANID":9,"LABEL":"Linode 11520","RAM":11520,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":640,"PRICE":799.95,"PLANID":10,"LABEL":"Linode 14400","RAM":14400,"XFER":2000}]}' + body = '{"ERRORARRAY":[],"ACTION":"avail.linodeplans","DATA":[{"AVAIL":{"2":27,"3":0,"4":0,"6":0},"DISK":16,"PRICE":19.95,"PLANID":1,"LABEL":"Linode 360","RAM":360,"XFER":200},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":24,"PRICE":29.95,"PLANID":2,"LABEL":"Linode 512","RAM":512,"XFER":300},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":32,"PRICE":39.95,"PLANID":3,"LABEL":"Linode 720","RAM":720,"XFER":400},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":48,"PRICE":59.95,"PLANID":4,"LABEL":"Linode 1080","RAM":1080,"XFER":600},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":64,"PRICE":79.95,"PLANID":5,"LABEL":"Linode 1440","RAM":1440,"XFER":800},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":128,"PRICE":159.95,"PLANID":6,"LABEL":"Linode 2880","RAM":2880,"XFER":1600},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":256,"PRICE":319.95,"PLANID":7,"LABEL":"Linode 5760","RAM":5760,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":384,"PRICE":479.95,"PLANID":8,"LABEL":"Linode 
8640","RAM":8640,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":512,"PRICE":639.95,"PLANID":9,"LABEL":"Linode 11520","RAM":11520,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":640,"PRICE":799.95,"PLANID":10,"LABEL":"Linode 14400","RAM":14400,"XFER":2000}]}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _avail_distributions(self, method, url, body, headers): @@ -141,7 +142,7 @@ def _linode_config_create(self, method, url, body, headers): return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_list(self, method, url, body, headers): - body = '{"ACTION": "linode.list", "DATA": [{"ALERT_DISKIO_ENABLED": 1, "BACKUPWEEKLYDAY": 0, "LABEL": "api-node3", "DATACENTERID": 5, "ALERT_BWOUT_ENABLED": 1, "ALERT_CPU_THRESHOLD": 10, "TOTALHD": 100, "ALERT_BWQUOTA_THRESHOLD": 81, "ALERT_BWQUOTA_ENABLED": 1, "TOTALXFER": 200, "STATUS": 2, "ALERT_BWIN_ENABLED": 1, "ALERT_BWIN_THRESHOLD": 5, "ALERT_DISKIO_THRESHOLD": 200, "WATCHDOG": 1, "LINODEID": 8098, "BACKUPWINDOW": 1, "TOTALRAM": 540, "LPM_DISPLAYGROUP": "", "ALERT_BWOUT_THRESHOLD": 5, "BACKUPSENABLED": 1, "ALERT_CPU_ENABLED": 1}], "ERRORARRAY": []}' + body = '{"ACTION": "linode.list", "DATA": [{"ALERT_DISKIO_ENABLED": 1, "BACKUPWEEKLYDAY": 0, "LABEL": "api-node3", "DATACENTERID": 5, "ALERT_BWOUT_ENABLED": 1, "ALERT_CPU_THRESHOLD": 10, "TOTALHD": 100, "ALERT_BWQUOTA_THRESHOLD": 81, "ALERT_BWQUOTA_ENABLED": 1, "TOTALXFER": 200, "STATUS": 2, "ALERT_BWIN_ENABLED": 1, "ALERT_BWIN_THRESHOLD": 5, "ALERT_DISKIO_THRESHOLD": 200, "WATCHDOG": 1, "LINODEID": 8098, "BACKUPWINDOW": 1, "TOTALRAM": 512, "LPM_DISPLAYGROUP": "", "ALERT_BWOUT_THRESHOLD": 5, "BACKUPSENABLED": 1, "ALERT_CPU_ENABLED": 1}], "ERRORARRAY": []}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _linode_ip_list(self, method, url, body, headers): diff --git a/libcloud/test/compute/test_ssh_client.py b/libcloud/test/compute/test_ssh_client.py index b1792a549d..06287f8878 100644 --- 
a/libcloud/test/compute/test_ssh_client.py +++ b/libcloud/test/compute/test_ssh_client.py @@ -15,6 +15,7 @@ # limitations under the License. from __future__ import absolute_import +from __future__ import with_statement import sys import unittest diff --git a/libcloud/test/compute/test_vcloud.py b/libcloud/test/compute/test_vcloud.py index 3f9744f10a..46095dd370 100644 --- a/libcloud/test/compute/test_vcloud.py +++ b/libcloud/test/compute/test_vcloud.py @@ -271,6 +271,60 @@ def test_ex_set_control_access(self): self.driver.ex_set_control_access(node, control_access) +class VCloud_5_1_Tests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + VCloudNodeDriver.connectionCls.host = 'test' + VCloudNodeDriver.connectionCls.conn_classes = (None, VCloud_1_5_MockHttp) + VCloud_1_5_MockHttp.type = None + self.driver = VCloud_1_5_NodeDriver(*VCLOUD_PARAMS, **{'api_version': '5.1'}) + + def _test_create_node_valid_ex_vm_memory(self): + # TODO: Hook up the fixture + values = [4, 1024, 4096] + + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + + for value in values: + self.driver.create_node( + name='testerpart2', + image=image, + size=size, + vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224', + network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725', + cpus=2, + ex_vm_memory=value + ) + + def test_create_node_invalid_ex_vm_memory(self): + values = [1, 3, 7] + + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + + for value in values: + try: + self.driver.create_node( + name='testerpart2', + image=image, + size=size, + vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224', + network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725', + cpus=2, + ex_vm_memory=value + ) + except ValueError: + pass + else: + self.fail('Exception was not thrown') + + + def test_list_images(self): + ret = self.driver.list_images() + 
self.assertEqual('https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id) + + class TerremarkMockHttp(MockHttp): fixtures = ComputeFileFixtures('terremark') From 6687e9570d105e64d262deb965e53946740d5cb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 16 Apr 2013 17:56:25 +0000 Subject: [PATCH 044/143] Fix a regression with removed ex_force_service_region constructor kwarg in the CloudFiles driver. Part of LIBCLOUD-260. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1468530 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 5 +++++ libcloud/storage/drivers/cloudfiles.py | 10 ++++++++++ libcloud/test/storage/test_cloudfiles.py | 17 +++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/CHANGES b/CHANGES index 1491d4fc1d..b036554b3c 100644 --- a/CHANGES +++ b/CHANGES @@ -26,6 +26,11 @@ Changes with Apache Libcloud in development: (LIBCLOUD-314) [Trevor Powell] + *) Storage + + - Fix a regression with removed ex_force_service_region constructor kwarg in + the CloudFiles driver. 
(LIBCLOUD-260) + Changes with Apache Libcloud 0.12.3: *) General diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index c7a7ac3c8e..77dc97215a 100644 --- a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -113,6 +113,9 @@ def __init__(self, user_id, key, secure=True, **kwargs): self.accept_format = 'application/json' self.cdn_request = False + if self._ex_force_service_region: + self.service_region = self._ex_force_service_region + def get_endpoint(self): # First, we parse out both files and cdn endpoints # for each auth version @@ -131,6 +134,13 @@ def get_endpoint(self): if self.cdn_request: eps = cdn_eps + if self.service_region: + _eps = [] + for ep in eps: + if ep['region'].lower() == self.service_region.lower(): + _eps.append(ep) + eps = _eps + if len(eps) == 0: raise LibcloudError('Could not find specified endpoint') diff --git a/libcloud/test/storage/test_cloudfiles.py b/libcloud/test/storage/test_cloudfiles.py index 9f239b1337..b16cd649ab 100644 --- a/libcloud/test/storage/test_cloudfiles.py +++ b/libcloud/test/storage/test_cloudfiles.py @@ -82,6 +82,23 @@ def test_force_auth_token_kwargs(self): self.assertEquals('/v1/MossoCloudFS', driver.connection.request_path) + def test_invalid_ex_force_service_region(self): + driver = CloudFilesStorageDriver('driver', 'dummy', + ex_force_service_region='invalid') + + try: + driver.list_containers() + except: + e = sys.exc_info()[1] + self.assertEquals(e.value, 'Could not find specified endpoint') + else: + self.fail('Exception was not thrown') + + def test_ex_force_service_region(self): + driver = CloudFilesStorageDriver('driver', 'dummy', + ex_force_service_region='ORD') + driver.list_containers() + def test_force_auth_url_kwargs(self): kwargs = { 'ex_force_auth_version': '2.0', From dfaf2edda19187494a26a3bcc6eb537173482171 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 16 Apr 2013 22:48:01 +0000 Subject: 
[PATCH 045/143] Update version. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1468654 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 2 +- libcloud/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index b036554b3c..da361b6f55 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,6 @@ -*- coding: utf-8 -*- -Changes with Apache Libcloud in development: +Changes with Apache Libcloud 0.12.4: *) Compute diff --git a/libcloud/__init__.py b/libcloud/__init__.py index be3db46d8f..8d88d664d9 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ['__version__', 'enable_debug'] -__version__ = '0.12.3' +__version__ = '0.12.4' try: import paramiko From b038aa41235e319058cfd866ab074abee175a5ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 16 Apr 2013 22:56:51 +0000 Subject: [PATCH 046/143] Temporary skip a test case under Python 3.2. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1468658 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/test/compute/test_deployment.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/libcloud/test/compute/test_deployment.py b/libcloud/test/compute/test_deployment.py index c4f106db2f..9d1ea37bc6 100644 --- a/libcloud/test/compute/test_deployment.py +++ b/libcloud/test/compute/test_deployment.py @@ -23,6 +23,7 @@ from libcloud.utils.py3 import httplib from libcloud.utils.py3 import u +from libcloud.utils.py3 import PY32 from libcloud.compute.deployment import MultiStepDeployment, Deployment from libcloud.compute.deployment import SSHKeyDeployment, ScriptDeployment @@ -114,6 +115,10 @@ def test_script_deployment(self): client=MockClient(hostname='localhost'))) def test_script_file_deployment(self): + # TODO: Fix 3.2 compatibility + if PY32: + return + file_path = os.path.abspath(__file__) with open(file_path, 'rb') as fp: content = fp.read() From ebda889e89a8f08659b234d292438b3a54a9777c 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sun, 21 Apr 2013 18:10:12 +0000 Subject: [PATCH 047/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1470328 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 7 +++++++ libcloud/compute/drivers/libvirt_driver.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index da361b6f55..161bae6c37 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,12 @@ -*- coding: utf-8 -*- +Changes with Apache Libcloud in deveploment: + + *) Compute + + - Fix destroy_node method in the experimental libvirt driver. + [Aymen Fitati] + Changes with Apache Libcloud 0.12.4: *) Compute diff --git a/libcloud/compute/drivers/libvirt_driver.py b/libcloud/compute/drivers/libvirt_driver.py index adb4da00da..289ec8ced9 100644 --- a/libcloud/compute/drivers/libvirt_driver.py +++ b/libcloud/compute/drivers/libvirt_driver.py @@ -92,7 +92,7 @@ def reboot_node(self, node): def destroy_node(self, node): domain = self._get_domain_for_node(node=node) - return domain.destroy(flags=0) == 0 + return domain.destroy() == 0 def ex_start(self, node): """ From 4a0a9e871be3ebc4724402c9c88ccec04df77ed2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sun, 21 Apr 2013 18:21:25 +0000 Subject: [PATCH 048/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1470330 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 3 +++ libcloud/compute/drivers/joyent.py | 14 ++++++++++++++ libcloud/test/compute/test_joyent.py | 4 ++++ 3 files changed, 21 insertions(+) diff --git a/CHANGES b/CHANGES index 161bae6c37..706f299c7f 100644 --- a/CHANGES +++ b/CHANGES @@ -7,6 +7,9 @@ Changes with Apache Libcloud in deveploment: - Fix destroy_node method in the experimental libvirt driver. [Aymen Fitati] + - Add ex_start_node method to the Joyent driver. 
(LIBCLOUD-319) + [rszabo50] + Changes with Apache Libcloud 0.12.4: *) Compute diff --git a/libcloud/compute/drivers/joyent.py b/libcloud/compute/drivers/joyent.py index 4a48044c12..f8e3658c2f 100644 --- a/libcloud/compute/drivers/joyent.py +++ b/libcloud/compute/drivers/joyent.py @@ -186,6 +186,20 @@ def ex_stop_node(self, node): data=data, method='POST') return result.status == httplib.ACCEPTED + def ex_start_node(self, node): + """ + Start node + + @param node: The node to be stopped + @type node: L{Node} + + @rtype: C{bool} + """ + data = json.dumps({'action': 'start'}) + result = self.connection.request('/my/machines/%s' % (node.id), + data=data, method='POST') + return result.status == httplib.ACCEPTED + def _to_node(self, data): state = NODE_STATE_MAP[data['state']] public_ips = [] diff --git a/libcloud/test/compute/test_joyent.py b/libcloud/test/compute/test_joyent.py index 4e4078939e..4826e474ab 100644 --- a/libcloud/test/compute/test_joyent.py +++ b/libcloud/test/compute/test_joyent.py @@ -76,6 +76,10 @@ def test_ex_stop_node(self): node = self.driver.list_nodes()[0] self.assertTrue(self.driver.ex_stop_node(node)) + def test_ex_start_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.ex_start_node(node)) + class JoyentHttp(MockHttp): fixtures = ComputeFileFixtures('joyent') From aebc7d888fec87199aa9fdef98cdd8db2060545e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Thu, 25 Apr 2013 05:52:05 +0000 Subject: [PATCH 049/143] Backport commit from trunk. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1475640 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 8 ++++++++ libcloud/compute/deployment.py | 5 ++++- libcloud/test/compute/test_deployment.py | 9 ++++----- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/CHANGES b/CHANGES index 706f299c7f..70cd43a346 100644 --- a/CHANGES +++ b/CHANGES @@ -10,6 +10,14 @@ Changes with Apache Libcloud in deveploment: - Add ex_start_node method to the Joyent driver. (LIBCLOUD-319) [rszabo50] + - Fix Python 3 compatibility issue in the ScriptFileDeployment class. + (LIBCLOUD-321) + [Arfrever Frehtes Taifersar Arahesis] + + *) Load Balancer + + - Add ex_list_current_usage method to the Rackspace driver. + Changes with Apache Libcloud 0.12.4: *) Compute diff --git a/libcloud/compute/deployment.py b/libcloud/compute/deployment.py index f00222291a..103315cde7 100644 --- a/libcloud/compute/deployment.py +++ b/libcloud/compute/deployment.py @@ -22,7 +22,7 @@ import os import binascii -from libcloud.utils.py3 import basestring +from libcloud.utils.py3 import basestring, PY3 class Deployment(object): @@ -188,6 +188,9 @@ def __init__(self, script_file, name=None, delete=False): with open(script_file, 'rb') as fp: content = fp.read() + if PY3: + content = content.decode('utf-8') + super(ScriptFileDeployment, self).__init__(script=content, name=name, delete=delete) diff --git a/libcloud/test/compute/test_deployment.py b/libcloud/test/compute/test_deployment.py index 9d1ea37bc6..50194722c6 100644 --- a/libcloud/test/compute/test_deployment.py +++ b/libcloud/test/compute/test_deployment.py @@ -23,7 +23,7 @@ from libcloud.utils.py3 import httplib from libcloud.utils.py3 import u -from libcloud.utils.py3 import PY32 +from libcloud.utils.py3 import PY3 from libcloud.compute.deployment import MultiStepDeployment, Deployment from libcloud.compute.deployment import SSHKeyDeployment, ScriptDeployment @@ -115,14 +115,13 @@ def test_script_deployment(self): 
client=MockClient(hostname='localhost'))) def test_script_file_deployment(self): - # TODO: Fix 3.2 compatibility - if PY32: - return - file_path = os.path.abspath(__file__) with open(file_path, 'rb') as fp: content = fp.read() + if PY3: + content = content.decode('utf-8') + sfd1 = ScriptFileDeployment(script_file=file_path) self.assertEqual(sfd1.script, content) From 7257c85ef976ecfa3042ceddff22d048203979ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Fri, 26 Apr 2013 06:58:17 +0000 Subject: [PATCH 050/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1476076 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 ++ libcloud/compute/drivers/vcloud.py | 53 +++++++++++++++++++ .../vcloud_1_5/api_vapp_get_metadata.xml | 12 +++++ .../vcloud_1_5/api_vapp_post_metadata.xml | 6 +++ libcloud/test/compute/test_vcloud.py | 17 ++++++ 5 files changed, 92 insertions(+) create mode 100644 libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_get_metadata.xml create mode 100644 libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_post_metadata.xml diff --git a/CHANGES b/CHANGES index 70cd43a346..a62d793930 100644 --- a/CHANGES +++ b/CHANGES @@ -14,6 +14,10 @@ Changes with Apache Libcloud in deveploment: (LIBCLOUD-321) [Arfrever Frehtes Taifersar Arahesis] + - Add ex_set_metadata_entry and ex_get_metadata method to the VCloud driver. + (LIBCLOUD-318) + [Michel Samia] + *) Load Balancer - Add ex_list_current_usage method to the Rackspace driver. 
diff --git a/libcloud/compute/drivers/vcloud.py b/libcloud/compute/drivers/vcloud.py index 735cd7d1a8..35328ca4ca 100644 --- a/libcloud/compute/drivers/vcloud.py +++ b/libcloud/compute/drivers/vcloud.py @@ -1191,6 +1191,59 @@ def ex_set_control_access(self, node, control_access): }, method='POST') + def ex_get_metadata(self, node): + """ + @param node: node + @type node: L{Node} + + @return: dictionary mapping metadata keys to metadata values + @rtype: dictionary mapping C{str} to C{str} + """ + res = self.connection.request('%s/metadata' % (get_url_path(node.id))) + metadata_entries = res.object.findall(fixxpath(res.object, 'MetadataEntry')) + res_dict = {} + + for entry in metadata_entries: + key = entry.findtext(fixxpath(res.object, 'Key')) + value = entry.findtext(fixxpath(res.object, 'Value')) + res_dict[key] = value + + return res_dict + + def ex_set_metadata_entry(self, node, key, value): + """ + @param node: node + @type node: L{Node} + + @param key: metadata key to be set + @type key: C{str} + + @param value: metadata value to be set + @type value: C{str} + + @rtype: C{None} + """ + metadata_elem = ET.Element( + 'Metadata', + {'xmlns': "http://www.vmware.com/vcloud/v1.5", + 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"} + ) + entry = ET.SubElement(metadata_elem, 'MetadataEntry') + key_elem = ET.SubElement(entry, 'Key') + key_elem.text = key + value_elem = ET.SubElement(entry, 'Value') + value_elem.text = value + + # send it back to the server + res = self.connection.request( + '%s/metadata' % get_url_path(node.id), + data=ET.tostring(metadata_elem), + headers={ + 'Content-Type': 'application/vnd.vmware.vcloud.metadata+xml' + }, + method='POST') + self._wait_for_task_completion(res.object.get('href')) + def ex_query(self, type, filter=None, page=1, page_size=100, sort_asc=None, sort_desc=None): """ diff --git a/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_get_metadata.xml b/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_get_metadata.xml 
new file mode 100644 index 0000000000..31d88a824d --- /dev/null +++ b/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_get_metadata.xml @@ -0,0 +1,12 @@ + + + + + + + + + owners + msamia@netsuite.com + + diff --git a/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_post_metadata.xml b/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_post_metadata.xml new file mode 100644 index 0000000000..04c7049412 --- /dev/null +++ b/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_post_metadata.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/libcloud/test/compute/test_vcloud.py b/libcloud/test/compute/test_vcloud.py index 46095dd370..bcf24a863b 100644 --- a/libcloud/test/compute/test_vcloud.py +++ b/libcloud/test/compute/test_vcloud.py @@ -270,6 +270,15 @@ def test_ex_set_control_access(self): access_level = ControlAccess.AccessLevel.FULL_CONTROL)]) self.driver.ex_set_control_access(node, control_access) + def test_ex_get_metadata(self): + node = Node('https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) + metadata = self.driver.ex_get_metadata(node) + self.assertEqual(metadata, {'owners':'msamia@netsuite.com'}) + + def test_ex_set_metadata_entry(self): + node = Node('https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) + self.driver.ex_set_metadata_entry(node, 'foo', 'bar') + class VCloud_5_1_Tests(unittest.TestCase, TestCaseMixin): @@ -558,6 +567,14 @@ def _api_query(self, method, url, body, headers): raise AssertionError('Unexpected query type') return httplib.OK, body, headers, httplib.responses[httplib.OK] + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_metadata(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('api_vapp_post_metadata.xml') + return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] + else: + body = self.fixtures.load('api_vapp_get_metadata.xml') + 
return httplib.OK, body, headers, httplib.responses[httplib.OK] + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_controlAccess(self, method, url, body, headers): body = self.fixtures.load('api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] From 20fce2dae2932ea6c40e020d32e6a8d5249dfa44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 4 May 2013 05:56:45 +0000 Subject: [PATCH 051/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1479042 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 3 + libcloud/compute/drivers/vcloud.py | 18 +- ...p_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml | 290 ++++++++++++++++++ ...c_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml | 3 +- .../fixtures/vcloud_1_5/api_vdc_brokenVdc.xml | 56 ++++ libcloud/test/compute/test_vcloud.py | 35 +++ 6 files changed, 396 insertions(+), 9 deletions(-) create mode 100644 libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml create mode 100644 libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_brokenVdc.xml diff --git a/CHANGES b/CHANGES index a62d793930..e61795c575 100644 --- a/CHANGES +++ b/CHANGES @@ -18,6 +18,9 @@ Changes with Apache Libcloud in deveploment: (LIBCLOUD-318) [Michel Samia] + - Various improvements and bug-fixes in the VCloud driver. (LIBCLOUD-323) + [Michel Samia] + *) Load Balancer - Add ex_list_current_usage method to the Rackspace driver. diff --git a/libcloud/compute/drivers/vcloud.py b/libcloud/compute/drivers/vcloud.py index 35328ca4ca..e7d9a7e92a 100644 --- a/libcloud/compute/drivers/vcloud.py +++ b/libcloud/compute/drivers/vcloud.py @@ -20,7 +20,6 @@ import re import base64 import os -import urllib from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlencode from libcloud.utils.py3 import urlparse @@ -56,7 +55,6 @@ Valid vCloud API v1.5 input values. 
""" VIRTUAL_CPU_VALS_1_5 = [i for i in range(1, 9)] -VIRTUAL_MEMORY_VALS_1_5 = [2 ** i for i in range(2, 19)] FENCE_MODE_VALS_1_5 = ['bridged', 'isolated', 'natRouted'] IP_MODE_VALS_1_5 = ['POOL', 'DHCP', 'MANUAL', 'NONE'] @@ -590,9 +588,10 @@ def ex_list_nodes(self, vdcs=None): except Exception: # The vApp was probably removed since the previous vDC query, ignore e = sys.exc_info()[1] - if not (e.args[0].tag.endswith('Error') and + if not (isinstance(e.args[0], _ElementInterface) and + e.args[0].tag.endswith('Error') and e.args[0].get('minorErrorCode') == 'ACCESS_TO_RESOURCE_IS_FORBIDDEN'): - raise e + raise return nodes @@ -1573,7 +1572,7 @@ def _validate_vm_names(names): def _validate_vm_memory(vm_memory): if vm_memory is None: return - elif vm_memory not in VIRTUAL_MEMORY_VALS_1_5: + elif vm_memory not in VIRTUAL_MEMORY_VALS: raise ValueError( '%s is not a valid vApp VM memory value' % vm_memory) @@ -1859,15 +1858,18 @@ def _to_node(self, node_elm): public_ips.append(external_ip.text) elif ip is not None: public_ips.append(ip.text) + os_type_elem = vm_elem.find('{http://schemas.dmtf.org/ovf/envelope/1}OperatingSystemSection') + if os_type_elem: + os_type = os_type_elem.get('{http://www.vmware.com/schema/ovf}osType') + else: + os_type = None vm = { 'id': vm_elem.get('href'), 'name': vm_elem.get('name'), 'state': self.NODE_STATE_MAP[vm_elem.get('status')], 'public_ips': public_ips, 'private_ips': private_ips, - 'os_type': vm_elem - .find('{http://schemas.dmtf.org/ovf/envelope/1}OperatingSystemSection') - .get('{http://www.vmware.com/schema/ovf}osType') + 'os_type': os_type } vms.append(vm) diff --git a/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml b/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml new file mode 100644 index 0000000000..75712d3f04 --- /dev/null +++ b/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml @@ 
-0,0 +1,290 @@ + + + + + + + + + + + + + + + + + + Lease settings section + + 0 + 0 + + + VApp startup section + + + + + The list of logical networks + + + + + + The configuration parameters for logical networks + + + + + + + true + 65.41.64.1 + 255.255.252.0 + 65.41.42.113 + 65.41.42.114 + vm.myorg.com + + + 65.41.67.1 + 65.41.67.254 + + + + + bridged + false + + + false + 3600 + 7200 + + 65.41.64.2 + 65.41.67.0 + + + + true + drop + false + + true + Allow all outgoing traffic + allow + + true + + -1 + Any + -1 + Any + out + false + + + + true + ipTranslation + allowTraffic + + + automatic + ScrumVM_Master + 0 + + + + + false + + + + + true + + + + + + false + + + + + + + + + + + + + + + + + + + Virtual hardware requirements + + Virtual Hardware Family + 0 + mgalet-test2 + vmx-07 + + + 00:50:56:01:00:99 + 0 + true + vCloud - Default + PCNet32 ethernet adapter + Network adapter 0 + 1 + PCNet32 + 10 + + + 0 + SCSI Controller + SCSI Controller 0 + 2 + lsilogic + 6 + + + 0 + Hard disk + Hard disk 1 + + 2000 + 2 + 17 + + + 0 + IDE Controller + IDE Controller 0 + 3 + 5 + + + 0 + false + CD/DVD Drive + CD/DVD Drive 1 + + 3002 + 3 + 15 + + + 0 + false + Floppy Drive + Floppy Drive 1 + + 8000 + 14 + + + hertz * 10^6 + Number of Virtual CPUs + 2 virtual CPU(s) + 4 + 0 + 3 + 2 + 0 + + + + byte * 2^20 + Memory Size + 4096 MB of memory + 5 + 0 + 4 + 4096 + 0 + + + + + + + + + + + + + + + + + Specifies the available VM network connections + 0 + + 0 + 192.168.0.100 + 192.168.0.103 + true + 00:50:56:01:00:d9 + POOL + + + + Specifies Guest OS Customization Settings + true + false + dd75d1d3-5b7b-48f0-aff3-69622ab7e045 + false + false + true + true + sN#9QH9# + false + mgalet-test2 + + + + Specifies Runtime info + + + ScrumVM_Master + + + VMware ESXi + 5.0.0 + VMware, Inc. 
+ en + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml b/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml index 96e14fc587..3e55b5d09d 100644 --- a/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml +++ b/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml @@ -36,6 +36,7 @@ + @@ -53,4 +54,4 @@ 1024 150 true - \ No newline at end of file + diff --git a/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_brokenVdc.xml b/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_brokenVdc.xml new file mode 100644 index 0000000000..80993ea602 --- /dev/null +++ b/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_brokenVdc.xml @@ -0,0 +1,56 @@ + + + + + + + + + + + + AllocationPool + + MB + 5120000 + 5120000 + 1984512 + 0 + + + + MHz + 130000 + 160000 + 0 + 0 + + + MB + 527360 + 527360 + 130752 + 0 + + + + + + + + + + + + + + vmx-04 + vmx-07 + vmx-08 + + + 0 + 1024 + 150 + true + diff --git a/libcloud/test/compute/test_vcloud.py b/libcloud/test/compute/test_vcloud.py index bcf24a863b..87324bd149 100644 --- a/libcloud/test/compute/test_vcloud.py +++ b/libcloud/test/compute/test_vcloud.py @@ -21,6 +21,7 @@ from libcloud.compute.drivers.vcloud import TerremarkDriver, VCloudNodeDriver, Subject from libcloud.compute.drivers.vcloud import VCloud_1_5_NodeDriver, ControlAccess +from libcloud.compute.drivers.vcloud import Vdc from libcloud.compute.base import Node, NodeImage from libcloud.compute.types import NodeState @@ -241,6 +242,13 @@ def test_vdcs(self): def test_ex_list_nodes(self): self.assertEqual(len(self.driver.ex_list_nodes()), len(self.driver.list_nodes())) + def test_ex_list_nodes__masked_exception(self): + """ + Test that we don't mask other exceptions. 
+ """ + brokenVdc = Vdc('/api/vdc/brokenVdc', 'brokenVdc', self.driver) + self.assertRaises(AnotherError, self.driver.ex_list_nodes, (brokenVdc)) + def test_ex_power_off(self): node = Node('https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', 'testNode', NodeState.RUNNING, [], [], self.driver) self.driver.ex_power_off_node(node) @@ -395,6 +403,21 @@ def _api_v0_8_task_11001(self, method, url, body, headers): return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) +class AnotherErrorMember(Exception): + """ + helper class for the synthetic exception + """ + + def __init__(self): + self.tag = 'Error' + + def get(self, foo): + return 'ACCESS_TO_RESOURCE_IS_FORBIDDEN' + +class AnotherError(Exception): + pass + + class VCloud_1_5_MockHttp(MockHttp, unittest.TestCase): fixtures = ComputeFileFixtures('vcloud_1_5') @@ -427,6 +450,14 @@ def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0(self, method, url, body, heade body = self.fixtures.load('api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] + def _api_vdc_brokenVdc(self, method, url, body, headers): + body = self.fixtures.load('api_vdc_brokenVdc.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_vapp_errorRaiser(self, method, url, body, headers): + m = AnotherErrorMember() + raise AnotherError(m) + def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate(self, method, url, body, headers): body = self.fixtures.load('api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] @@ -457,6 +488,10 @@ def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b(self, method, url, body, body = self.fixtures.load('api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml') return httplib.OK, body, headers, httplib.responses[httplib.OK] + def 
_api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c(self, method, url, body, headers): + body = self.fixtures.load('api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045(self, method, url, body, headers): body = self.fixtures.load('put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml') return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] From 375896ef8ef2531a7463219af8253cf1b087dc9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 7 May 2013 20:43:14 +0000 Subject: [PATCH 052/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1480066 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 7 +++++++ libcloud/security.py | 3 +++ 2 files changed, 10 insertions(+) diff --git a/CHANGES b/CHANGES index e61795c575..fb0a6600d4 100644 --- a/CHANGES +++ b/CHANGES @@ -2,6 +2,13 @@ Changes with Apache Libcloud in deveploment: + *) General + + - Add homebrew curl-ca-bundle path to CA_CERTS_PATH. This will make Libcloud + use homebrew curl ca bundle file (if available) for server certificate + validation. (LIBCLOUD-324) + [Robert Chiniquy] + *) Compute - Fix destroy_node method in the experimental libvirt driver. diff --git a/libcloud/security.py b/libcloud/security.py index 72d532882b..30a7a5a8c5 100644 --- a/libcloud/security.py +++ b/libcloud/security.py @@ -42,6 +42,9 @@ # macports: curl-ca-bundle '/opt/local/share/curl/curl-ca-bundle.crt', + + # homebrew: curl-ca-bundle + '/usr/local/opt/curl-ca-bundle/share/ca-bundle.crt', ] # Allow user to explicitly specify which CA bundle to use, using an environment From 03944f83ff4ca712c06e19839ddc16e593c05ae6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 14 May 2013 06:59:46 +0000 Subject: [PATCH 053/143] Backport commit from trunk. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1482232 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 5 +++ libcloud/compute/drivers/openstack.py | 52 ++++++++++++++++++------- libcloud/test/compute/test_openstack.py | 2 + 3 files changed, 46 insertions(+), 13 deletions(-) diff --git a/CHANGES b/CHANGES index fb0a6600d4..37ff26ebd1 100644 --- a/CHANGES +++ b/CHANGES @@ -28,6 +28,11 @@ Changes with Apache Libcloud in deveploment: - Various improvements and bug-fixes in the VCloud driver. (LIBCLOUD-323) [Michel Samia] + - Modify list_sizes method in the OpenStack driver to include + OpenStackNodeSize object which includes 'vcpus' attribute which holds + a number of virtual CPUs for this size. (LIBCLOUD-325) + [Carlo] + *) Load Balancer - Add ex_list_current_usage method to the Rackspace driver. diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index abf5bda2bc..8c2401a96e 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -23,8 +23,6 @@ import warnings -from itertools import chain - from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.utils.py3 import next @@ -272,6 +270,30 @@ def ex_hard_reboot_node(self, node): return self._reboot_node(node, reboot_type='HARD') +class OpenStackNodeSize(NodeSize): + """ + NodeSize class for the OpenStack.org driver. 
+ + Following the example of OpenNebula.org driver + and following guidelines: + https://issues.apache.org/jira/browse/LIBCLOUD-119 + """ + + def __init__(self, id, name, ram, disk, bandwidth, price, driver, + vcpus=None): + super(OpenStackNodeSize, self).__init__(id=id, name=name, ram=ram, + disk=disk, + bandwidth=bandwidth, + price=price, driver=driver) + self.vcpus = vcpus + + def __repr__(self): + return (('') + % (self.id, self.name, self.ram, self.disk, self.bandwidth, + self.price, self.driver.name, self.vcpus)) + + class OpenStack_1_0_Response(OpenStackResponse): def __init__(self, *args, **kwargs): # done because of a circular reference from @@ -769,15 +791,17 @@ def _to_sizes(self, object): return [self._to_size(el) for el in elements] def _to_size(self, el): - return NodeSize(id=el.get('id'), - name=el.get('name'), - ram=int(el.get('ram')), - disk=int(el.get('disk')), - # XXX: needs hardcode - bandwidth=None, - # Hardcoded - price=self._get_size_price(el.get('id')), - driver=self.connection.driver) + vcpus = int(el.get('vcpus')) if el.get('vcpus', None) else None + return OpenStackNodeSize(id=el.get('id'), + name=el.get('name'), + ram=int(el.get('ram')), + disk=int(el.get('disk')), + # XXX: needs hardcode + vcpus=vcpus, + bandwidth=None, + # Hardcoded + price=self._get_size_price(el.get('id')), + driver=self.connection.driver) def ex_limits(self): """ @@ -1092,7 +1116,8 @@ def create_node(self, **kwargs): server_object = server_resp.object['server'] # adminPass is not always present - # http://docs.openstack.org/essex/openstack-compute/admin/content/configuring-compute-API.html#d6e1833 + # http://docs.openstack.org/essex/openstack-compute/admin/ + # content/configuring-compute-API.html#d6e1833 server_object['adminPass'] = create_response.get('adminPass', None) return self._to_node(server_object) @@ -1639,11 +1664,12 @@ def _to_size(self, api_flavor, price=None, bandwidth=None): if not price: price = self._get_size_price(str(api_flavor['id'])) - return 
NodeSize( + return OpenStackNodeSize( id=api_flavor['id'], name=api_flavor['name'], ram=api_flavor['ram'], disk=api_flavor['disk'], + vcpus=api_flavor['vcpus'], bandwidth=bandwidth, price=price, driver=self, diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index a73f1ab22c..64828b3003 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -672,6 +672,8 @@ def test_list_sizes(self): self.assertTrue(isinstance(size.price, float), 'Wrong size price type') + self.assertEqual(sizes[0].vcpus, 8) + def test_list_sizes_with_specified_pricing(self): pricing = dict((str(i), i * 5.0) for i in range(1, 9)) From 83903a4dd189f8981040d3a1f3f96d4c4c1afc07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 18 May 2013 22:47:51 +0000 Subject: [PATCH 054/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1484195 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 8 ++++++++ libcloud/compute/drivers/ec2.py | 15 +++++++++++++-- libcloud/test/compute/test_ec2.py | 14 ++++++++++++++ 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 37ff26ebd1..579b5f84eb 100644 --- a/CHANGES +++ b/CHANGES @@ -33,6 +33,14 @@ Changes with Apache Libcloud in deveploment: a number of virtual CPUs for this size. (LIBCLOUD-325) [Carlo] + - For consistency rename "ex_describe_keypairs" method in the EC2 driver to + "ex_describe_keypair". + [Tomaz Muraus] + + - Modify "ex_describe_keypair" method to return key fingerprint in the + return value. (LIBCLOUD-326) + [Andre Merzky, Tomaz Muraus] + *) Load Balancer - Add ex_list_current_usage method to the Rackspace driver. 
diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index bb303d86c5..26018ac122 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -800,7 +800,14 @@ def ex_describe_all_keypairs(self): return names def ex_describe_keypairs(self, name): - """Describes a keypair by name + """ + Here for backward compatibility. + """ + return self.ex_describe_keypair(name=name) + + def ex_describe_keypair(self, name): + """ + Describes a keypair by name. @note: This is a non-standard extension API, and only works for EC2. @@ -818,8 +825,12 @@ def ex_describe_keypairs(self, name): response = self.connection.request(self.path, params=params).object key_name = findattr(element=response, xpath='keySet/item/keyName', namespace=NAMESPACE) + fingerprint = findattr(element=response, + xpath='keySet/item/keyFingerprint', + namespace=NAMESPACE).strip() return { - 'keyName': key_name + 'keyName': key_name, + 'keyFingerprint': fingerprint } def ex_list_security_groups(self): diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 25944f3673..260a4f73d4 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -267,6 +267,20 @@ def test_ex_describe_all_keypairs(self): keys = self.driver.ex_describe_all_keypairs() self.assertEqual(keys, ['gsg-keypair']) + def test_ex_describe_keypairs(self): + keypair1 = self.driver.ex_describe_keypair('gsg-keypair') + + # Test backward compatibility + keypair2 = self.driver.ex_describe_keypairs('gsg-keypair') + + fingerprint = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \ + '00:00:00:00:00' + + self.assertEqual(keypair1['keyName'], 'gsg-keypair') + self.assertEqual(keypair1['keyFingerprint'], fingerprint) + self.assertEqual(keypair2['keyName'], 'gsg-keypair') + self.assertEqual(keypair2['keyFingerprint'], fingerprint) + def test_ex_describe_tags(self): node = Node('i-4382922a', None, None, None, None, self.driver) tags = 
self.driver.ex_describe_tags(resource=node) From fbb1e35a99ceae11584a864cf69e52fabdefd5b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 21 May 2013 20:11:06 +0000 Subject: [PATCH 055/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1484932 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/common/openstack.py | 22 ++++++++++++++++++++-- libcloud/test/compute/test_openstack.py | 24 ++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index b9952ef5b1..d329ad31a2 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -265,6 +265,20 @@ def __init__(self, service_catalog, ex_force_auth_version=None): raise LibcloudError('auth version "%s" not supported' % (self._auth_version)) + def get_catalog(self): + return self._service_catalog + + def get_public_urls(self, service_type=None, name=None): + endpoints = self.get_endpoints(service_type=service_type, + name=name) + + result = [] + for endpoint in endpoints: + if 'publicURL' in endpoint: + result.append(endpoint['publicURL']) + + return result + def get_endpoints(self, service_type=None, name=None): eps = [] @@ -280,7 +294,6 @@ def get_endpoints(self, service_type=None, name=None): return eps def get_endpoint(self, service_type=None, name=None, region=None): - if '2.0' in self._auth_version: endpoint = self._service_catalog.get(service_type, {}) \ .get(name, {}).get(region, []) @@ -294,7 +307,6 @@ def get_endpoint(self, service_type=None, name=None, region=None): return {} def _parse_auth_v1(self, service_catalog): - for service, endpoints in service_catalog.items(): self._service_catalog[service] = {} @@ -423,6 +435,12 @@ def __init__(self, user_id, key, secure=True, super(OpenStackBaseConnection, self).__init__( user_id, key, secure=secure, timeout=timeout) + def get_service_catalog(self): + if self.service_catalog is 
None: + self._populate_hosts_and_request_paths() + + return self.service_catalog + def get_endpoint(self): """ Selects the endpoint to use based on provider specific values, diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index 64828b3003..394d928048 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -26,6 +26,7 @@ from libcloud.common.types import InvalidCredsError, MalformedResponseError, \ LibcloudError +from libcloud.common.openstack import OpenStackBaseConnection from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.compute.drivers.openstack import ( @@ -68,6 +69,29 @@ def test_non_xml_content_type_handling(self): self.assertEqual(body, RESPONSE_BODY, "Non-XML body should be returned as is") +class OpenStackServiceCatalogTests(unittest.TestCase): + def test_connection_get_service_catalog(self): + connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) + connection.conn_classes = (OpenStackMockHttp, OpenStackMockHttp) + connection.auth_url = "https://auth.api.example.com/v1.1/" + connection._ex_force_base_url = "https://www.foo.com" + connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) + + result = connection.get_service_catalog() + catalog = result.get_catalog() + endpoints = result.get_endpoints('cloudFilesCDN', 'cloudFilesCDN') + public_urls = result.get_public_urls('cloudFilesCDN', 'cloudFilesCDN') + + expected_urls = [ + 'https://cdn2.clouddrive.com/v1/MossoCloudFS', + 'https://cdn2.clouddrive.com/v1/MossoCloudFS' + ] + + self.assertTrue('cloudFilesCDN' in catalog) + self.assertEqual(len(endpoints), 2) + self.assertEqual(public_urls, expected_urls) + + class OpenStack_1_0_Tests(unittest.TestCase, TestCaseMixin): should_list_locations = False From 6d624ddd560fb3a7f7bd326522413bc887643ca1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Thu, 23 May 2013 20:18:55 +0000 Subject: 
[PATCH 056/143] Backport commits from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1485843 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 17 +++ libcloud/common/openstack.py | 95 ++++++++++++++-- libcloud/storage/drivers/cloudfiles.py | 62 +++++++---- libcloud/test/compute/test_openstack.py | 134 ++++++++++++++++++++++- libcloud/test/storage/test_cloudfiles.py | 54 ++++++++- libcloud/utils/misc.py | 13 ++- tox.ini | 3 +- 7 files changed, 340 insertions(+), 38 deletions(-) diff --git a/CHANGES b/CHANGES index 579b5f84eb..824c5e1da8 100644 --- a/CHANGES +++ b/CHANGES @@ -9,6 +9,15 @@ Changes with Apache Libcloud in deveploment: validation. (LIBCLOUD-324) [Robert Chiniquy] + - Modify OpenStackAuthConnection and change auth_token_expires attribute to + be a datetime object instead of a string. + [Tomaz Muraus] + + - Modify OpenStackAuthConnection to support re-using of the existing auth + token if it's still valid instead of re-authenticating on every + authenticate() call. + [Tomaz Muraus] + *) Compute - Fix destroy_node method in the experimental libvirt driver. @@ -41,6 +50,14 @@ Changes with Apache Libcloud in deveploment: return value. (LIBCLOUD-326) [Andre Merzky, Tomaz Muraus] + *) Storage + + - Fix an issue with double encoding the container name in the CloudFiles + driver upload_object method. + Also properly encode container and object name used in the HTTP request + in the get_container and get_object method. (LIBCLOUD-328) + [Tomaz Muraus] + *) Load Balancer - Add ex_list_current_usage method to the Rackspace driver. 
diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index d329ad31a2..d2eff93b88 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -19,8 +19,10 @@ import sys import binascii import os +import datetime from libcloud.utils.py3 import httplib +from libcloud.utils.iso8601 import parse_date from libcloud.common.base import ConnectionUserAndKey, Response from libcloud.compute.types import (LibcloudError, InvalidCredsError, @@ -33,10 +35,30 @@ AUTH_API_VERSION = '1.1' +# Auth versions which contain token expiration information. +AUTH_VERSIONS_WITH_EXPIRES = [ + '1.1', + '2.0', + '2.0_apikey', + '2.0_password' +] + +# How many seconds to substract from the auth token expiration time before +# testing if the token is still valid. +# The time is subtracted to account for the HTTP request latency and prevent +# user from getting "InvalidCredsError" if token is about to expire. +AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5 + __all__ = [ + 'OpenStackBaseConnection', + 'OpenStackAuthConnection', + 'OpenStackServiceCatalog', + 'OpenStackDriverMixin', "OpenStackBaseConnection", - "OpenStackAuthConnection", - ] + "OpenStackAuthConnection" + + 'AUTH_TOKEN_EXPIRES_GRACE_SECONDS' +] # @TODO: Refactor for re-use by other openstack drivers @@ -87,17 +109,19 @@ def __init__(self, parent_conn, auth_url, auth_version, user_id, key, # enable tests to use the same mock connection classes. 
self.conn_classes = parent_conn.conn_classes - if timeout: - self.timeout = timeout - super(OpenStackAuthConnection, self).__init__( - user_id, key, url=auth_url, timeout=self.timeout) + user_id, key, url=auth_url, timeout=timeout) self.auth_version = auth_version self.auth_url = auth_url - self.urls = {} self.driver = self.parent_conn.driver self.tenant_name = tenant_name + self.timeout = timeout + + self.urls = {} + self.auth_token = None + self.auth_token_expires = None + self.auth_user_info = None def morph_action_hook(self, action): return action @@ -107,7 +131,19 @@ def add_default_headers(self, headers): headers['Content-Type'] = 'application/json; charset=UTF-8' return headers - def authenticate(self): + def authenticate(self, force=False): + """ + Authenticate against the keystone api. + + @param force: Forcefully update the token even if it's already cached + and still valid. + @type force: C{bool} + """ + if not force and self.auth_version in AUTH_VERSIONS_WITH_EXPIRES \ + and self._is_token_valid(): + # If token is still valid, there is no need to re-authenticate + return self + if self.auth_version == "1.0": return self.authenticate_1_0() elif self.auth_version == "1.1": @@ -153,6 +189,8 @@ def authenticate_1_0(self): raise MalformedResponseError('Missing X-Auth-Token in \ response headers') + return self + def authenticate_1_1(self): reqbody = json.dumps({'credentials': {'username': self.user_id, 'key': self.key}}) @@ -174,9 +212,12 @@ def authenticate_1_1(self): except Exception: e = sys.exc_info()[1] raise MalformedResponseError('Failed to parse JSON', e) + try: + expires = body['auth']['token']['expires'] + self.auth_token = body['auth']['token']['id'] - self.auth_token_expires = body['auth']['token']['expires'] + self.auth_token_expires = parse_date(expires) self.urls = body['auth']['serviceCatalog'] self.auth_user_info = None except KeyError: @@ -184,6 +225,8 @@ def authenticate_1_1(self): raise MalformedResponseError('Auth JSON response is \ 
missing required elements', e) + return self + def authenticate_2_0_with_apikey(self): # API Key based authentication uses the RAX-KSKEY extension. # http://s.apache.org/oAi @@ -228,8 +271,10 @@ def authenticate_2_0_with_body(self, reqbody): try: access = body['access'] + expires = access['token']['expires'] + self.auth_token = access['token']['id'] - self.auth_token_expires = access['token']['expires'] + self.auth_token_expires = parse_date(expires) self.urls = access['serviceCatalog'] self.auth_user_info = access.get('user', {}) except KeyError: @@ -237,6 +282,32 @@ def authenticate_2_0_with_body(self, reqbody): raise MalformedResponseError('Auth JSON response is \ missing required elements', e) + return self + + def _is_token_valid(self): + """ + Return True if the current taken is already cached and hasn't expired + yet. + + @rtype: C{bool} + """ + if not self.auth_token: + return False + + if not self.auth_token_expires: + return False + + expires = self.auth_token_expires - \ + datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS) + + time_tuple_expires = expires.utctimetuple() + time_tuple_now = datetime.datetime.utcnow().utctimetuple() + + # TODO: Subtract some reasonable grace time period + if time_tuple_now < time_tuple_expires: + return True + + return False class OpenStackServiceCatalog(object): """ @@ -421,6 +492,8 @@ def __init__(self, user_id, key, secure=True, self._ex_force_service_name = ex_force_service_name self._ex_force_service_region = ex_force_service_region + self._osa = None + if ex_force_auth_token: self.auth_token = ex_force_auth_token @@ -487,7 +560,7 @@ def _populate_hosts_and_request_paths(self): if not self.auth_token: aurl = self.auth_url - if self._ex_force_auth_url != None: + if self._ex_force_auth_url is not None: aurl = self._ex_force_auth_url if aurl == None: diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index 77dc97215a..d3fddc8a01 100644 --- 
a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -244,7 +244,8 @@ def list_container_objects(self, container): return LazyList(get_more=self._get_more, value_dict=value_dict) def get_container(self, container_name): - response = self.connection.request('/%s' % (container_name), + container_name_encoded = self._encode_container_name(container_name) + response = self.connection.request('/%s' % (container_name_encoded), method='HEAD') if response.status == httplib.NO_CONTENT: @@ -258,8 +259,11 @@ def get_container(self, container_name): def get_object(self, container_name, object_name): container = self.get_container(container_name) - response = self.connection.request('/%s/%s' % (container_name, - object_name), + container_name_encoded = self._encode_container_name(container_name) + object_name_encoded = self._encode_container_name(object_name) + + response = self.connection.request('/%s/%s' % (container_name_encoded, + object_name_encoded), method='HEAD') if response.status in [httplib.OK, httplib.NO_CONTENT]: obj = self._headers_to_object( @@ -311,9 +315,9 @@ def enable_container_cdn(self, container, ex_ttl=None): return response.status in [httplib.CREATED, httplib.ACCEPTED] def create_container(self, container_name): - container_name = self._clean_container_name(container_name) + container_name_encoded = self._encode_container_name(container_name) response = self.connection.request( - '/%s' % (container_name), method='PUT') + '/%s' % (container_name_encoded), method='PUT') if response.status == httplib.CREATED: # Accepted mean that container is not yet created but it will be @@ -330,7 +334,7 @@ def create_container(self, container_name): raise LibcloudError('Unexpected status code: %s' % (response.status)) def delete_container(self, container): - name = self._clean_container_name(container.name) + name = self._encode_container_name(container.name) # Only empty container can be deleted response = self.connection.request('/%s' 
% (name), method='DELETE') @@ -405,8 +409,8 @@ def upload_object_via_stream(self, iterator, extra=extra, iterator=iterator) def delete_object(self, obj): - container_name = self._clean_container_name(obj.container.name) - object_name = self._clean_object_name(obj.name) + container_name = self._encode_container_name(obj.container.name) + object_name = self._encode_object_name(obj.name) response = self.connection.request( '/%s/%s' % (container_name, object_name), method='DELETE') @@ -419,6 +423,26 @@ def delete_object(self, obj): raise LibcloudError('Unexpected status code: %s' % (response.status)) + def ex_purge_object_from_cdn(self, obj, email=None): + """ + Purge edge cache for the specified object. + + @param email: Email where a notification will be sent when the job + completes. (optional) + @type email: C{str} + """ + container_name = self._encode_container_name(obj.container.name) + object_name = self._encode_object_name(obj.name) + headers = {'X-Purge-Email': email} if email else {} + + response = self.connection.request('/%s/%s' % (container_name, + object_name), + method='DELETE', + headers=headers, + cdn_request=True) + + return response.status == httplib.NO_CONTENT + def ex_get_meta_data(self): """ Get meta data @@ -594,14 +618,14 @@ def _upload_object_manifest(self, container, object_name, extra=None, extra = extra or {} meta_data = extra.get('meta_data') - container_name_cleaned = self._clean_container_name(container.name) - object_name_cleaned = self._clean_object_name(object_name) - request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned) + container_name_encoded = self._encode_container_name(container.name) + object_name_encoded = self._encode_object_name(object_name) + request_path = '/%s/%s' % (container_name_encoded, object_name_encoded) headers = {'X-Auth-Token': self.connection.auth_token, 'X-Object-Manifest': '%s/%s/' % - (container_name_cleaned, - object_name_cleaned)} + (container_name_encoded, + object_name_encoded)} data = 
'' response = self.connection.request(request_path, @@ -657,8 +681,8 @@ def _put_object(self, container, object_name, upload_func, upload_func_kwargs, extra=None, file_path=None, iterator=None, verify_hash=True): extra = extra or {} - container_name_cleaned = self._clean_container_name(container.name) - object_name_cleaned = self._clean_object_name(object_name) + container_name_encoded = self._encode_container_name(container.name) + object_name_encoded = self._encode_object_name(object_name) content_type = extra.get('content_type', None) meta_data = extra.get('meta_data', None) @@ -668,7 +692,7 @@ def _put_object(self, container, object_name, upload_func, key = 'X-Object-Meta-%s' % (key) headers[key] = value - request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned) + request_path = '/%s/%s' % (container_name_encoded, object_name_encoded) result_dict = self._upload_object( object_name=object_name, content_type=content_type, upload_func=upload_func, upload_func_kwargs=upload_func_kwargs, @@ -702,9 +726,9 @@ def _put_object(self, container, object_name, upload_func, raise LibcloudError('status_code=%s' % (response.status), driver=self) - def _clean_container_name(self, name): + def _encode_container_name(self, name): """ - Clean container name. + Encode container name so it can be used as part of the HTTP request. """ if name.startswith('/'): name = name[1:] @@ -722,7 +746,7 @@ def _clean_container_name(self, name): return name - def _clean_object_name(self, name): + def _encode_object_name(self, name): name = urlquote(name) return name diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index 394d928048..a69b998f45 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -12,14 +12,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ import sys import unittest +import datetime try: import simplejson as json except ImportError: import json +from mock import Mock + from libcloud.utils.py3 import httplib from libcloud.utils.py3 import method_type from libcloud.utils.py3 import u @@ -27,6 +31,8 @@ from libcloud.common.types import InvalidCredsError, MalformedResponseError, \ LibcloudError from libcloud.common.openstack import OpenStackBaseConnection +from libcloud.common.openstack import OpenStackAuthConnection +from libcloud.common.openstack import AUTH_TOKEN_EXPIRES_GRACE_SECONDS from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.compute.drivers.openstack import ( @@ -70,6 +76,7 @@ def test_non_xml_content_type_handling(self): class OpenStackServiceCatalogTests(unittest.TestCase): + # TODO refactor and move into libcloud/test/common def test_connection_get_service_catalog(self): connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) connection.conn_classes = (OpenStackMockHttp, OpenStackMockHttp) @@ -84,14 +91,129 @@ def test_connection_get_service_catalog(self): expected_urls = [ 'https://cdn2.clouddrive.com/v1/MossoCloudFS', - 'https://cdn2.clouddrive.com/v1/MossoCloudFS' ] self.assertTrue('cloudFilesCDN' in catalog) - self.assertEqual(len(endpoints), 2) + self.assertEqual(len(endpoints), len(expected_urls)) self.assertEqual(public_urls, expected_urls) +class OpenStackAuthConnectionTests(unittest.TestCase): + # TODO refactor and move into libcloud/test/common + def test_basic_authentication(self): + tuples = [ + ('1.0', OpenStackMockHttp), + ('1.1', OpenStackMockHttp), + ('2.0', OpenStack_2_0_MockHttp), + ('2.0_apikey', OpenStack_2_0_MockHttp), + ('2.0_password', OpenStack_2_0_MockHttp) + ] + + user_id = OPENSTACK_PARAMS[0] + key = OPENSTACK_PARAMS[1] + + for (auth_version, mock_http_class) in tuples: + connection = \ + self._get_mock_connection(mock_http_class=mock_http_class) + auth_url = connection.auth_url + + osa = 
OpenStackAuthConnection(connection, auth_url, auth_version, + user_id, key) + + self.assertEqual(osa.urls, {}) + self.assertEqual(osa.auth_token, None) + self.assertEqual(osa.auth_user_info, None) + osa = osa.authenticate() + + self.assertTrue(len(osa.urls) >= 1) + self.assertTrue(osa.auth_token is not None) + + if auth_version in ['1.1', '2.0', '2.0_apikey', '2.0_password']: + self.assertTrue(osa.auth_token_expires is not None) + + if auth_version in ['2.0', '2.0_apikey', '2.0_password']: + self.assertTrue(osa.auth_user_info is not None) + + def test_token_expiration_and_force_reauthentication(self): + user_id = OPENSTACK_PARAMS[0] + key = OPENSTACK_PARAMS[1] + + connection = self._get_mock_connection(OpenStack_2_0_MockHttp) + auth_url = connection.auth_url + auth_version = '2.0' + + yesterday = datetime.datetime.today() - datetime.timedelta(1) + tomorrow = datetime.datetime.today() + datetime.timedelta(1) + + osa = OpenStackAuthConnection(connection, auth_url, auth_version, + user_id, key) + + mocked_auth_method = Mock(wraps=osa.authenticate_2_0_with_body) + osa.authenticate_2_0_with_body = mocked_auth_method + + # Force re-auth, expired token + osa.auth_token = None + osa.auth_token_expires = yesterday + count = 5 + + for i in range(0, count): + osa.authenticate(force=True) + + self.assertEqual(mocked_auth_method.call_count, count) + + # No force reauth, expired token + osa.auth_token = None + osa.auth_token_expires = yesterday + + mocked_auth_method.call_count = 0 + self.assertEqual(mocked_auth_method.call_count, 0) + + for i in range(0, count): + osa.authenticate(force=False) + + self.assertEqual(mocked_auth_method.call_count, count) + + # No force reauth, valid / non-expired token + osa.auth_token = None + + mocked_auth_method.call_count = 0 + self.assertEqual(mocked_auth_method.call_count, 0) + + for i in range(0, count): + osa.authenticate(force=False) + + if i == 0: + osa.auth_token_expires = tomorrow + + self.assertEqual(mocked_auth_method.call_count, 1) 
+ + # No force reauth, valid / non-expired token which is about to expire in + # less than AUTH_TOKEN_EXPIRES_GRACE_SECONDS + soon = datetime.datetime.now() + \ + datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS - 1) + osa.auth_token = None + + mocked_auth_method.call_count = 0 + self.assertEqual(mocked_auth_method.call_count, 0) + + for i in range(0, count): + osa.authenticate(force=False) + + if i == 0: + osa.auth_token_expires = soon + + self.assertEqual(mocked_auth_method.call_count, 5) + + def _get_mock_connection(self, mock_http_class): + connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) + connection.conn_classes = (mock_http_class, mock_http_class) + connection.auth_url = "https://auth.api.example.com/v1.1/" + connection._ex_force_base_url = "https://www.foo.com" + connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) + + return connection + + class OpenStack_1_0_Tests(unittest.TestCase, TestCaseMixin): should_list_locations = False @@ -127,7 +249,9 @@ def test_auth_token_is_set(self): def test_auth_token_expires_is_set(self): self.driver.connection._populate_hosts_and_request_paths() - self.assertEquals(self.driver.connection.auth_token_expires, "2011-09-18T02:44:17.000-05:00") + + expires = self.driver.connection.auth_token_expires + self.assertEquals(expires.isoformat(), "2011-09-18T02:44:17-05:00") def test_auth(self): OpenStackMockHttp.type = 'UNAUTHORIZED' @@ -603,7 +727,8 @@ def test_auth_token_expires_is_set(self): self.driver.connection.auth_token_expires = None self.driver.connection._populate_hosts_and_request_paths() - self.assertEquals(self.driver.connection.auth_token_expires, "2011-11-23T21:00:14.000-06:00") + expires = self.driver.connection.auth_token_expires + self.assertEquals(expires.isoformat(), "2011-11-23T21:00:14-06:00") def test_ex_force_base_url(self): # change base url and trash the current auth token so we can re-authenticate @@ -1066,7 +1191,6 @@ def _v1_1_slug_servers_12067(self, method, url, body, 
headers): raise NotImplementedError() - def _v1_1_slug_servers_12064(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_servers_12064.json') diff --git a/libcloud/test/storage/test_cloudfiles.py b/libcloud/test/storage/test_cloudfiles.py index b16cd649ab..e0390b54ec 100644 --- a/libcloud/test/storage/test_cloudfiles.py +++ b/libcloud/test/storage/test_cloudfiles.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. @@ -28,6 +29,7 @@ from libcloud.utils.py3 import PY3 from libcloud.utils.py3 import b from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlquote if PY3: from io import FileIO as file @@ -44,6 +46,7 @@ from libcloud.storage.drivers.dummy import DummyIterator from libcloud.test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 +from libcloud.test import MockHttpTestCase # pylint: disable-msg=E0611 from libcloud.test.file_fixtures import StorageFileFixtures, OpenStackFixtures # pylint: disable-msg=E0611 current_hash = None @@ -623,6 +626,26 @@ def test__upload_object_manifest_wrong_hash(self): finally: self.driver.connection.request = _request + def test_create_container_put_object_name_encoding(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 1000 + + old_func = CloudFilesStorageDriver._upload_file + CloudFilesStorageDriver._upload_file = upload_file + + container_name = 'speci@l_name' + object_name = 'm@obj€ct' + file_path = os.path.abspath(__file__) + + container = self.driver.create_container(container_name=container_name) + self.assertEqual(container.name, container_name) + + obj = self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name) + 
self.assertEqual(obj.name, object_name) + CloudFilesStorageDriver._upload_file = old_func + def test_ex_enable_static_website(self): container = Container(name='foo_bar_container', extra={}, driver=self) result = self.driver.ex_enable_static_website(container=container, @@ -680,7 +703,7 @@ def _remove_test_file(self): pass -class CloudFilesMockHttp(StorageMockHttp): +class CloudFilesMockHttp(StorageMockHttp, MockHttpTestCase): fixtures = StorageFileFixtures('cloudfiles') auth_fixtures = OpenStackFixtures() @@ -828,6 +851,22 @@ def _v1_MossoCloudFS_test_create_container( status_code = httplib.CREATED return (status_code, body, headers, httplib.responses[httplib.OK]) + def _v1_MossoCloudFS_speci_40l_name(self, method, url, body, headers): + # test_create_container_put_object_name_encoding + # Verify that the name is properly url encoded + container_name = 'speci@l_name' + encoded_container_name = urlquote(container_name) + self.assertTrue(encoded_container_name in url) + + headers = copy.deepcopy(self.base_headers) + body = self.fixtures.load('list_container_objects_empty.json') + headers = copy.deepcopy(self.base_headers) + headers.update({ 'content-length': 18, + 'date': 'Mon, 28 Feb 2011 07:52:57 GMT' + }) + status_code = httplib.CREATED + return (status_code, body, headers, httplib.responses[httplib.OK]) + def _v1_MossoCloudFS_test_create_container_ALREADY_EXISTS( self, method, url, body, headers): # test_create_container_already_exists @@ -911,6 +950,19 @@ def _v1_MossoCloudFS_foo_bar_container_foo_test_upload( headers['etag'] = 'hash343hhash89h932439jsaa89' return (httplib.CREATED, body, headers, httplib.responses[httplib.OK]) + def _v1_MossoCloudFS_speci_40l_name_m_40obj_E2_82_ACct(self, method, url, + body, headers): + # test_create_container_put_object_name_encoding + # Verify that the name is properly url encoded + object_name = 'm@obj€ct' + encoded_object_name = urlquote(object_name) + + headers = copy.deepcopy(self.base_headers) + body = '' + 
headers['etag'] = 'hash343hhash89h932439jsaa89' + return (httplib.CREATED, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_empty(self, method, url, body, headers): # test_upload_object_zero_size_object diff --git a/libcloud/utils/misc.py b/libcloud/utils/misc.py index 88af26b487..bfd9a92bbf 100644 --- a/libcloud/utils/misc.py +++ b/libcloud/utils/misc.py @@ -13,6 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. +__all__ = [ + 'get_driver', + 'set_driver', + 'merge_valid_keys', + 'get_new_obj', + 'str2dicts', + 'dict2str', + 'reverse_dict', + 'lowercase_keys' +] + import sys @@ -206,7 +217,7 @@ def dict2str(data): """ result = '' for k in data: - if data[k] != None: + if data[k] is not None: result += '%s %s\n' % (str(k), str(data[k])) else: result += '%s\n' % str(k) diff --git a/tox.ini b/tox.ini index 8243f8f823..b6d070be4c 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,7 @@ [tox] - envlist = py25,py26,py27,pypy,py32,py33 +setenv = + PIP_USE_MIRRORS=1 [testenv] deps = mock From e726bc2148656ef1412681ff7ab3b54267b2d84b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Thu, 23 May 2013 23:24:45 +0000 Subject: [PATCH 057/143] Backport commits from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1485898 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 + libcloud/compute/drivers/cloudstack.py | 29 ++++-- libcloud/test/compute/test_cloudstack.py | 14 +++ libcloud/utils/iso8601.py | 122 +++++++++++++++++++++++ 4 files changed, 159 insertions(+), 10 deletions(-) create mode 100644 libcloud/utils/iso8601.py diff --git a/CHANGES b/CHANGES index 824c5e1da8..7ebb787682 100644 --- a/CHANGES +++ b/CHANGES @@ -50,6 +50,10 @@ Changes with Apache Libcloud in deveploment: return value. 
(LIBCLOUD-326) [Andre Merzky, Tomaz Muraus] + - Populate private_ips attribute in the CloudStack drive when returning + a Node object from the create_node method. (LIBCLOUD-329) + [Sebastien Goasguen, Tomaz Muraus] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index dd4c91662c..c2f2ad7f26 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -170,35 +170,40 @@ def list_nodes(self): vms = self._sync_request('listVirtualMachines') addrs = self._sync_request('listPublicIpAddresses') - public_ips = {} + public_ips_map = {} for addr in addrs.get('publicipaddress', []): if 'virtualmachineid' not in addr: continue vm_id = addr['virtualmachineid'] - if vm_id not in public_ips: - public_ips[vm_id] = {} - public_ips[vm_id][addr['ipaddress']] = addr['id'] + if vm_id not in public_ips_map: + public_ips_map[vm_id] = {} + public_ips_map[vm_id][addr['ipaddress']] = addr['id'] nodes = [] for vm in vms.get('virtualmachine', []): + state = self.NODE_STATE_MAP[vm['state']] + + public_ips = [] private_ips = [] for nic in vm['nic']: if 'ipaddress' in nic: private_ips.append(nic['ipaddress']) + public_ips = public_ips_map.get(vm['id'], {}).keys() + node = CloudStackNode( id=vm['id'], name=vm.get('displayname', None), - state=self.NODE_STATE_MAP[vm['state']], - public_ips=public_ips.get(vm['id'], {}).keys(), + state=state, + public_ips=public_ips, private_ips=private_ips, driver=self, extra={'zoneid': vm['zoneid'], } ) - addrs = public_ips.get(vm['id'], {}).items() + addrs = public_ips_map.get(vm['id'], {}).items() addrs = [CloudStackAddress(node, v, k) for k, v in addrs] node.extra['ip_addresses'] = addrs @@ -244,13 +249,17 @@ def create_node(self, name, size, image, location=None, **kwargs): ) node = result['virtualmachine'] + state = self.NODE_STATE_MAP[node['state']] + + public_ips = [] + private_ips = 
[nic['ipaddress'] for nic in node['nic']] return Node( id=node['id'], name=node['displayname'], - state=self.NODE_STATE_MAP[node['state']], - public_ips=[], - private_ips=[], + state=state, + public_ips=public_ips, + private_ips=private_ips, driver=self, extra={ 'zoneid': location.id, diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index 90659ba6ab..b1e8ab1654 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -58,6 +58,20 @@ def test_create_node_delayed_failure(self): return self.assertTrue(False) + def test_create_node_default_location_success(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + default_location = self.driver.list_locations()[0] + + node = self.driver.create_node(name='fred', + image=image, + size=size) + + self.assertEqual(node.name, 'fred') + self.assertEqual(node.public_ips, []) + self.assertEqual(node.private_ips, ['1.1.1.2']) + self.assertEqual(node.extra['zoneid'], default_location.id) + def test_list_images_no_images_available(self): CloudStackMockHttp.fixture_tag = 'notemplates' diff --git a/libcloud/utils/iso8601.py b/libcloud/utils/iso8601.py new file mode 100644 index 0000000000..6675a76506 --- /dev/null +++ b/libcloud/utils/iso8601.py @@ -0,0 +1,122 @@ +""" +Copyright (c) 2007 Michael Twomey + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +ISO 8601 date time string parsing + +Basic usage: +>>> import iso8601 +>>> iso8601.parse_date("2007-01-25T12:00:00Z") +datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.Utc object at 0x...>) +>>> +""" + +# Taken from pyiso8601 which is licensed under the MIT license. + +from datetime import datetime, timedelta, tzinfo +import re + +__all__ = ["parse_date", "ParseError"] + +# Adapted from http://delete.me.uk/2005/03/iso8601.html +ISO8601_REGEX = re.compile(r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})" + r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?" + r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
+) +TIMEZONE_REGEX = re.compile("(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})") + +class ParseError(Exception): + """Raised when there is a problem parsing a date string""" + +# Yoinked from python docs +ZERO = timedelta(0) +class Utc(tzinfo): + """UTC + + """ + def utcoffset(self, dt): + return ZERO + + def tzname(self, dt): + return "UTC" + + def dst(self, dt): + return ZERO +UTC = Utc() + +class FixedOffset(tzinfo): + """Fixed offset in hours and minutes from UTC + + """ + def __init__(self, offset_hours, offset_minutes, name): + self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes) + self.__name = name + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return self.__name + + def dst(self, dt): + return ZERO + + def __repr__(self): + return "<FixedOffset %r>" % self.__name + +def parse_timezone(tzstring, default_timezone=UTC): + """Parses ISO 8601 time zone specs into tzinfo offsets + + """ + if tzstring == "Z": + return default_timezone + # This isn't strictly correct, but it's common to encounter dates without + # timezones so I'll assume the default (which defaults to UTC). + # Addresses issue 4. + if tzstring is None: + return default_timezone + m = TIMEZONE_REGEX.match(tzstring) + prefix, hours, minutes = m.groups() + hours, minutes = int(hours), int(minutes) + if prefix == "-": + hours = -hours + minutes = -minutes + return FixedOffset(hours, minutes, tzstring) + +def parse_date(datestring, default_timezone=UTC): + """Parses ISO 8601 dates into datetime objects + + The timezone is parsed from the date string. However it is quite common to + have dates without a timezone (not strictly correct). In this case the + default timezone specified in default_timezone is used. This is UTC by + default.
+ """ + m = ISO8601_REGEX.match(datestring) + if not m: + raise ParseError("Unable to parse date string %r" % datestring) + groups = m.groupdict() + tz = parse_timezone(groups["timezone"], default_timezone=default_timezone) + if groups["fraction"] is None: + groups["fraction"] = 0 + else: + groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6) + return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]), + int(groups["hour"]), int(groups["minute"]), int(groups["second"]), + int(groups["fraction"]), tz) From f0ee7e9ec498c2909b8302e84ea269bb36508638 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Fri, 24 May 2013 00:15:46 +0000 Subject: [PATCH 058/143] Allow user to pass 'ex_auth_instance' OpenStackBaseConnection class constructor. This way a single OpenStackAuthConnection instance can be more easily re-used across multiple driver instances. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1485904 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/common/openstack.py | 61 ++++++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 20 deletions(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index d2eff93b88..18a975780d 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -461,6 +461,10 @@ class OpenStackBaseConnection(ConnectionUserAndKey): @param ex_force_service_region: Region to use when selecting an service. If not specified, a provider specific default will be used. @type ex_force_service_region: C{string} + + @param ex_auth_connection: OpenStackAuthConnection instance to use for + making HTTP requests. If not specified, a new one is instantiated. 
+ @type ex_auth_connection: C{OpenStackAuthConnection} """ auth_url = None @@ -482,7 +486,8 @@ def __init__(self, user_id, key, secure=True, ex_tenant_name=None, ex_force_service_type=None, ex_force_service_name=None, - ex_force_service_region=None): + ex_force_service_region=None, + ex_auth_connection=None): self._ex_force_base_url = ex_force_base_url self._ex_force_auth_url = ex_force_auth_url @@ -492,7 +497,7 @@ def __init__(self, user_id, key, secure=True, self._ex_force_service_name = ex_force_service_name self._ex_force_service_region = ex_force_service_region - self._osa = None + self._auth_connection = ex_auth_connection if ex_force_auth_token: self.auth_token = ex_force_auth_token @@ -539,6 +544,29 @@ def get_endpoint(self): raise LibcloudError('Could not find specified endpoint') + def get_auth_connection_instance(self): + """ + Return an OpenStackAuthConnection instance for this connection. + """ + auth_url = self.auth_url + + if self._ex_force_auth_url is not None: + auth_url = self._ex_force_auth_url + + if auth_url is None: + raise LibcloudError('OpenStack instance must ' + + 'have auth_url set') + + if not self._auth_connection: + self._auth_connection = OpenStackAuthConnection(self, auth_url, + self._auth_version, + self.user_id, + self.key, + tenant_name=self._ex_tenant_name, + timeout=self.timeout) + + return self._auth_connection + def add_default_headers(self, headers): headers['X-Auth-Token'] = self.auth_token headers['Accept'] = self.accept_format @@ -558,29 +586,17 @@ def _populate_hosts_and_request_paths(self): """ if not self.auth_token: - aurl = self.auth_url - - if self._ex_force_auth_url is not None: - aurl = self._ex_force_auth_url - - if aurl == None: - raise LibcloudError('OpenStack instance must ' + - 'have auth_url set') - - osa = OpenStackAuthConnection(self, aurl, self._auth_version, - self.user_id, self.key, - tenant_name=self._ex_tenant_name, - timeout=self.timeout) + auth_connection = self.get_auth_connection_instance() # may 
throw InvalidCreds, etc - osa.authenticate() + auth_connection.authenticate() - self.auth_token = osa.auth_token - self.auth_token_expires = osa.auth_token_expires - self.auth_user_info = osa.auth_user_info + self.auth_token = auth_connection.auth_token + self.auth_token_expires = auth_connection.auth_token_expires + self.auth_user_info = auth_connection.auth_user_info # pull out and parse the service catalog - self.service_catalog = OpenStackServiceCatalog(osa.urls, + self.service_catalog = OpenStackServiceCatalog(auth_connection.urls, ex_force_auth_version=self._auth_version) # Set up connection info @@ -609,6 +625,7 @@ def __init__(self, *args, **kwargs): self._ex_force_service_name = kwargs.get('ex_force_service_name', None) self._ex_force_service_region = kwargs.get('ex_force_service_region', None) + self._auth_connection = kwargs.get('ex_auth_connection', None) def openstack_connection_kwargs(self): """ @@ -632,4 +649,8 @@ def openstack_connection_kwargs(self): rv['ex_force_service_name'] = self._ex_force_service_name if self._ex_force_service_region: rv['ex_force_service_region'] = self._ex_force_service_region + + if self._auth_connection: + rv['ex_auth_connection'] = self._auth_connection + return rv From 4d6989462f380eeff18c83005e1d62361d840ebe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Fri, 24 May 2013 00:31:15 +0000 Subject: [PATCH 059/143] Refactor the code, less copy and paste mess. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1485905 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/common/openstack.py | 72 ++++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 32 deletions(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 18a975780d..ba1425a177 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -55,7 +55,7 @@ 'OpenStackServiceCatalog', 'OpenStackDriverMixin', "OpenStackBaseConnection", - "OpenStackAuthConnection" + "OpenStackAuthConnection", 'AUTH_TOKEN_EXPIRES_GRACE_SECONDS' ] @@ -615,42 +615,50 @@ def _add_cache_busting_to_params(self, params): class OpenStackDriverMixin(object): + # Extenstion arguments which are passed to the connection class. + EXTENSTION_ARGUMENTS = [ + 'ex_force_base_url', + 'ex_force_auth_token', + 'ex_force_auth_url', + 'ex_force_auth_version', + 'ex_tenant_name', + 'ex_force_service_type', + 'ex_force_service_name', + 'ex_force_service_region', + 'ex_auth_connection' + ] + def __init__(self, *args, **kwargs): - self._ex_force_base_url = kwargs.get('ex_force_base_url', None) - self._ex_force_auth_url = kwargs.get('ex_force_auth_url', None) - self._ex_force_auth_version = kwargs.get('ex_force_auth_version', None) - self._ex_force_auth_token = kwargs.get('ex_force_auth_token', None) - self._ex_tenant_name = kwargs.get('ex_tenant_name', None) - self._ex_force_service_type = kwargs.get('ex_force_service_type', None) - self._ex_force_service_name = kwargs.get('ex_force_service_name', None) - self._ex_force_service_region = kwargs.get('ex_force_service_region', - None) - self._auth_connection = kwargs.get('ex_auth_connection', None) + pairs = self._get_argument_pairs() + for argument_name, attribute_name in pairs: + value = kwargs.get(argument_name, None) + + if value is None: + continue + + setattr(self, attribute_name, value) def openstack_connection_kwargs(self): """ - @rtype: C{dict} """ - rv = {} - if 
self._ex_force_base_url: - rv['ex_force_base_url'] = self._ex_force_base_url - if self._ex_force_auth_token: - rv['ex_force_auth_token'] = self._ex_force_auth_token - if self._ex_force_auth_url: - rv['ex_force_auth_url'] = self._ex_force_auth_url - if self._ex_force_auth_version: - rv['ex_force_auth_version'] = self._ex_force_auth_version - if self._ex_tenant_name: - rv['ex_tenant_name'] = self._ex_tenant_name - if self._ex_force_service_type: - rv['ex_force_service_type'] = self._ex_force_service_type - if self._ex_force_service_name: - rv['ex_force_service_name'] = self._ex_force_service_name - if self._ex_force_service_region: - rv['ex_force_service_region'] = self._ex_force_service_region + result = {} - if self._auth_connection: - rv['ex_auth_connection'] = self._auth_connection + pairs = self._get_argument_pairs() + for argument_name, attribute_name in pairs: + value = getattr(self, attribute_name, None) - return rv + if not value: + continue + + result[argument_name] = value + + return result + + def _get_argument_pairs(self): + result = [] + for argument_name in self.EXTENSTION_ARGUMENTS: + attribute_name = '_%s' % (argument_name) + result.append([argument_name, attribute_name]) + + return result From 848a7e316b925cd1ffba00a824e327d182dfea24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Fri, 24 May 2013 00:58:45 +0000 Subject: [PATCH 060/143] Add a utility function which prints some simple HTTP request statistics if LIBCLOUD_DEBUG and LIBCLOUD_REQUESTS_STATS environment variable is set. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1485912 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/__init__.py | 19 +++++++++++++++++-- libcloud/common/base.py | 24 ++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 8d88d664d9..2df15fb6a6 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -22,6 +22,9 @@ __all__ = ['__version__', 'enable_debug'] __version__ = '0.12.4' +import os +import atexit + try: import paramiko have_paramiko = True @@ -29,6 +32,9 @@ have_paramiko = False +from libcloud.utils.debug import print_request_statistics + + def enable_debug(fo): """ Enable library wide debugging to a file-like object. @@ -38,12 +44,22 @@ def enable_debug(fo): """ from libcloud.common.base import (Connection, LoggingHTTPConnection, - LoggingHTTPSConnection) + LoggingHTTPSConnection, + REQUESTS_LOG) LoggingHTTPSConnection.log = fo LoggingHTTPConnection.log = fo Connection.conn_classes = (LoggingHTTPConnection, LoggingHTTPSConnection) + # Register a handler which prints some request statistics upon exit + enable_requests_stats = os.getenv('LIBCLOUD_REQUESTS_STATS') + + if enable_requests_stats: + LoggingHTTPSConnection.enable_requests_stats = True + LoggingHTTPConnection.enable_requests_stats = True + atexit.register(print_request_statistics, fo=fo, + requests_log=REQUESTS_LOG) + def _init_once(): """ @@ -52,7 +68,6 @@ def _init_once(): This checks for the LIBCLOUD_DEBUG enviroment variable, which if it exists is where we will log debug information about the provider transports. """ - import os path = os.getenv('LIBCLOUD_DEBUG') if path: fo = open(path, 'a') diff --git a/libcloud/common/base.py b/libcloud/common/base.py index c8eca2705e..c03a5ea286 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -44,6 +44,12 @@ LibcloudHTTPConnection = httplib.HTTPConnection +# Stores information about all of the issued HTTP request. 
+# Request logger is only active is LIBCLOUD_DEBUG and LIBCLOUD_REQUESTS_STATS +# environment variable is set and should NOT be used in production. +REQUESTS_LOG = [] + + class HTTPResponse(httplib.HTTPResponse): # On python 2.6 some calls can hang because HEAD isn't quite properly # supported. @@ -239,6 +245,7 @@ class LoggingConnection(): @cvar log: file-like object that logs entries are written to. """ log = None + enable_requests_stats = False def _log_response(self, r): rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r)) @@ -312,11 +319,17 @@ def _log_curl(self, method, url, body, headers): if body is not None and len(body) > 0: cmd.extend(["--data-binary", pquote(body)]) + url = pquote(self._get_url(path=url)) + cmd.extend(["--compress"]) - cmd.extend([pquote("%s://%s:%d%s" % (self.protocol, self.host, - self.port, url))]) + cmd.extend([url]) return " ".join(cmd) + def _get_url(self, path): + url = '%s://%s:%d%s' % (self.protocol, self.host, + self.port, path) + return url + class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection): """ @@ -335,11 +348,18 @@ def getresponse(self): def request(self, method, url, body=None, headers=None): headers.update({'X-LC-Request-ID': str(id(self))}) + if self.log is not None: pre = "# -------- begin %d request ----------\n" % id(self) self.log.write(pre + self._log_curl(method, url, body, headers) + "\n") self.log.flush() + + if self.enable_requests_stats: + full_url = self._get_url(path=url) + obj = {'method': method, 'url': full_url} + REQUESTS_LOG.append(obj) + return LibcloudHTTPSConnection.request(self, method, url, body, headers) From b848ccc7cfc62aa8ca523ce0638a1a26a1bad498 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Fri, 24 May 2013 19:00:31 +0000 Subject: [PATCH 061/143] Only import libcloud.utils.debug module if enable_requests_stats environment variable is set. 
git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1486170 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 2df15fb6a6..58fc9bb9ad 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -32,9 +32,6 @@ have_paramiko = False -from libcloud.utils.debug import print_request_statistics - - def enable_debug(fo): """ Enable library wide debugging to a file-like object. @@ -55,6 +52,8 @@ def enable_debug(fo): enable_requests_stats = os.getenv('LIBCLOUD_REQUESTS_STATS') if enable_requests_stats: + from libcloud.utils.debug import print_request_statistics + LoggingHTTPSConnection.enable_requests_stats = True LoggingHTTPConnection.enable_requests_stats = True atexit.register(print_request_statistics, fo=fo, From 9dd39a928f6e44a05c79e6e55bd0423c26a13a90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 1 Jun 2013 08:00:41 +0000 Subject: [PATCH 062/143] Backport commits from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1488484 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 8 ++ libcloud/compute/drivers/cloudstack.py | 20 +++- libcloud/compute/drivers/gandi.py | 109 +++++++++++++---- libcloud/data/pricing.json | 6 +- .../fixtures/gandi/account_info_rating.xml | 58 +++++++++ libcloud/test/compute/test_gandi.py | 111 ++++++++++++++---- 6 files changed, 260 insertions(+), 52 deletions(-) create mode 100644 libcloud/test/compute/fixtures/gandi/account_info_rating.xml diff --git a/CHANGES b/CHANGES index 7ebb787682..a5c11dfd4e 100644 --- a/CHANGES +++ b/CHANGES @@ -54,6 +54,14 @@ Changes with Apache Libcloud in deveploment: a Node object from the create_node method. 
(LIBCLOUD-329) [Sebastien Goasguen, Tomaz Muraus] + - Allow user to pass extra arguments via "extra_args" argument which are + then passed to the "deployVirtualMachine" call in the CloudStack driver + create_node method. (LIBCLOUD-330) + [Sebastien Goasguen, Tomaz Muraus] + + - Update Gandi driver to handle new billing model. (LIBCLOUD-317) + [Aymeric Barantal] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index c2f2ad7f26..e2c85ddb95 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -230,22 +230,34 @@ def list_sizes(self, location=None): 0, self)) return sizes - def create_node(self, name, size, image, location=None, **kwargs): + def create_node(self, name, size, image, location=None, extra_args=None, + **kwargs): """ @inherits: L{NodeDriver.create_node} + + @keyword extra_args: Extra argument passed to the + "deployVirtualMachine" call. 
A list of available arguments can be found + at http://cloudstack.apache.org/docs/api/apidocs-4.0.0/root_admin/deployVirtualMachine.html + @type extra_args: C{dict} + @rtype: L{CloudStackNode} """ - extra_args = {} + + if extra_args: + request_args = extra_args.copy() + else: + request_args = {} + if location is None: location = self.list_locations()[0] if 'network_id' in kwargs: - extra_args['networkids'] = network_id + request_args['networkids'] = network_id result = self._async_request( 'deployVirtualMachine', name=name, displayname=name, serviceofferingid=size.id, templateid=image.id, - zoneid=location.id, **extra_args + zoneid=location.id, **request_args ) node = result['virtualmachine'] diff --git a/libcloud/compute/drivers/gandi.py b/libcloud/compute/drivers/gandi.py index d9baceab69..1278173620 100644 --- a/libcloud/compute/drivers/gandi.py +++ b/libcloud/compute/drivers/gandi.py @@ -39,6 +39,41 @@ NODE_PRICE_HOURLY_USD = 0.02 +INSTANCE_TYPES = { + 'small': { + 'id': 'small', + 'name': 'Small instance', + 'cpu': 1, + 'memory': 256, + 'disk': 3, + 'bandwidth': 100, + }, + 'medium': { + 'id': 'medium', + 'name': 'Medium instance', + 'cpu': 1, + 'memory': 1024, + 'disk': 20, + 'bandwidth': 100, + }, + 'large': { + 'id': 'large', + 'name': 'Large instance', + 'cpu': 2, + 'memory': 2048, + 'disk': 50, + 'bandwidth': 100, + }, + 'extra-large': { + 'id': 'x-large', + 'name': 'Extra Large instance', + 'cpu': 4, + 'memory': 4096, + 'disk': 100, + 'bandwidth': 100, + }, +} + class GandiNodeDriver(BaseGandiDriver, NodeDriver): """ @@ -61,7 +96,7 @@ def __init__(self, *args, **kwargs): def _resource_info(self, type, id): try: - obj = self.connection.request('%s.info' % type, int(id)) + obj = self.connection.request('hosting.%s.info' % type, int(id)) return obj.object except Exception: e = sys.exc_info()[1] @@ -109,8 +144,8 @@ def _to_volumes(self, disks): return [self._to_volume(d) for d in disks] def list_nodes(self): - vms = self.connection.request('vm.list').object 
- ips = self.connection.request('ip.list').object + vms = self.connection.request('hosting.vm.list').object + ips = self.connection.request('hosting.ip.list').object for vm in vms: vm['ips'] = [] for ip in ips: @@ -123,7 +158,7 @@ def list_nodes(self): return nodes def reboot_node(self, node): - op = self.connection.request('vm.reboot', int(node.id)) + op = self.connection.request('hosting.vm.reboot', int(node.id)) self._wait_operation(op.object['id']) vm = self._node_info(int(node.id)) if vm['state'] == 'running': @@ -134,11 +169,11 @@ def destroy_node(self, node): vm = self._node_info(node.id) if vm['state'] == 'running': # Send vm_stop and wait for accomplish - op_stop = self.connection.request('vm.stop', int(node.id)) + op_stop = self.connection.request('hosting.vm.stop', int(node.id)) if not self._wait_operation(op_stop.object['id']): raise GandiException(1010, 'vm.stop failed') # Delete - op = self.connection.request('vm.delete', int(node.id)) + op = self.connection.request('hosting.vm.delete', int(node.id)) if self._wait_operation(op.object['id']): return True return False @@ -198,6 +233,10 @@ def create_node(self, **kwargs): raise GandiException( 1022, 'size must be a subclass of NodeSize') + # If size name is in INSTANCE_TYPE we use new rating model + instance = INSTANCE_TYPES.get(size.id) + cores = instance['cpu'] if instance else int(size.id) + src_disk_id = int(kwargs['image'].id) disk_spec = { @@ -211,7 +250,7 @@ def create_node(self, **kwargs): 'login': kwargs['login'], 'password': kwargs['password'], # TODO : use NodeAuthPassword 'memory': int(size.ram), - 'cores': int(size.id), + 'cores': cores, 'bandwidth': int(size.bandwidth), 'ip_version': kwargs.get('inet_family', 4), } @@ -219,7 +258,7 @@ def create_node(self, **kwargs): # Call create_from helper api. 
Return 3 operations : disk_create, # iface_create,vm_create (op_disk, op_iface, op_vm) = self.connection.request( - 'vm.create_from', + 'hosting.vm.create_from', vm_spec, disk_spec, src_disk_id ).object @@ -250,7 +289,7 @@ def list_images(self, location=None): filtering = {'datacenter_id': int(location.id)} else: filtering = {} - images = self.connection.request('image.list', filtering) + images = self.connection.request('hosting.image.list', filtering) return [self._to_image(i) for i in images.object] except Exception: e = sys.exc_info()[1] @@ -267,8 +306,26 @@ def _to_size(self, id, size): driver=self.connection.driver, ) + def _instance_type_to_size(self, instance): + return NodeSize( + id=instance['id'], + name=instance['name'], + ram=instance['memory'], + disk=instance['disk'], + bandwidth=instance['bandwidth'], + price=self._get_size_price(size_id=instance['id']), + driver=self.connection.driver, + ) + + def list_instance_type(self, location=None): + return [self._instance_type_to_size(instance) + for name, instance in INSTANCE_TYPES.items()] + def list_sizes(self, location=None): - account = self.connection.request('account.info').object + account = self.connection.request('hosting.account.info').object + if account.get('rating_enabled'): + # This account use new rating model + return self.list_instance_type(location) # Look for available shares, and return a list of share_definition available_res = account['resources']['available'] @@ -306,7 +363,7 @@ def _to_loc(self, loc): ) def list_locations(self): - res = self.connection.request('datacenter.list') + res = self.connection.request('hosting.datacenter.list') return [self._to_loc(l) for l in res.object] def list_volumes(self): @@ -314,7 +371,7 @@ def list_volumes(self): @rtype: C{list} of L{StorageVolume} """ - res = self.connection.request('disk.list', {}) + res = self.connection.request('hosting.disk.list', {}) return self._to_volumes(res.object) def create_volume(self, size, name, location=None, 
snapshot=None): @@ -324,17 +381,17 @@ def create_volume(self, size, name, location=None, snapshot=None): 'datacenter_id': int(location.id) } if snapshot: - op = self.connection.request('disk.create_from', + op = self.connection.request('hosting.disk.create_from', disk_param, int(snapshot.id)) else: - op = self.connection.request('disk.create', disk_param) + op = self.connection.request('hosting.disk.create', disk_param) if self._wait_operation(op.object['id']): disk = self._volume_info(op.object['disk_id']) return self._to_volume(disk) return None def attach_volume(self, node, volume, device=None): - op = self.connection.request('vm.disk_attach', + op = self.connection.request('hosting.vm.disk_attach', int(node.id), int(volume.id)) if self._wait_operation(op.object['id']): return True @@ -352,14 +409,14 @@ def detach_volume(self, node, volume): @rtype: C{bool} """ - op = self.connection.request('vm.disk_detach', + op = self.connection.request('hosting.vm.disk_detach', int(node.id), int(volume.id)) if self._wait_operation(op.object['id']): return True return False def destroy_volume(self, volume): - op = self.connection.request('disk.delete', int(volume.id)) + op = self.connection.request('hosting.disk.delete', int(volume.id)) if self._wait_operation(op.object['id']): return True return False @@ -401,8 +458,8 @@ def ex_list_interfaces(self): @rtype: C{list} of L{GandiNetworkInterface} """ - ifaces = self.connection.request('iface.list').object - ips = self.connection.request('ip.list').object + ifaces = self.connection.request('hosting.iface.list').object + ips = self.connection.request('hosting.ip.list').object for iface in ifaces: iface['ips'] = list( filter(lambda i: i['iface_id'] == iface['id'], ips)) @@ -431,7 +488,7 @@ def ex_list_disks(self): @rtype: C{list} of L{GandiDisk} """ - res = self.connection.request('disk.list', {}) + res = self.connection.request('hosting.disk.list', {}) return self._to_disks(res.object) def ex_node_attach_disk(self, node, disk): 
@@ -446,7 +503,7 @@ def ex_node_attach_disk(self, node, disk): @rtype: C{bool} """ - op = self.connection.request('vm.disk_attach', + op = self.connection.request('hosting.vm.disk_attach', int(node.id), int(disk.id)) if self._wait_operation(op.object['id']): return True @@ -464,7 +521,7 @@ def ex_node_detach_disk(self, node, disk): @rtype: C{bool} """ - op = self.connection.request('vm.disk_detach', + op = self.connection.request('hosting.vm.disk_detach', int(node.id), int(disk.id)) if self._wait_operation(op.object['id']): return True @@ -483,7 +540,7 @@ def ex_node_attach_interface(self, node, iface): @rtype: C{bool} """ - op = self.connection.request('vm.iface_attach', + op = self.connection.request('hosting.vm.iface_attach', int(node.id), int(iface.id)) if self._wait_operation(op.object['id']): return True @@ -502,7 +559,7 @@ def ex_node_detach_interface(self, node, iface): @rtype: C{bool} """ - op = self.connection.request('vm.iface_detach', + op = self.connection.request('hosting.vm.iface_detach', int(node.id), int(iface.id)) if self._wait_operation(op.object['id']): return True @@ -526,7 +583,7 @@ def ex_snapshot_disk(self, disk, name=None): suffix = datetime.today().strftime('%Y%m%d') name = 'snap_%s' % (suffix) op = self.connection.request( - 'disk.create_from', + 'hosting.disk.create_from', {'name': name, 'type': 'snapshot', }, int(disk.id), ) @@ -554,7 +611,7 @@ def ex_update_disk(self, disk, new_size=None, new_name=None): params.update({'size': new_size}) if new_name: params.update({'name': new_name}) - op = self.connection.request('disk.update', + op = self.connection.request('hosting.disk.update', int(disk.id), params) if self._wait_operation(op.object['id']): diff --git a/libcloud/data/pricing.json b/libcloud/data/pricing.json index 6bbbcdcc13..905d6574b6 100644 --- a/libcloud/data/pricing.json +++ b/libcloud/data/pricing.json @@ -239,7 +239,11 @@ }, "gandi": { - "1": 0.02 + "1": 0.02, + "small": 0.02, + "medium": 0.03, + "large": 0.06, + "x-large": 
0.12 }, "vps_net": { diff --git a/libcloud/test/compute/fixtures/gandi/account_info_rating.xml b/libcloud/test/compute/fixtures/gandi/account_info_rating.xml new file mode 100644 index 0000000000..9514f990f0 --- /dev/null +++ b/libcloud/test/compute/fixtures/gandi/account_info_rating.xml @@ -0,0 +1,58 @@ + + + + + + +handle +AB9090-GANDI + + +rating_enabled +1 + + +date_credits_expiration + + +credits +0 + + +products + + +average_credit_cost + + +share_definition + + +fullname +Aymeric BARANTAL + + +id +24 + + +resources + + +available + + +granted + + +used + + +expired + + + + + + + diff --git a/libcloud/test/compute/test_gandi.py b/libcloud/test/compute/test_gandi.py index f1076825aa..b6aef09096 100644 --- a/libcloud/test/compute/test_gandi.py +++ b/libcloud/test/compute/test_gandi.py @@ -155,35 +155,73 @@ def test_ex_update_disk(self): self.assertTrue(self.driver.ex_update_disk(disks[0], new_size=4096)) +class GandiRatingTests(unittest.TestCase): + """Tests where rating model is involved""" + + node_name = 'test2' + + def setUp(self): + GandiNodeDriver.connectionCls.conn_classes = ( + GandiMockRatingHttp, GandiMockRatingHttp) + GandiMockRatingHttp.type = None + self.driver = GandiNodeDriver(*GANDI_PARAMS) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 4) + + def test_create_node(self): + login = 'libcloud' + passwd = ''.join(random.choice(string.ascii_letters) + for i in range(10)) + + # Get france datacenter + loc = list(filter(lambda x: 'france' in x.country.lower(), + self.driver.list_locations()))[0] + + # Get a debian image + images = self.driver.list_images(loc) + images = [x for x in images if x.name.lower().startswith('debian')] + img = list(filter(lambda x: '5' in x.name, images))[0] + + # Get a configuration size + size = self.driver.list_sizes()[0] + node = self.driver.create_node(name=self.node_name, login=login, + password=passwd, image=img, + location=loc, size=size) + self.assertEqual(node.name, 
self.node_name) + + + class GandiMockHttp(BaseGandiMockHttp): fixtures = ComputeFileFixtures('gandi') - def _xmlrpc__datacenter_list(self, method, url, body, headers): + def _xmlrpc__hosting_datacenter_list(self, method, url, body, headers): body = self.fixtures.load('datacenter_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__image_list(self, method, url, body, headers): + def _xmlrpc__hosting_image_list(self, method, url, body, headers): body = self.fixtures.load('image_list_dc0.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__vm_list(self, method, url, body, headers): + def _xmlrpc__hosting_vm_list(self, method, url, body, headers): body = self.fixtures.load('vm_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__ip_list(self, method, url, body, headers): + def _xmlrpc__hosting_ip_list(self, method, url, body, headers): body = self.fixtures.load('ip_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__account_info(self, method, url, body, headers): + def _xmlrpc__hosting_account_info(self, method, url, body, headers): body = self.fixtures.load('account_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__vm_info(self, method, url, body, headers): + def _xmlrpc__hosting_vm_info(self, method, url, body, headers): body = self.fixtures.load('vm_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__vm_delete(self, method, url, body, headers): + def _xmlrpc__hosting_vm_delete(self, method, url, body, headers): body = self.fixtures.load('vm_delete.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) @@ -191,62 +229,93 @@ def _xmlrpc__operation_info(self, method, url, body, headers): body = self.fixtures.load('operation_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__vm_create_from(self, method, url, body, 
headers): + def _xmlrpc__hosting_vm_create_from(self, method, url, body, headers): body = self.fixtures.load('vm_create_from.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__vm_reboot(self, method, url, body, headers): + def _xmlrpc__hosting_vm_reboot(self, method, url, body, headers): body = self.fixtures.load('vm_reboot.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__vm_stop(self, method, url, body, headers): + def _xmlrpc__hosting_vm_stop(self, method, url, body, headers): body = self.fixtures.load('vm_stop.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__iface_list(self, method, url, body, headers): + def _xmlrpc__hosting_iface_list(self, method, url, body, headers): body = self.fixtures.load('iface_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__disk_list(self, method, url, body, headers): + def _xmlrpc__hosting_disk_list(self, method, url, body, headers): body = self.fixtures.load('disk_list.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__vm_iface_attach(self, method, url, body, headers): + def _xmlrpc__hosting_vm_iface_attach(self, method, url, body, headers): body = self.fixtures.load('iface_attach.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__vm_iface_detach(self, method, url, body, headers): + def _xmlrpc__hosting_vm_iface_detach(self, method, url, body, headers): body = self.fixtures.load('iface_detach.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__vm_disk_attach(self, method, url, body, headers): + def _xmlrpc__hosting_vm_disk_attach(self, method, url, body, headers): body = self.fixtures.load('disk_attach.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__vm_disk_detach(self, method, url, body, headers): + def _xmlrpc__hosting_vm_disk_detach(self, method, url, body, headers): body = 
self.fixtures.load('disk_detach.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__disk_create(self, method, url, body, headers): + def _xmlrpc__hosting_disk_create(self, method, url, body, headers): body = self.fixtures.load('disk_create.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__disk_create_from(self, method, url, body, headers): + def _xmlrpc__hosting_disk_create_from(self, method, url, body, headers): body = self.fixtures.load('disk_create_from.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__disk_info(self, method, url, body, headers): + def _xmlrpc__hosting_disk_info(self, method, url, body, headers): body = self.fixtures.load('disk_info.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__disk_update(self, method, url, body, headers): + def _xmlrpc__hosting_disk_update(self, method, url, body, headers): body = self.fixtures.load('disk_update.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _xmlrpc__disk_delete(self, method, url, body, headers): + def _xmlrpc__hosting_disk_delete(self, method, url, body, headers): body = self.fixtures.load('disk_delete.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) +class GandiMockRatingHttp(BaseGandiMockHttp): + """Fixtures needed for tests related to rating model""" + + fixtures = ComputeFileFixtures('gandi') + + def _xmlrpc__hosting_datacenter_list(self, method, url, body, headers): + body = self.fixtures.load('datacenter_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_image_list(self, method, url, body, headers): + body = self.fixtures.load('image_list_dc0.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_create_from(self, method, url, body, headers): + body = self.fixtures.load('vm_create_from.xml') + return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) + + def _xmlrpc__operation_info(self, method, url, body, headers): + body = self.fixtures.load('operation_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_info(self, method, url, body, headers): + body = self.fixtures.load('vm_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + # Specific to rating tests + def _xmlrpc__hosting_account_info(self, method, url, body, headers): + body = self.fixtures.load('account_info_rating.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if __name__ == '__main__': sys.exit(unittest.main()) From 6f5bc669b09889afaa40b90a771cc9712bc08f12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Mon, 3 Jun 2013 22:17:01 +0000 Subject: [PATCH 063/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1489206 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 ++++ libcloud/compute/drivers/linode.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index a5c11dfd4e..561d6e3517 100644 --- a/CHANGES +++ b/CHANGES @@ -62,6 +62,10 @@ Changes with Apache Libcloud in deveploment: - Update Gandi driver to handle new billing model. (LIBCLOUD-317) [Aymeric Barantal] + - Fix a bug in the Linode driver and remove extra newline which is added + when generating a random root password in create_node. 
(LIBCLOUD-334) + [Juan Carlos Moreno] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles diff --git a/libcloud/compute/drivers/linode.py b/libcloud/compute/drivers/linode.py index 786fde32aa..86eb61c7f9 100644 --- a/libcloud/compute/drivers/linode.py +++ b/libcloud/compute/drivers/linode.py @@ -319,7 +319,7 @@ def create_node(self, **kwargs): # Step 2: linode.disk.createfromdistribution if not root: - root = binascii.b2a_base64(os.urandom(8)).decode('ascii') + root = binascii.b2a_base64(os.urandom(8)).decode('ascii').strip() params = { "api_action": "linode.disk.createfromdistribution", From c961011c95f06a469c8f74bbbf25a59f8123d3e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Thu, 6 Jun 2013 20:16:15 +0000 Subject: [PATCH 064/143] Backport changes from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1490422 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 12 + libcloud/compute/drivers/cloudstack.py | 346 +++++++++++++++++- ...authorizeSecurityGroupIngress_default.json | 2 + .../createSecurityGroup_default.json | 1 + .../cloudstack/deleteSSHKeyPair_default.json | 2 + .../deleteSecurityGroup_default.json | 2 + .../cloudstack/listSSHKeyPairs_default.json | 1 + .../listSecurityGroups_default.json | 1 + .../cloudstack/queryAsyncJobResult_17188.json | 1 + .../cloudstack/queryAsyncJobResult_17199.json | 1 + .../cloudstack/queryAsyncJobResult_17200.json | 1 + .../startVirtualMachine_default.json | 1 + .../stopVirtualMachine_default.json | 1 + libcloud/test/compute/test_cloudstack.py | 77 +++- 14 files changed, 443 insertions(+), 6 deletions(-) create mode 100644 libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupIngress_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/createSecurityGroup_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/deleteSSHKeyPair_default.json create mode 100644 
libcloud/test/compute/fixtures/cloudstack/deleteSecurityGroup_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17188.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17199.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17200.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/startVirtualMachine_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/stopVirtualMachine_default.json diff --git a/CHANGES b/CHANGES index 561d6e3517..f53b5b763a 100644 --- a/CHANGES +++ b/CHANGES @@ -66,6 +66,18 @@ Changes with Apache Libcloud in deveploment: when generating a random root password in create_node. (LIBCLOUD-334) [Juan Carlos Moreno] + - Add extension methods for managing keypairs to the CloudStack driver. + (LIBCLOUD-333) + [sebastien goasguen] + + - Add extension methods for managing security groups to the CloudStack + driver. (LIBCLOUD-332) + [sebastien goasguen] + + - Add extension methods for starting and stoping the node to the + CloudStack driver. (LIBCLOUD-338) + [sebastien goasguen] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index e2c85ddb95..d2c1799085 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -41,6 +41,14 @@ def ex_delete_ip_forwarding_rule(self, rule): "Delete a NAT/firewall rule." 
return self.driver.ex_delete_ip_forwarding_rule(self, rule) + def ex_start(self): + "Starts a stopped virtual machine" + return self.driver.ex_start(self) + + def ex_stop(self): + "Stops a running virtual machine" + return self.driver.ex_stop(self) + class CloudStackAddress(object): "A public IP address." @@ -252,7 +260,7 @@ def create_node(self, name, size, image, location=None, extra_args=None, location = self.list_locations()[0] if 'network_id' in kwargs: - request_args['networkids'] = network_id + request_args['networkids'] = kwargs['network_id'] result = self._async_request( 'deployVirtualMachine', name=name, displayname=name, @@ -296,8 +304,47 @@ def reboot_node(self, node): self._async_request('rebootVirtualMachine', id=node.id) return True + def ex_start(self, node): + """ + Starts/Resumes a stopped virtual machine + + @type node: L{CloudStackNode} + + @param id: The ID of the virtual machine (required) + @type id: C{uuid} + + @param hostid: destination Host ID to deploy the VM to + parameter available for root admin only + @type hostid: C{uuid} + + @rtype C{str} + """ + res = self._async_request('startVirtualMachine', id=node.id) + return res['virtualmachine']['state'] + + def ex_stop(self, node): + """ + Stops/Suspends a running virtual machine + + @type node: L{CloudStackNode} + + @param id: The ID of the virtual machine + @type id: C{uuid} + + @param forced: Force stop the VM + (vm is marked as Stopped even when command + fails to be send to the backend). + The caller knows the VM is stopped. + @type forced: C{bool} + + @rtype C{str} + """ + res = self._async_request('stopVirtualMachine', id=node.id) + return res['virtualmachine']['state'] + def ex_list_disk_offerings(self): - """Fetch a list of all available disk offerings. + """ + Fetch a list of all available disk offerings. 
@rtype: C{list} of L{CloudStackDiskOffering} """ @@ -362,7 +409,7 @@ def destroy_volume(self, volume): def ex_allocate_public_ip(self, node): """ - "Allocate a public IP and bind it to a node. + Allocate a public IP and bind it to a node. @param node: Node which should be used @type node: L{CloudStackNode} @@ -463,6 +510,299 @@ def ex_delete_ip_forwarding_rule(self, node, rule): self._async_request('deleteIpForwardingRule', id=rule.id) return True + def ex_list_keypairs(self, **kwargs): + """ + List Registered SSH Key Pairs + + @param projectid: list objects by project + @type projectid: C{uuid} + + @param page: The page to list the keypairs from + @type page: C{int} + + @param keyword: List by keyword + @type keyword: C{str} + + @param listall: If set to false, list only resources + belonging to the command's caller; + if set to true - list resources that + the caller is authorized to see. + Default value is false + + @type listall: C{bool} + + @param pagesize: The number of results per page + @type pagesize: C{int} + + @param account: List resources by account. + Must be used with the domainId parameter + @type account: C{str} + + @param isrecursive: Defaults to false, but if true, + lists all resources from + the parent specified by the + domainId till leaves. 
+ @type isrecursive: C{bool} + + @param fingerprint: A public key fingerprint to look for + @type fingerprint: C{str} + + @param name: A key pair name to look for + @type name: C{str} + + @param domainid: List only resources belonging to + the domain specified + @type domainid: C{uuid} + + @return: A list of keypair dictionaries + @rtype: L{dict} + """ + + extra_args = kwargs.copy() + res = self._sync_request('listSSHKeyPairs', **extra_args) + return res['sshkeypair'] + + def ex_create_keypair(self, name, **kwargs): + """ + Creates a SSH KeyPair, returns fingerprint and private key + + @param name: Name of the keypair (required) + @type name: C{str} + + @param projectid: An optional project for the ssh key + @type projectid: C{str} + + @param domainid: An optional domainId for the ssh key. + If the account parameter is used, + domainId must also be used. + @type domainid: C{str} + + @param account: An optional account for the ssh key. + Must be used with domainId. + @type account: C{str} + + @return: A keypair dictionary + @rtype: C{dict} + """ + extra_args = kwargs.copy() + + for keypair in self.ex_list_keypairs(): + if keypair['name'] == name: + raise LibcloudError('SSH KeyPair with name=%s already exists' + % (name)) + + res = self._sync_request('createSSHKeyPair', name=name, **extra_args) + return res['keypair'] + + def ex_delete_keypair(self, name, **kwargs): + """ + Deletes an existing SSH KeyPair + + @param name: Name of the keypair (required) + @type name: C{str} + + @param projectid: The project associated with keypair + @type projectid: C{uuid} + + @param domainid : The domain ID associated with the keypair + @type domainid: C{uuid} + + @param account : The account associated with the keypair. + Must be used with the domainId parameter. 
+ @type account: C{str} + + @return: True or False based on success of Keypair deletion + @rtype: C{bool} + """ + + extra_args = kwargs.copy() + + res = self._sync_request('deleteSSHKeyPair', name=name, **extra_args) + return res['success'] + + def ex_list_security_groups(self, **kwargs): + """ + Lists Security Groups + + @param domainid: List only resources belonging to the domain specified + @type domainid: C{uuid} + + @param account: List resources by account. Must be used with + the domainId parameter. + @type account: C{str} + + @param listall: If set to false, list only resources belonging to + the command's caller; if set to true + list resources that the caller is + authorized to see. + Default value is false + @type listall: C{bool} + + @param pagesize: Number of entries per page + @type pagesize: C{int} + + @param keyword: List by keyword + @type keyword: C{str} + + @param tags: List resources by tags (key/value pairs) + @type tags: C{dict} + + @param id: list the security group by the id provided + @type id: C{uuid} + + @param securitygroupname: lists security groups by name + @type securitygroupname: C{str} + + @param virtualmachineid: lists security groups by virtual machine id + @type virtualmachineid: C{uuid} + + @param projectid: list objects by project + @type projectid: C{uuid} + + @param isrecursive: (boolean) defaults to false, but if true, + lists all resources from the parent + specified by the domainId till leaves. + @type isrecursive: C{bool} + + @param page: (integer) + @type page: C{int} + + @rtype C{list} + """ + extra_args = kwargs + return self._sync_request('listSecurityGroups', + **extra_args)['securitygroup'] + + def ex_create_security_group(self, name, **kwargs): + """ + Creates a new Security Group + + @param name: name of the security group (required) + @type name: C{str} + + @param account: An optional account for the security group. + Must be used with domainId. 
+ @type account: C{str} + + @param domainid: An optional domainId for the security group. + If the account parameter is used, + domainId must also be used. + @type domainid: C{uuid} + + @param description: The description of the security group + @type description: C{str} + + @param projectid: Deploy vm for the project + @type projectid: C{uuid} + + @rtype: C{dict} + """ + + extra_args = kwargs.copy() + + for sg in self.ex_list_security_groups(): + if name in sg['name']: + raise LibcloudError('This Security Group name already exists') + + return self._sync_request('createSecurityGroup', + name=name, **extra_args)['securitygroup'] + + def ex_delete_security_group(self, name): + """ + Deletes a given Security Group + + @param domainid: The domain ID of account owning + the security group + @type domainid: C{uuid} + + @param id: The ID of the security group. + Mutually exclusive with name parameter + @type id: C{uuid} + + @param name: The ID of the security group. + Mutually exclusive with id parameter + @type name: C{str} + + @param account: The account of the security group. + Must be specified with domain ID + @type account: C{str} + + @param projectid: The project of the security group + @type projectid: C{uuid} + + @rtype: C{bool} + """ + + return self._sync_request('deleteSecurityGroup', name=name)['success'] + + def ex_authorize_security_group_ingress(self, securitygroupname, + protocol, cidrlist, startport, + endport=None): + """ + Creates a new Security Group Ingress rule + + @param domainid: An optional domainId for the security group. + If the account parameter is used, + domainId must also be used. + @type domainid: C{uuid} + + @param startport: Start port for this ingress rule + @type startport: C{int} + + @param securitygroupid: The ID of the security group. 
+ Mutually exclusive with securityGroupName + parameter + @type securitygroupid: C{uuid} + + @param cidrlist: The cidr list associated + @type cidrlist: C{list} + + @param usersecuritygrouplist: user to security group mapping + @type usersecuritygrouplist: C{map} + + @param securitygroupname: The name of the security group. + Mutually exclusive with + securityGroupName parameter + @type securitygroupname: C{str} + + @param account: An optional account for the security group. + Must be used with domainId. + @type account: C{str} + + @param icmpcode: Error code for this icmp message + @type icmpcode: C{int} + + @param protocol: TCP is default. UDP is the other supported protocol + @type protocol: C{str} + + @param icmptype: type of the icmp message being sent + @type icmptype: C{int} + + @param projectid: An optional project of the security group + @type projectid: C{uuid} + + @param endport: end port for this ingress rule + @type endport: C{int} + + @rtype: C{list} + """ + + protocol = protocol.upper() + if protocol not in ('TCP', 'ICMP'): + raise LibcloudError('Only TCP and ICMP are allowed') + + args = { + 'securitygroupname': securitygroupname, + 'protocol': protocol, + 'startport': int(startport), + 'cidrlist': cidrlist + } + if endport is None: + args['endport'] = int(startport) + + return self._async_request('authorizeSecurityGroupIngress', + **args)['securitygroup'] + def ex_register_iso(self, name, url, location=None, **kwargs): """ Registers an existing ISO by URL. 
diff --git a/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupIngress_default.json b/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupIngress_default.json new file mode 100644 index 0000000000..262ab05564 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupIngress_default.json @@ -0,0 +1,2 @@ +{ "authorizesecuritygroupingressresponse" : {"jobid":17200} } + diff --git a/libcloud/test/compute/fixtures/cloudstack/createSecurityGroup_default.json b/libcloud/test/compute/fixtures/cloudstack/createSecurityGroup_default.json new file mode 100644 index 0000000000..0d3cdaf5f0 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/createSecurityGroup_default.json @@ -0,0 +1 @@ +{"createsecuritygroupresponse":{"securitygroup":{"account":"runseb@gmail.com","domainid":"ab53d864-6f78-4993-bb28-9b8667b535a1","id":"895f9e41-4d89-468e-9b69-19a8f0d3a889","domain":"runseb@gmail.com","name":"MySG"}}} diff --git a/libcloud/test/compute/fixtures/cloudstack/deleteSSHKeyPair_default.json b/libcloud/test/compute/fixtures/cloudstack/deleteSSHKeyPair_default.json new file mode 100644 index 0000000000..9e2c854042 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/deleteSSHKeyPair_default.json @@ -0,0 +1,2 @@ +{ "deletesshkeypairresponse" : { "success" : "true"} } + diff --git a/libcloud/test/compute/fixtures/cloudstack/deleteSecurityGroup_default.json b/libcloud/test/compute/fixtures/cloudstack/deleteSecurityGroup_default.json new file mode 100644 index 0000000000..19ba7237ad --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/deleteSecurityGroup_default.json @@ -0,0 +1,2 @@ +{ "deletesecuritygroupresponse" : { "success" : "true"} } + diff --git a/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_default.json b/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_default.json new file mode 100644 index 0000000000..d7728aaa18 --- /dev/null +++ 
b/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_default.json @@ -0,0 +1 @@ +{"listsshkeypairsresponse":{"count":1,"sshkeypair":[{"name":"cs-keypair","fingerprint":"00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00"}]}} diff --git a/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_default.json b/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_default.json new file mode 100644 index 0000000000..fa9977a248 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_default.json @@ -0,0 +1 @@ +{"listsecuritygroupsresponse":{"count":1,"securitygroup":[{"egressrule":[],"account":"runseb@gmail.com","domainid":"ab53d864-6f78-4993-bb28-9b8667b535a1","description":"Default Security Group","tags":[],"domain":"runseb@gmail.com","ingressrule":[{"startport":22,"cidr":"0.0.0.0/0","protocol":"tcp","endport":22,"ruleid":"489e83b2-5498-4c17-8a28-e6a96b35ac2d"}],"id":"ebfa2339-e9ae-4dcb-b73c-a76cd3fce39e","name":"default"}]}} diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17188.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17188.json new file mode 100644 index 0000000000..43ca40d047 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17188.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17166,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Starting","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro 
PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.13%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17199.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17199.json new file mode 100644 index 0000000000..485175acea --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17199.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17166,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Stopped","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.13%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17200.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17200.json new file mode 100644 index 0000000000..d028ab4949 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17200.json @@ -0,0 +1 @@ 
+{"queryasyncjobresultresponse":{"jobid":17200,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"securitygroup":[{"egressrule":[],"account":"runseb@gmail.com","domainid":"ab53d864-6f78-4993-bb28-9b8667b535a1","name":"MySG","domain":"runseb@gmail.com","ingressrule":[{"startport":22,"cidr":"0.0.0.0/0","protocol":"tcp","endport":22,"ruleid":"7df1edc8-6e56-48d7-b816-39377506d787"}],"id":"fa334c44-21c6-4809-ad7d-287bbb23c29b"}]}}} diff --git a/libcloud/test/compute/fixtures/cloudstack/startVirtualMachine_default.json b/libcloud/test/compute/fixtures/cloudstack/startVirtualMachine_default.json new file mode 100644 index 0000000000..750e493725 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/startVirtualMachine_default.json @@ -0,0 +1 @@ +{ "startvirtualmachineresponse" : {"jobid":17188} } diff --git a/libcloud/test/compute/fixtures/cloudstack/stopVirtualMachine_default.json b/libcloud/test/compute/fixtures/cloudstack/stopVirtualMachine_default.json new file mode 100644 index 0000000000..cb30e00498 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/stopVirtualMachine_default.json @@ -0,0 +1 @@ +{ "stopvirtualmachineresponse" : {"jobid":17199} } diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index b1e8ab1654..2e72fc31d3 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -1,3 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys import unittest @@ -17,11 +32,13 @@ from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver from libcloud.compute.types import DeploymentError, LibcloudError +from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation from libcloud.test import MockHttpTestCase from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures + class CloudStackNodeDriverTest(unittest.TestCase, TestCaseMixin): def setUp(self): CloudStackNodeDriver.connectionCls.conn_classes = \ @@ -104,9 +121,9 @@ def test_create_volume_no_noncustomized_offering_with_size(self): location = self.driver.list_locations()[0] self.assertRaises( - LibcloudError, - self.driver.create_volume, - 'vol-0', location, 11) + LibcloudError, + self.driver.create_volume, + 'vol-0', location, 11) def test_create_volume_with_custom_disk_size_offering(self): CloudStackMockHttp.fixture_tag = 'withcustomdisksize' @@ -128,6 +145,60 @@ def test_attach_volume(self): self.assertTrue(attachReturnVal) + def test_list_nodes(self): + node = self.driver.list_nodes()[0] + self.assertEquals('test', node.name) + + def test_list_locations(self): + location = self.driver.list_locations()[0] + self.assertEquals('Sydney', location.name) + + def test_start_node(self): + node = self.driver.list_nodes()[0] + res = node.ex_start() + self.assertEquals('Starting', res) + + def test_stop_node(self): + node = self.driver.list_nodes()[0] + res = node.ex_stop() + self.assertEquals('Stopped', res) + + def test_list_keypairs(self): + keypairs = 
self.driver.ex_list_keypairs() + fingerprint = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \ + '00:00:00:00:00' + + self.assertEqual(keypairs[0]['name'], 'cs-keypair') + self.assertEqual(keypairs[0]['fingerprint'], fingerprint) + + def test_create_keypair(self): + self.assertRaises(LibcloudError, self.driver.ex_create_keypair, + 'cs-keypair') + + def test_delete_keypair(self): + res = self.driver.ex_delete_keypair('cs-keypair') + self.assertTrue(res) + + def test_list_security_groups(self): + groups = self.driver.ex_list_security_groups() + self.assertEqual(groups[0]['name'], 'default') + + def test_create_security_group(self): + group = self.driver.ex_create_security_group(name='MySG') + self.assertEqual(group['name'], 'MySG') + + def test_delete_security_group(self): + res = self.driver.ex_delete_security_group(name='MySG') + self.assertTrue(res) + + def test_authorize_security_group_ingress(self): + res = self.driver.ex_authorize_security_group_ingress('MySG', + 'TCP', + '22', + '22', + '0.0.0.0/0') + self.assertTrue(res) + class CloudStackMockHttp(MockHttpTestCase): fixtures = ComputeFileFixtures('cloudstack') From 9f59574c5b65aaa306538259837d2649feb1250b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Thu, 6 Jun 2013 22:07:02 +0000 Subject: [PATCH 065/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1490459 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 3 +++ libcloud/compute/base.py | 8 ++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/CHANGES b/CHANGES index f53b5b763a..884ebc28df 100644 --- a/CHANGES +++ b/CHANGES @@ -78,6 +78,9 @@ Changes with Apache Libcloud in deveploment: CloudStack driver. (LIBCLOUD-338) [sebastien goasguen] + - Fix old _wait_until_running method. 
(LIBCLOUD-339) + [Bob Thompson] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 66be6a5205..c2714799f5 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -773,10 +773,10 @@ def _wait_until_running(self, node, wait_period=3, timeout=600, ssh_interface='public_ips', force_ipv4=True): # This is here for backward compatibility and will be removed in the # next major release - return self._wait_until_running(nodes=[node], wait_period=wait_period, - timeout=timeout, - ssh_interface=ssh_interface, - force_ipv4=force_ipv4) + return self.wait_until_running(nodes=[node], wait_period=wait_period, + timeout=timeout, + ssh_interface=ssh_interface, + force_ipv4=force_ipv4) def wait_until_running(self, nodes, wait_period=3, timeout=600, ssh_interface='public_ips', force_ipv4=True): From fbcfa07642478f3e6782f88efda003276e6f89c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 11 Jun 2013 18:58:43 +0000 Subject: [PATCH 066/143] Backport commit from trunk. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.12.x@1491904 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 3 +++ libcloud/compute/drivers/gogrid.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 884ebc28df..a27f3f9fa1 100644 --- a/CHANGES +++ b/CHANGES @@ -81,6 +81,9 @@ Changes with Apache Libcloud in deveploment: - Fix old _wait_until_running method. (LIBCLOUD-339) [Bob Thompson] + - Fix a bug in the GoGrid driver get_uuid method. 
(LIBCLOUD-341) + [Bob Thompson] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles diff --git a/libcloud/compute/drivers/gogrid.py b/libcloud/compute/drivers/gogrid.py index 6dd059bc82..bee9082b4d 100644 --- a/libcloud/compute/drivers/gogrid.py +++ b/libcloud/compute/drivers/gogrid.py @@ -85,7 +85,7 @@ class GoGridNode(Node): # so uuid of node should not change after add is completed def get_uuid(self): return hashlib.sha1( - b("%s:%d" % (self.public_ips, self.driver.type)) + b("%s:%s" % (self.public_ips, self.driver.type)) ).hexdigest() From 3386cfdeb38f995f9355bbcc089d20077a92c11b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Wed, 8 May 2013 22:35:25 +0000 Subject: [PATCH 067/143] Various bug fixes and improvements in the HostVirtual driver. Contributed by Dinesh Bhoopathy, part of LIBCLOUD-249. git-svn-id: https://svn.apache.org/repos/asf/libcloud/trunk@1480490 13f79535-47bb-0310-9956-ffa450edef68 Conflicts: CHANGES --- CHANGES | 4 + libcloud/compute/drivers/hostvirtual.py | 98 ++++++++++++------- .../fixtures/hostvirtual/create_node.json | 16 +-- libcloud/test/compute/test_hostvirtual.py | 24 +++-- 4 files changed, 82 insertions(+), 60 deletions(-) diff --git a/CHANGES b/CHANGES index a27f3f9fa1..85a13fd339 100644 --- a/CHANGES +++ b/CHANGES @@ -84,6 +84,10 @@ Changes with Apache Libcloud in deveploment: - Fix a bug in the GoGrid driver get_uuid method. (LIBCLOUD-341) [Bob Thompson] + - Various bug fixes and improvements in the HostVirtual driver. 
+ (LIBCLOUD-249) + [Dinesh Bhoopathy] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles diff --git a/libcloud/compute/drivers/hostvirtual.py b/libcloud/compute/drivers/hostvirtual.py index 088f4a7743..e311224d52 100644 --- a/libcloud/compute/drivers/hostvirtual.py +++ b/libcloud/compute/drivers/hostvirtual.py @@ -17,14 +17,13 @@ Home page http://www.vr.org/ """ +import time + try: import simplejson as json except ImportError: import json - -from libcloud.utils.py3 import httplib - from libcloud.common.hostvirtual import HostVirtualResponse from libcloud.common.hostvirtual import HostVirtualConnection from libcloud.common.hostvirtual import HostVirtualException @@ -36,7 +35,6 @@ API_ROOT = '/vapi' -#API_VERSION = '0.1' NODE_STATE_MAP = { 'BUILDING': NodeState.PENDING, 'PENDING': NodeState.PENDING, @@ -47,6 +45,8 @@ 'TERMINATED': NodeState.TERMINATED # server is powered down } +DEFAULT_NODE_LOCATION_ID = 4 + class HostVirtualComputeResponse(HostVirtualResponse): pass @@ -78,8 +78,8 @@ def _to_node(self, data): extra['image'] = data['os_id'] if 'location_id' in data: extra['location'] = data['location_id'] - - public_ips.append(data['ip']) + if 'ip' in data: + public_ips.append(data['ip']) node = Node(id=data['mbpkgid'], name=data['fqdn'], state=state, public_ips=public_ips, private_ips=private_ips, @@ -137,40 +137,59 @@ def list_nodes(self): nodes.append(node) return nodes + def _wait_for_node(self, node_id, timeout=30, interval=5.0): + """ + @param node_id: ID of the node to wait for. + @type node_id: C{int} + + @param timeout: Timeout (in seconds). + @type timeout: C{int} + + @param interval: How long to wait (in seconds) between each attempt. 
+ @type interval: C{float} + """ + # poll until we get a node + for i in range(0, timeout, int(interval)): + try: + node = self.ex_get_node(node_id) + return node + except HostVirtualException: + time.sleep(interval) + + raise HostVirtualException(412, 'Timedout on getting node details') + def create_node(self, **kwargs): - name = kwargs['name'] # expects fqdn ex: test.com + dc = None + size = kwargs['size'] image = kwargs['image'] - auth = kwargs['auth'] - dc = None - if "location" in kwargs: - dc = kwargs["location"].id - else: - dc = '3' + params = {'plan': size.name} - params = {'fqdn': name, - 'plan': size.name, - 'image': image.id, - 'location': dc - } + dc = DEFAULT_NODE_LOCATION_ID + if 'location' in kwargs: + dc = kwargs['location'].id - ssh_key = None - password = None - if isinstance(auth, NodeAuthSSHKey): - ssh_key = auth.pubkey - params['ssh_key'] = ssh_key - elif isinstance(auth, NodeAuthPassword): - password = auth.password - params['password'] = password - - if not ssh_key and not password: - raise HostVirtualException(500, "Need SSH key or Root password") - - result = self.connection.request(API_ROOT + '/cloud/buy_build', + # simply order a package first + result = self.connection.request(API_ROOT + '/cloud/buy/', data=json.dumps(params), method='POST').object - return self._to_node(result) + + # create a stub node + stub_node = self._to_node({ + 'mbpkgid': result['id'], + 'status': 'PENDING', + 'fqdn': kwargs['name'], + 'plan_id': size.id, + 'os_id': image.id, + 'location_id': dc + }) + + # provisioning a server using the stub node + self.ex_provision_node(node=stub_node, auth=kwargs['auth']) + + node = self._wait_for_node(stub_node.id) + return node def reboot_node(self, node): params = {'force': 0, 'mbpkgid': node.id} @@ -182,7 +201,11 @@ def reboot_node(self, node): return bool(result) def destroy_node(self, node): - params = {'mbpkgid': node.id} + params = { + 'mbpkgid': node.id, + #'reason': 'Submitted through Libcloud API' + } + result = 
self.connection.request( API_ROOT + '/cloud/cancel', data=json.dumps(params), method='POST').object @@ -216,7 +239,7 @@ def ex_stop_node(self, node): """ params = {'force': 0, 'mbpkgid': node.id} result = self.connection.request( - API_ROOT + '/cloud/server/stop', + API_ROOT + '/cloud/server/shutdown', data=json.dumps(params), method='POST').object @@ -239,9 +262,9 @@ def ex_start_node(self, node): return bool(result) - def ex_build_node(self, **kwargs): + def ex_provision_node(self, **kwargs): """ - Build a server on a VR package and get it booted + Provision a server on a VR package and get it booted @keyword node: node which should be used @type node: L{Node} @@ -255,7 +278,8 @@ def ex_build_node(self, **kwargs): @keyword location: which datacenter to create the server in @type location: L{NodeLocation} - @rtype: C{bool} + @return: Node representing the newly built server + @rtype: L{Node} """ node = kwargs['node'] diff --git a/libcloud/test/compute/fixtures/hostvirtual/create_node.json b/libcloud/test/compute/fixtures/hostvirtual/create_node.json index fd92b81242..b9b3a09fbf 100644 --- a/libcloud/test/compute/fixtures/hostvirtual/create_node.json +++ b/libcloud/test/compute/fixtures/hostvirtual/create_node.json @@ -1,17 +1,3 @@ { - "mbpkgid": "76070", - "package_status": "Active", - "domu_package": null, - "rescue": null, - "locked": null, - "state": null, - "installed": null, - "package": null, - "ipv6": "", - "city": null, - "fqdn": "test.com", - "uptime": false, - "ip": null, - "name": "VR512", - "status": "BUILDING" + "id": "62291" } diff --git a/libcloud/test/compute/test_hostvirtual.py b/libcloud/test/compute/test_hostvirtual.py index 9a83b1588d..9b90554b0d 100644 --- a/libcloud/test/compute/test_hostvirtual.py +++ b/libcloud/test/compute/test_hostvirtual.py @@ -103,13 +103,21 @@ def test_create_node(self): size=size, auth=auth ) - self.assertEqual('76070', node.id) - self.assertEqual('test.com', node.name) + self.assertEqual('62291', node.id) + 
self.assertEqual('server1.vr-cluster.org', node.name) - def test_ex_build_node(self): + def test_ex_provision_node(self): node = self.driver.list_nodes()[0] auth = NodeAuthPassword('vr!@#hosted#@!') - self.assertTrue(self.driver.ex_build_node( + self.assertTrue(self.driver.ex_provision_node( + node=node, + auth=auth + )) + + def test_ex_provision_node(self): + node = self.driver.list_nodes()[0] + auth = NodeAuthPassword('vr!@#hosted#@!') + self.assertTrue(self.driver.ex_provision_node( node=node, auth=auth )) @@ -126,8 +134,8 @@ def test_create_node_in_location(self): auth=auth, location=location ) - self.assertEqual('76070', node.id) - self.assertEqual('test.com', node.name) + self.assertEqual('62291', node.id) + self.assertEqual('server1.vr-cluster.org', node.name) class HostVirtualMockHttp(MockHttp): @@ -161,7 +169,7 @@ def _vapi_cloud_server_reboot(self, method, url, body, headers): body = self.fixtures.load('node_reboot.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _vapi_cloud_server_stop(self, method, url, body, headers): + def _vapi_cloud_server_shutdown(self, method, url, body, headers): body = self.fixtures.load('node_stop.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) @@ -169,7 +177,7 @@ def _vapi_cloud_server_start(self, method, url, body, headers): body = self.fixtures.load('node_start.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _vapi_cloud_buy_build(self, method, url, body, headers): + def _vapi_cloud_buy(self, method, url, body, headers): body = self.fixtures.load('create_node.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) From e40429596a2fde770cbd26a15e74f9b9742812c2 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 14 Jun 2013 20:14:00 -0700 Subject: [PATCH 068/143] Update readme to include a better description. 
--- README | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/README b/README index 0cfc18d43e..e32ba3c7b6 100644 --- a/README +++ b/README @@ -1,14 +1,24 @@ -Apache libcloud - a unified interface into the cloud +Apache Libcloud - a unified interface into the cloud ==================================================== -The goal of this project is to create a basic yet functional standard library -into various cloud providers. +Apache Libcloud is a Python library which hides differences between different +cloud provider APIs and allows you to manage different cloud resources +through a unified and easy to use API. + +Resource you can manage with Libcloud are divided in the following categories: + +* Cloud Servers - services such as Amazon EC2 and Rackspace CloudServers + (libcloud.compute.*) +* Cloud Storage - services such as Amazon S3 and Rackspace CloudFiles + (libcloud.storage.*) +* Load Balancers as a Service, LBaaS (libcloud.loadbalancer.*) +* DNS as a Service, DNSaaS (libcloud.dns.*) Apache libcloud is an Apache project, see for more information. For API documentation and examples, see: - + Feedback ======== From 03e7084230db162e31e20247e54aa427121a0203 Mon Sep 17 00:00:00 2001 From: cloudnull Date: Thu, 13 Jun 2013 16:47:59 -0500 Subject: [PATCH 069/143] modified timeout in deploy node method. This was modified such that the the timeout is respected when passed as a kwargs. 
Signed-off-by: Tomaz Muraus --- libcloud/compute/base.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index c2714799f5..829ec37cbb 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -658,7 +658,8 @@ def deploy_node(self, **kwargs): try: node, ip_addresses = self.wait_until_running( nodes=[node], - wait_period=3, timeout=NODE_ONLINE_WAIT_TIMEOUT, + wait_period=3, + timeout=kwargs.get('timeout', NODE_ONLINE_WAIT_TIMEOUT), ssh_interface=ssh_interface)[0] except Exception: e = sys.exc_info()[1] From 742d0832f16f7219b9a377d7c3593a0e1b6de48c Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 14 Jun 2013 21:28:12 -0700 Subject: [PATCH 070/143] Update changes file. Conflicts: CHANGES --- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index 85a13fd339..7b4f5a8e93 100644 --- a/CHANGES +++ b/CHANGES @@ -88,6 +88,9 @@ Changes with Apache Libcloud in deveploment: (LIBCLOUD-249) [Dinesh Bhoopathy] + - Fix a bug with deploy_node not respecting 'timeout' kwarg. + [Kevin Carter] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles From b463c62837d92a9e4974ea93003a9aecbedcfd02 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 14 Jun 2013 22:13:40 -0700 Subject: [PATCH 071/143] Decouple Connection from the driver and don't require 'driver' attribute to be set. 
--- libcloud/common/base.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/libcloud/common/base.py b/libcloud/common/base.py index c03a5ea286..a1a6c42fc9 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -505,10 +505,17 @@ def connect(self, host=None, port=None, base_url=None): self.connection = connection def _user_agent(self): - return 'libcloud/%s (%s)%s' % ( + user_agent_suffix = ' '.join(['(%s)' % x for x in self.ua]) + + if self.driver: + user_agent = 'libcloud/%s (%s) %s' % ( libcloud.__version__, - self.driver.name, - "".join([" (%s)" % x for x in self.ua])) + self.driver.name, user_agent_suffix) + else: + user_agent = 'libcloud/%s %s' % ( + libcloud.__version__, user_agent_suffix) + + return user_agent def user_agent_append(self, token): """ From fa1a5314c5bb9c2fba4cdfa720cd7b68199d74d7 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 14 Jun 2013 22:16:02 -0700 Subject: [PATCH 072/143] Modify Connection.request so it works correctly if 'params' value is a sequence. --- libcloud/common/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libcloud/common/base.py b/libcloud/common/base.py index a1a6c42fc9..a5d2a49a4e 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -603,9 +603,9 @@ def request(self, if params: if '?' in action: - url = '&'.join((action, urlencode(params))) + url = '&'.join((action, urlencode(params, doseq=True))) else: - url = '?'.join((action, urlencode(params))) + url = '?'.join((action, urlencode(params, doseq=True))) else: url = action From e94106f3b3d3d33cc403f5c0ee9cf0dadf697143 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 14 Jun 2013 22:28:37 -0700 Subject: [PATCH 073/143] Default data to None. 
--- libcloud/common/base.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/libcloud/common/base.py b/libcloud/common/base.py index a5d2a49a4e..ae0f732190 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -529,13 +529,8 @@ def user_agent_append(self, token): """ self.ua.append(token) - def request(self, - action, - params=None, - data='', - headers=None, - method='GET', - raw=False): + def request(self, action, params=None, data=None, headers=None, + method='GET', raw=False): """ Request a given `action`. From e998c39d814c680662a3058cfaf5a1b41cbf668e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 14 Jun 2013 22:29:19 -0700 Subject: [PATCH 074/143] Style cleanup (avoid 1 char variable name, use is not none). --- libcloud/common/base.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/libcloud/common/base.py b/libcloud/common/base.py index ae0f732190..a2c6b159d9 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -564,31 +564,35 @@ def request(self, action, params=None, data=None, headers=None, """ if params is None: params = {} + if headers is None: headers = {} action = self.morph_action_hook(action) self.action = action self.method = method + # Extend default parameters params = self.add_default_params(params) + # Extend default headers headers = self.add_default_headers(headers) + # We always send a user-agent header headers.update({'User-Agent': self._user_agent()}) - # Indicate that support gzip and deflate compression + # Indicate that we support gzip and deflate compression headers.update({'Accept-Encoding': 'gzip,deflate'}) - p = int(self.port) + port = int(self.port) - if p not in (80, 443): - headers.update({'Host': "%s:%d" % (self.host, p)}) + if port not in (80, 443): + headers.update({'Host': "%s:%d" % (self.host, port)}) else: headers.update({'Host': self.host}) # Encode data if necessary - if data != '' and data != None: + if data != '' and data is 
not None: data = self.encode_data(data) if data is not None: From fd76cf64c26fbbe865eb281699bbf6def2032ed2 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 14 Jun 2013 22:30:09 -0700 Subject: [PATCH 075/143] Don't send Content-Length header if len(data) == 0 and default all data arguments to None. --- libcloud/common/base.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/libcloud/common/base.py b/libcloud/common/base.py index a2c6b159d9..464bec861a 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -592,10 +592,8 @@ def request(self, action, params=None, data=None, headers=None, headers.update({'Host': self.host}) # Encode data if necessary - if data != '' and data is not None: + if data: data = self.encode_data(data) - - if data is not None: headers.update({'Content-Length': str(len(data))}) params, headers = self.pre_connect_hook(params, headers) @@ -692,7 +690,7 @@ class PollingConnection(Connection): timeout = 200 request_method = 'request' - def async_request(self, action, params=None, data='', headers=None, + def async_request(self, action, params=None, data=None, headers=None, method='GET', context=None): """ Perform an 'async' request to the specified path. Keep in mind that @@ -758,7 +756,7 @@ def async_request(self, action, params=None, data='', headers=None, return response - def get_request_kwargs(self, action, params=None, data='', headers=None, + def get_request_kwargs(self, action, params=None, data=None, headers=None, method='GET', context=None): """ Arguments which are passed to the initial request() call inside From 9bf618c4b79379cc584f8d293974eefda3939741 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 14 Jun 2013 23:05:47 -0700 Subject: [PATCH 076/143] Update affected test and mockhttp class. 
--- libcloud/test/__init__.py | 4 ++-- libcloud/test/compute/test_brightbox.py | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/libcloud/test/__init__.py b/libcloud/test/__init__.py index c6c8dffff2..20c8a75630 100644 --- a/libcloud/test/__init__.py +++ b/libcloud/test/__init__.py @@ -77,9 +77,9 @@ class MockResponse(object): reason = '' version = 11 - def __init__(self, status, body, headers=None, reason=None): + def __init__(self, status, body=None, headers=None, reason=None): self.status = status - self.body = StringIO(u(body)) + self.body = StringIO(u(body)) if body else StringIO() self.headers = headers or self.headers self.reason = reason or self.reason diff --git a/libcloud/test/compute/test_brightbox.py b/libcloud/test/compute/test_brightbox.py index 95c0648b1f..b9f3ce2098 100644 --- a/libcloud/test/compute/test_brightbox.py +++ b/libcloud/test/compute/test_brightbox.py @@ -285,13 +285,16 @@ def _1_0_zones(self, method, url, body, headers): return self.response(httplib.OK, self.fixtures.load('list_zones.json')) def _2_0_zones(self, method, url, body, headers): return self.response(httplib.BAD_REQUEST, '{"error_name":"unrecognised_endpoint", "errors": ["The request was for an unrecognised API endpoint"]}') - + def _1_0_cloud_ips(self, method, url, body, headers): if method == 'GET': return self.response(httplib.OK, self.fixtures.load('list_cloud_ips.json')) elif method == 'POST': - body = json.loads(body) + if body: + body = json.loads(body) + node = json.loads(self.fixtures.load('create_cloud_ip.json')) + if 'reverse_dns' in body: node['reverse_dns'] = body['reverse_dns'] return self.response(httplib.ACCEPTED, json.dumps(node)) @@ -305,7 +308,7 @@ def _1_0_cloud_ips_cip_jsjc5(self, method, url, body, headers): return self.response(httplib.OK, '') else: return self.response(httplib.BAD_REQUEST, '{"error_name":"bad dns", "errors": ["Bad dns"]}') - + def _1_0_cloud_ips_cip_jsjc5_map(self, method, url, body, headers): if method == 
'POST': body = json.loads(body) From 3e77f66bf979c24fd8dd3f7c121096df455b24a3 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 14 Jun 2013 23:06:11 -0700 Subject: [PATCH 077/143] Update changes. --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 7b4f5a8e93..20795ed39b 100644 --- a/CHANGES +++ b/CHANGES @@ -18,6 +18,10 @@ Changes with Apache Libcloud in deveploment: authenticate() call. [Tomaz Muraus] + - Modify base Connection class to not send Content-Length header if body is + not provided. + [Tomaz Muraus] + *) Compute - Fix destroy_node method in the experimental libvirt driver. From a7997cea64660d9541de07f3857ad59888d9d505 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 15 Jun 2013 00:49:52 -0700 Subject: [PATCH 078/143] Update .ratignore. --- .ratignore | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.ratignore b/.ratignore index f113a4d195..ba54ecd9cd 100644 --- a/.ratignore +++ b/.ratignore @@ -1,12 +1,15 @@ +RELEASING MANIFEST MANIFEST.in .gitignore +tox.ini apidocs/ CHANGES HACKING test/storage/fixtures/ test/compute/fixtures/ test/loadbalancer/fixtures/ +test/dns/fixtures/ coverage_html_report/ .coverage .coveragerc @@ -14,5 +17,7 @@ libcloud/data/pricing.json libcloud/common/__init__.py libcloud/compute/__init__.py libcloud/storage/__init__.py +libcloud/loadbalancer/__init__.py +libcloud/dns/__init__.py test/storage/__init__.py test/pricing_test.json From d7c8d21a2513ce6bba0a9db2dc6f6a673967f610 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 15 Jun 2013 00:51:13 -0700 Subject: [PATCH 079/143] Add missing license headers. 
--- libcloud/test/common/test_cloudstack.py | 15 +++++++++++++++ libcloud/test/common/test_gandi.py | 15 +++++++++++++++ libcloud/test/common/test_openstack.py | 15 +++++++++++++++ 3 files changed, 45 insertions(+) diff --git a/libcloud/test/common/test_cloudstack.py b/libcloud/test/common/test_cloudstack.py index 9c85c36eb4..62d6abfa86 100644 --- a/libcloud/test/common/test_cloudstack.py +++ b/libcloud/test/common/test_cloudstack.py @@ -1,3 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys import unittest diff --git a/libcloud/test/common/test_gandi.py b/libcloud/test/common/test_gandi.py index 962bd29188..d0dfc9dd8e 100644 --- a/libcloud/test/common/test_gandi.py +++ b/libcloud/test/common/test_gandi.py @@ -1,3 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from libcloud.utils.py3 import xmlrpclib from libcloud.test import MockHttp diff --git a/libcloud/test/common/test_openstack.py b/libcloud/test/common/test_openstack.py index 56042cd59d..f7a269e3d1 100644 --- a/libcloud/test/common/test_openstack.py +++ b/libcloud/test/common/test_openstack.py @@ -1,3 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys import unittest From 45338e0288511aab4e9652e63440b62a1e2289b6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 15 Jun 2013 01:06:14 -0700 Subject: [PATCH 080/143] Use parse_qs and parse_qsl from libcloud.utils.py3. 
--- libcloud/test/__init__.py | 3 +-- libcloud/test/common/test_cloudstack.py | 7 +------ libcloud/test/compute/test_cloudstack.py | 7 +------ libcloud/test/compute/test_ec2.py | 7 +------ libcloud/test/compute/test_gogrid.py | 6 +----- libcloud/test/compute/test_ktucloud.py | 7 +------ libcloud/test/loadbalancer/test_cloudstack.py | 7 +------ libcloud/test/storage/test_azure_blobs.py | 8 +------- libcloud/test/storage/test_s3.py | 8 +------- libcloud/utils/py3.py | 12 ++++++++++++ 10 files changed, 21 insertions(+), 51 deletions(-) diff --git a/libcloud/test/__init__.py b/libcloud/test/__init__.py index 20c8a75630..5351992328 100644 --- a/libcloud/test/__init__.py +++ b/libcloud/test/__init__.py @@ -17,11 +17,10 @@ import random import unittest -from cgi import parse_qs - from libcloud.utils.py3 import httplib from libcloud.utils.py3 import StringIO from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qs from libcloud.utils.py3 import u diff --git a/libcloud/test/common/test_cloudstack.py b/libcloud/test/common/test_cloudstack.py index 62d6abfa86..1c7c62474e 100644 --- a/libcloud/test/common/test_cloudstack.py +++ b/libcloud/test/common/test_cloudstack.py @@ -24,12 +24,7 @@ from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import b - -try: - parse_qsl = urlparse.parse_qsl -except AttributeError: - import cgi - parse_qsl = cgi.parse_qsl +from libcloud.utils.py3 import parse_qsl from libcloud.common.cloudstack import CloudStackConnection, CloudStackResponse from libcloud.common.types import MalformedResponseError diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index 2e72fc31d3..864cff453c 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -18,18 +18,13 @@ from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qsl try: import 
simplejson as json except ImportError: import json -try: - parse_qsl = urlparse.parse_qsl -except AttributeError: - import cgi - parse_qsl = cgi.parse_qsl - from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver from libcloud.compute.types import DeploymentError, LibcloudError from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 260a4f73d4..d0e2c8a610 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -16,6 +16,7 @@ import unittest from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import parse_qsl from libcloud.compute.drivers.ec2 import EC2NodeDriver, EC2APSENodeDriver from libcloud.compute.drivers.ec2 import EC2USWestNodeDriver @@ -39,12 +40,6 @@ from libcloud.test.secrets import EC2_PARAMS -try: - parse_qsl = urlparse.parse_qsl -except AttributeError: - import cgi - parse_qsl = cgi.parse_qsl - class EC2Tests(LibcloudTestCase, TestCaseMixin): image_name = 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml' diff --git a/libcloud/test/compute/test_gogrid.py b/libcloud/test/compute/test_gogrid.py index 823f3288d5..65b9327485 100644 --- a/libcloud/test/compute/test_gogrid.py +++ b/libcloud/test/compute/test_gogrid.py @@ -17,6 +17,7 @@ from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qs from libcloud.compute.base import NodeState, NodeLocation from libcloud.common.types import LibcloudError, InvalidCredsError @@ -266,11 +267,6 @@ def _api_grid_image_edit(self, method, url, body, headers): def _api_common_lookup_list(self, method, url, body, headers): _valid_lookups = ("ip.datacenter",) - try: - from urlparse import parse_qs - except ImportError: - from cgi import parse_qs - lookup = parse_qs(urlparse.urlparse(url).query)["lookup"][0] if lookup in _valid_lookups: fixture_path = "lookup_list_%s.json" % \ diff --git 
a/libcloud/test/compute/test_ktucloud.py b/libcloud/test/compute/test_ktucloud.py index 8a637c0264..319d6b02e6 100644 --- a/libcloud/test/compute/test_ktucloud.py +++ b/libcloud/test/compute/test_ktucloud.py @@ -18,18 +18,13 @@ from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qsl try: import simplejson as json except ImportError: import json -try: - parse_qsl = urlparse.parse_qsl -except AttributeError: - import cgi - parse_qsl = cgi.parse_qsl - from libcloud.compute.drivers.ktucloud import KTUCloudNodeDriver from libcloud.compute.types import DeploymentError, LibcloudError diff --git a/libcloud/test/loadbalancer/test_cloudstack.py b/libcloud/test/loadbalancer/test_cloudstack.py index b4ea3a27cc..6c924b9bb1 100644 --- a/libcloud/test/loadbalancer/test_cloudstack.py +++ b/libcloud/test/loadbalancer/test_cloudstack.py @@ -8,12 +8,7 @@ from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse - -try: - parse_qsl = urlparse.parse_qsl -except AttributeError: - import cgi - parse_qsl = cgi.parse_qsl +from libcloud.utils.py3 import parse_qsl from libcloud.common.types import LibcloudError from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm diff --git a/libcloud/test/storage/test_azure_blobs.py b/libcloud/test/storage/test_azure_blobs.py index eb30cc4cef..3255d9d78c 100644 --- a/libcloud/test/storage/test_azure_blobs.py +++ b/libcloud/test/storage/test_azure_blobs.py @@ -23,6 +23,7 @@ from xml.etree import ElementTree as ET from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qs from libcloud.common.types import InvalidCredsError from libcloud.common.types import LibcloudError @@ -44,13 +45,6 @@ from libcloud.test.secrets import STORAGE_AZURE_BLOBS_PARAMS -try: - parse_qs = urlparse.parse_qs -except AttributeError: - import cgi - parse_qs = cgi.parse_qs - - class AzureBlobsMockHttp(StorageMockHttp, 
MockHttpTestCase): fixtures = StorageFileFixtures('azure_blobs') diff --git a/libcloud/test/storage/test_s3.py b/libcloud/test/storage/test_s3.py index ff9620237d..2287d5cc4e 100644 --- a/libcloud/test/storage/test_s3.py +++ b/libcloud/test/storage/test_s3.py @@ -20,6 +20,7 @@ from xml.etree import ElementTree as ET from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qs from libcloud.common.types import InvalidCredsError from libcloud.common.types import LibcloudError @@ -41,13 +42,6 @@ from libcloud.test.secrets import STORAGE_S3_PARAMS -try: - parse_qs = urlparse.parse_qs -except AttributeError: - import cgi - parse_qs = cgi.parse_qs - - class S3MockHttp(StorageMockHttp, MockHttpTestCase): fixtures = StorageFileFixtures('s3') diff --git a/libcloud/utils/py3.py b/libcloud/utils/py3.py index 3a41bc12eb..18055f04ed 100644 --- a/libcloud/utils/py3.py +++ b/libcloud/utils/py3.py @@ -59,6 +59,9 @@ from builtins import bytes from builtins import next + parse_qs = urlparse.parse_qs + parse_qsl = urlparse.parse_qsl + basestring = str def method_type(callable, instance, klass): @@ -94,6 +97,15 @@ def tostring(node): from __builtin__ import reload + if PY25: + import cgi + + parse_qs = cgi.parse_qs + parse_qsl = cgi.parse_qsl + else: + parse_qs = urlparse.parse_qs + parse_qsl = urlparse.parse_qsl + if not PY25: from os.path import relpath From 2abe75454fd9c037bafec16ace6734906de357d5 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 19 Jun 2013 11:41:49 -0700 Subject: [PATCH 081/143] Fixed a typo in the load balancers example. 
Signed-off-by: Tomaz Muraus --- example_loadbalancer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example_loadbalancer.py b/example_loadbalancer.py index 2852290355..c480cc4a78 100644 --- a/example_loadbalancer.py +++ b/example_loadbalancer.py @@ -42,7 +42,7 @@ def main(): algorithm=Algorithm.ROUND_ROBIN, port=80, protocol='http', - members) + members=members) print(new_balancer) From 9901b55fd712d35b4223819eb027e3ba4cafbe18 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 21 Jun 2013 12:16:38 +0200 Subject: [PATCH 082/143] Fix a bug in the test case - should use UTC timestamp. --- libcloud/test/compute/test_openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index a69b998f45..e77aefb735 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -189,7 +189,7 @@ def test_token_expiration_and_force_reauthentication(self): # No force reauth, valid / non-expired token which is about to expire in # less than AUTH_TOKEN_EXPIRES_GRACE_SECONDS - soon = datetime.datetime.now() + \ + soon = datetime.datetime.utcnow() + \ datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS - 1) osa.auth_token = None From ac022ce583cb901bd1ae0d045f74a91cc3ecbed7 Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Thu, 20 Jun 2013 15:50:20 -0400 Subject: [PATCH 083/143] LIBCLOUD-345: Fix CloudStack compute driver create_node method to return CloudStackNode and add new expunging node state. 
Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/cloudstack.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index d2c1799085..eb95a67304 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -118,7 +118,8 @@ class CloudStackNodeDriver(CloudStackDriverMixIn, NodeDriver): 'Starting': NodeState.REBOOTING, 'Stopped': NodeState.TERMINATED, 'Stopping': NodeState.TERMINATED, - 'Destroyed': NodeState.TERMINATED + 'Destroyed': NodeState.TERMINATED, + 'Expunging': NodeState.TERMINATED } def __init__(self, key, secret=None, secure=True, host=None, @@ -274,7 +275,7 @@ def create_node(self, name, size, image, location=None, extra_args=None, public_ips = [] private_ips = [nic['ipaddress'] for nic in node['nic']] - return Node( + return CloudStackNode( id=node['id'], name=node['displayname'], state=state, From b5f4a42ac3761feb2f5401679daf96a70ebb9147 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 21 Jun 2013 12:23:50 +0200 Subject: [PATCH 084/143] Update changes. --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 20795ed39b..987b3465ce 100644 --- a/CHANGES +++ b/CHANGES @@ -95,6 +95,10 @@ Changes with Apache Libcloud in deveploment: - Fix a bug with deploy_node not respecting 'timeout' kwarg. [Kevin Carter] + - Modify create_node method in CloudStack driver to return an instance of + CloudStackNode and add a new "expunging" node state. (LIBCLOUD-345) + [sebastien goasguen] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles From 7bc2d89c707956f1f13bf93247a8e1ab76caffa6 Mon Sep 17 00:00:00 2001 From: Bob Thompson Date: Fri, 21 Jun 2013 15:43:07 -0400 Subject: [PATCH 085/143] Issue LIBCLOUD-346: Fix ElasticHosts API endpoint host names. 
Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/elastichosts.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/libcloud/compute/drivers/elastichosts.py b/libcloud/compute/drivers/elastichosts.py index 9b69cac2f8..ea801ee8cc 100644 --- a/libcloud/compute/drivers/elastichosts.py +++ b/libcloud/compute/drivers/elastichosts.py @@ -27,42 +27,42 @@ 'uk-1': { 'name': 'London Peer 1', 'country': 'United Kingdom', - 'host': 'api.lon-p.elastichosts.com' + 'host': 'api-lon-p.elastichosts.com' }, 'uk-2': { 'name': 'London BlueSquare', 'country': 'United Kingdom', - 'host': 'api.lon-b.elastichosts.com' + 'host': 'api-lon-b.elastichosts.com' }, 'us-1': { 'name': 'San Antonio Peer 1', 'country': 'United States', - 'host': 'api.sat-p.elastichosts.com' + 'host': 'api-sat-p.elastichosts.com' }, 'us-2': { 'name': 'Los Angeles Peer 1', 'country': 'United States', - 'host': 'api.lax-p.elastichosts.com' + 'host': 'api-lax-p.elastichosts.com' }, 'us-3': { 'name': 'San Jose (Silicon Valley)', 'country': 'United States', - 'host': 'api.sjc-c.elastichosts.com' + 'host': 'api-sjc-c.elastichosts.com' }, 'ca-1': { 'name': 'Toronto Peer 1', 'country': 'Canada', - 'host': 'api.tor-p.elastichosts.com' + 'host': 'api-tor-p.elastichosts.com' }, 'au-1': { 'name': 'Sydney', 'country': 'Australia', - 'host': 'api.syd-v.elastichosts.com' + 'host': 'api-syd-v.elastichosts.com' }, 'cn-1': { 'name': 'Hong Kong', 'country': 'China', - 'host': 'api.hkg-e.elastichosts.com' + 'host': 'api-hkg-e.elastichosts.com' } } From 428082821a0b7c1e05ff04239f6f94ca9d92b092 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 21 Jun 2013 23:53:28 +0200 Subject: [PATCH 086/143] Update changes. --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 987b3465ce..d0729ad637 100644 --- a/CHANGES +++ b/CHANGES @@ -99,6 +99,10 @@ Changes with Apache Libcloud in deveploment: CloudStackNode and add a new "expunging" node state. 
(LIBCLOUD-345) [sebastien goasguen] + - Update API endpoint hostnames in the ElasticHost driver and use hostnames + which return a valid SSL certificate. (LIBCLOUD-346) + [Bob Thompson] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles From 2c815ace3212610430db4144cfa0eaacff8398a6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 22 Jun 2013 20:37:31 +0200 Subject: [PATCH 087/143] Add a driver for Rackspace's new datacenter in Sydney, Australia. --- CHANGES | 3 +++ libcloud/compute/drivers/rackspace.py | 37 +++++++++++++++++++++++++-- libcloud/compute/providers.py | 2 ++ libcloud/compute/types.py | 1 + 4 files changed, 41 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index d0729ad637..bcbfd4c3a3 100644 --- a/CHANGES +++ b/CHANGES @@ -103,6 +103,9 @@ Changes with Apache Libcloud in deveploment: which return a valid SSL certificate. (LIBCLOUD-346) [Bob Thompson] + - Add a driver for Rackspace's new datacenter in Sydney, Australia. + [Alex Gaynor, Tomaz Muraus] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles diff --git a/libcloud/compute/drivers/rackspace.py b/libcloud/compute/drivers/rackspace.py index 2f8a329fb0..203a73b7a7 100644 --- a/libcloud/compute/drivers/rackspace.py +++ b/libcloud/compute/drivers/rackspace.py @@ -34,8 +34,8 @@ class RackspaceConnection(OpenStack_1_0_Connection): XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0' def get_endpoint(self): - ep = {} + if '2.0' in self._auth_version: ep = self.service_catalog.get_endpoint(service_type='compute', name='cloudServers') @@ -74,7 +74,8 @@ class RackspaceUKConnection(RackspaceConnection): class RackspaceUKNodeDriver(RackspaceNodeDriver): - """Driver for Rackspace in the UK (London) + """ + Driver for Rackspace in the UK (London) """ name = 'Rackspace (UK)' @@ -82,3 +83,35 @@ class RackspaceUKNodeDriver(RackspaceNodeDriver): def list_locations(self): return [NodeLocation(0, 'Rackspace UK London', 
'UK', self)] + + +class RackspaceAUConnection(RackspaceConnection): + """ + Connection class for the Rackspace Sydney datacenter + """ + + auth_url = AUTH_URL_US + _auth_version = '2.0' + + def get_endpoint(self): + ep = {} + + ep = self.service_catalog.get_endpoint(service_type='compute', + name='cloudServersOpenStack', + region='SYD') + + if 'publicURL' in ep: + return ep['publicURL'] + + raise LibcloudError('Could not find specified endpoint') + + +class RackspaceAUNodeDriver(RackspaceNodeDriver): + """Driver for Rackspace in the UK (London) + """ + + name = 'Rackspace (Sydney, Australia)' + connectionCls = RackspaceAUConnection + + def list_locations(self): + return [NodeLocation(0, 'Rackspace Sydney, Australia', 'AU', self)] diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index 8e23f4fefa..54f0aa9566 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -70,6 +70,8 @@ ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'), Provider.RACKSPACE_UK: ('libcloud.compute.drivers.rackspace', 'RackspaceUKNodeDriver'), + Provider.RACKSPACE_AU: + ('libcloud.compute.drivers.rackspace', 'RackspaceAUNodeDriver'), Provider.SLICEHOST: ('libcloud.compute.drivers.slicehost', 'SlicehostNodeDriver'), Provider.VPSNET: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index dabe50e78e..2f076cd90a 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -124,6 +124,7 @@ class Provider(object): RACKSPACE = 'rackspace' RACKSPACE_UK = 'rackspace_uk' + RACKSPACE_AU = 'rackspace_au' RACKSPACE_NOVA_BETA = 'rackspace_nova_beta' RACKSPACE_NOVA_DFW = 'rackspace_nova_dfw' RACKSPACE_NOVA_LON = 'rackspace_nova_lon' From 8dda67103c0bb26548c0fea9519f6316b4062d51 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 23 Jun 2013 09:08:56 +0200 Subject: [PATCH 088/143] Add unittest2 dependency for tests and use it if Python version is not in ['2.7', '3.x']. 
Also update some of the affected tests to use unittest2. --- libcloud/test/__init__.py | 7 ++++++- libcloud/test/loadbalancer/test_rackspace.py | 16 ++++++++-------- libcloud/utils/py3.py | 6 ++++++ setup.py | 12 ++++++++++++ tox.ini | 2 ++ 5 files changed, 34 insertions(+), 9 deletions(-) diff --git a/libcloud/test/__init__.py b/libcloud/test/__init__.py index 5351992328..86fcc47fa9 100644 --- a/libcloud/test/__init__.py +++ b/libcloud/test/__init__.py @@ -15,13 +15,18 @@ import sys import random -import unittest from libcloud.utils.py3 import httplib from libcloud.utils.py3 import StringIO from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import parse_qs from libcloud.utils.py3 import u +from libcloud.utils.py3 import unittest2_required + +if unittest2_required: + import unittest2 as unittest +else: + import unittest XML_HEADERS = {'content-type': 'application/xml'} diff --git a/libcloud/test/loadbalancer/test_rackspace.py b/libcloud/test/loadbalancer/test_rackspace.py index 71de39a99b..9e8ab68b35 100644 --- a/libcloud/test/loadbalancer/test_rackspace.py +++ b/libcloud/test/loadbalancer/test_rackspace.py @@ -15,7 +15,6 @@ import sys import datetime -import unittest try: import simplejson as json @@ -32,6 +31,7 @@ from libcloud.loadbalancer.drivers.rackspace import RackspaceAccessRuleType from libcloud.common.types import LibcloudError +from libcloud.test import unittest from libcloud.test import MockHttpTestCase from libcloud.test.file_fixtures import LoadBalancerFileFixtures, OpenStackFixtures @@ -1326,7 +1326,7 @@ def _v1_0_slug_loadbalancers_94700_healthmonitor(self, method, url, body, header def _v1_0_slug_loadbalancers_3130(self, method, url, body, headers): """ update_balancer(b, protocol='HTTPS'), then get_balancer('3130') """ if method == "PUT": - self.assertEqual(json.loads(body), {'protocol': 'HTTPS'}) + self.assertDictEqual(json.loads(body), {'protocol': 'HTTPS'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif 
method == "GET": response_body = json.loads(self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) @@ -1338,7 +1338,7 @@ def _v1_0_slug_loadbalancers_3130(self, method, url, body, headers): def _v1_0_slug_loadbalancers_3131(self, method, url, body, headers): """ update_balancer(b, port=443), then get_balancer('3131') """ if method == "PUT": - self.assertEqual(json.loads(body), {'port': 1337}) + self.assertDictEqual(json.loads(body), {'port': 1337}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads(self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) @@ -1350,7 +1350,7 @@ def _v1_0_slug_loadbalancers_3131(self, method, url, body, headers): def _v1_0_slug_loadbalancers_3132(self, method, url, body, headers): """ update_balancer(b, name='new_lb_name'), then get_balancer('3132') """ if method == "PUT": - self.assertEqual(json.loads(body), {'name': 'new_lb_name'}) + self.assertDictEqual(json.loads(body), {'name': 'new_lb_name'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads(self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) @@ -1362,7 +1362,7 @@ def _v1_0_slug_loadbalancers_3132(self, method, url, body, headers): def _v1_0_slug_loadbalancers_3133(self, method, url, body, headers): """ update_balancer(b, algorithm='ROUND_ROBIN'), then get_balancer('3133') """ if method == "PUT": - self.assertEqual(json.loads(body), {'algorithm': 'ROUND_ROBIN'}) + self.assertDictEqual(json.loads(body), {'algorithm': 'ROUND_ROBIN'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads(self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) @@ -1380,7 +1380,7 @@ def _v1_0_slug_loadbalancers_3134(self, method, url, body, headers): def _v1_0_slug_loadbalancers_3135(self, method, url, body, headers): """ update_balancer(b, protocol='IMAPv3'), then get_balancer('3135') """ if method 
== "PUT": - self.assertEqual(json.loads(body), {'protocol': 'IMAPv2'}) + self.assertDictEqual(json.loads(body), {'protocol': 'IMAPv2'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads(self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) @@ -1392,7 +1392,7 @@ def _v1_0_slug_loadbalancers_3135(self, method, url, body, headers): def _v1_0_slug_loadbalancers_3136(self, method, url, body, headers): """ update_balancer(b, protocol='IMAPv3'), then get_balancer('3136') """ if method == "PUT": - self.assertEqual(json.loads(body), {'protocol': 'IMAPv3'}) + self.assertDictEqual(json.loads(body), {'protocol': 'IMAPv3'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads(self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) @@ -1404,7 +1404,7 @@ def _v1_0_slug_loadbalancers_3136(self, method, url, body, headers): def _v1_0_slug_loadbalancers_3137(self, method, url, body, headers): """ update_balancer(b, protocol='IMAPv3'), then get_balancer('3137') """ if method == "PUT": - self.assertEqual(json.loads(body), {'protocol': 'IMAPv4'}) + self.assertDictEqual(json.loads(body), {'protocol': 'IMAPv4'}) return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) elif method == "GET": response_body = json.loads(self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) diff --git a/libcloud/utils/py3.py b/libcloud/utils/py3.py index 18055f04ed..0d6987dead 100644 --- a/libcloud/utils/py3.py +++ b/libcloud/utils/py3.py @@ -26,6 +26,7 @@ PY2 = False PY25 = False +PY27 = False PY3 = False PY32 = False @@ -35,6 +36,9 @@ if sys.version_info >= (2, 5) and sys.version_info <= (2, 6): PY25 = True +if sys.version_info >= (2, 7) and sys.version_info <= (2, 8): + PY27 = True + if sys.version_info >= (3, 0): PY3 = True @@ -141,3 +145,5 @@ def relpath(path, start=posixpath.curdir): if not rel_list: return posixpath.curdir return 
posixpath.join(*rel_list) + +unittest2_required = not (PY27 or PY3) diff --git a/setup.py b/setup.py index 61629f20ca..1adee5de32 100644 --- a/setup.py +++ b/setup.py @@ -31,6 +31,8 @@ import libcloud.utils.misc from libcloud.utils.dist import get_packages, get_data_files +from libcloud.utils.py3 import unittest2_required + libcloud.utils.misc.SHOW_DEPRECATION_WARNING = False @@ -88,6 +90,16 @@ def run(self): 'pip install mock') sys.exit(1) + if unittest2_required: + try: + import unittest2 + unittest2 + except ImportError: + print('Missing "unittest2" library. unittest2 is library is needed ' + 'to run the tests. You can install it using pip: ' + 'pip install unittest2') + sys.exit(1) + status = self._run_tests() sys.exit(status) diff --git a/tox.ini b/tox.ini index b6d070be4c..7212a7e259 100644 --- a/tox.ini +++ b/tox.ini @@ -5,12 +5,14 @@ setenv = [testenv] deps = mock + unittest2 lockfile paramiko commands = python setup.py test [testenv:py25] deps = mock + unittest2 lockfile ssl simplejson From 2a0479e16048a04a03347572630815b0b6a9b49a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 23 Jun 2013 18:58:11 +0200 Subject: [PATCH 089/143] Update changes. --- CHANGES | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGES b/CHANGES index bcbfd4c3a3..42562299e2 100644 --- a/CHANGES +++ b/CHANGES @@ -22,6 +22,12 @@ Changes with Apache Libcloud in deveploment: not provided. [Tomaz Muraus] + *) Misc + + - Add unittest2 library dependency for tests and update some tests to use + it. + [Tomaz Muraus] + *) Compute - Fix destroy_node method in the experimental libvirt driver. From f590c9fdb62ba4d952e45ef6cc898a45d2933bf4 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 23 Jun 2013 20:31:19 +0200 Subject: [PATCH 090/143] Modify Rackspace UK compute driver so it works correctly with the new auth system. 
--- libcloud/compute/drivers/rackspace.py | 31 +++++++++++++++++++++------ 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/libcloud/compute/drivers/rackspace.py b/libcloud/compute/drivers/rackspace.py index 203a73b7a7..e3f4543c2b 100644 --- a/libcloud/compute/drivers/rackspace.py +++ b/libcloud/compute/drivers/rackspace.py @@ -20,8 +20,7 @@ from libcloud.compute.drivers.openstack import OpenStack_1_0_Connection,\ OpenStack_1_0_NodeDriver, OpenStack_1_0_Response -from libcloud.common.rackspace import ( - AUTH_URL_US, AUTH_URL_UK) +from libcloud.common.rackspace import AUTH_URL_US, AUTH_URL_UK class RackspaceConnection(OpenStack_1_0_Connection): @@ -42,10 +41,12 @@ def get_endpoint(self): elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): ep = self.service_catalog.get_endpoint(name='cloudServers') - if 'publicURL' in ep: - return ep['publicURL'] + public_url = ep.get('publicURL', None) - raise LibcloudError('Could not find specified endpoint') + if not public_url: + raise LibcloudError('Could not find specified endpoint') + + return public_url class RackspaceNodeDriver(OpenStack_1_0_NodeDriver): @@ -71,6 +72,24 @@ class RackspaceUKConnection(RackspaceConnection): Connection class for the Rackspace UK driver """ auth_url = AUTH_URL_UK + _auth_version = '2.0' + + def get_endpoint(self): + ep = self.service_catalog.get_endpoint(service_type='compute', + name='cloudServers') + + public_url = ep.get('publicURL', None) + + if not public_url: + raise LibcloudError('Could not find specified endpoint') + + # Hack which is required because of how global auth works (old + # US accounts work with the lon endpoint, but don't have it in + # the service catalog) + public_url = public_url.replace('https://servers.api', + 'https://lon.servers.api') + + return public_url class RackspaceUKNodeDriver(RackspaceNodeDriver): @@ -94,8 +113,6 @@ class RackspaceAUConnection(RackspaceConnection): _auth_version = '2.0' def get_endpoint(self): - ep = {} - ep = 
self.service_catalog.get_endpoint(service_type='compute', name='cloudServersOpenStack', region='SYD') From 8250517005022e58e80ab8b0115ea0e47533c0c0 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 25 Jun 2013 08:55:00 +0200 Subject: [PATCH 091/143] Modify release script to also generate .tar.gz archive. --- dist/release.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dist/release.sh b/dist/release.sh index 15a809c803..d7a036b673 100755 --- a/dist/release.sh +++ b/dist/release.sh @@ -32,8 +32,8 @@ fi cd .. -python setup.py sdist --formats=bztar,zip +python setup.py sdist --formats=bztar,zip,gztar cd dist -./hash-sign.sh -u ${user} *.tar.bz2 *.zip +./hash-sign.sh -u ${user} *.tar.bz2 *.tar.gz *.zip From 0e7b2f46d040c4123a24824480a3bfae4203249a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 25 Jun 2013 08:57:09 +0200 Subject: [PATCH 092/143] Update .gitignore. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index b1ca781e2a..0bc25c1f09 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ MANIFEST /.coverage coverage_html_report/ .idea +dist/*apache-libcloud* From a6dfaa24ac75a12f0fa9e1dcaadc823f2f4b2dd8 Mon Sep 17 00:00:00 2001 From: Philipp Strube Date: Mon, 24 Jun 2013 15:03:04 -0700 Subject: [PATCH 093/143] Issue LIBCLOUD-349: Implementation of list_images and list_network for Cloudstack driver. 
Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/cloudstack.py | 46 +++++++++++++++++-- .../cloudstack/listNetworks_default.json | 2 +- libcloud/test/compute/test_cloudstack.py | 42 +++++++++++++++-- 3 files changed, 81 insertions(+), 9 deletions(-) diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index eb95a67304..30c66864bc 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -99,6 +99,23 @@ def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id +class CloudStackNetwork(object): + """Class representing a CloudStack Network""" + + def __init__(self, displaytext, name, networkofferingid, id, zoneid): + self.displaytext = displaytext + self.name = name + self.networkofferingid = networkofferingid + self.id = id + self.zoneid = zoneid + + def __repr__(self): + return (('') + % (self.id, self.displaytext, self.name, + self.networkofferingid, self.zoneid, self.driver.name)) + + class CloudStackNodeDriver(CloudStackDriverMixIn, NodeDriver): """Driver for the CloudStack API. 
@@ -157,11 +174,14 @@ def list_images(self, location=None): imgs = self._sync_request('listTemplates', **args) images = [] for img in imgs.get('template', []): - images.append(NodeImage(img['id'], img['name'], self, { - 'hypervisor': img['hypervisor'], - 'format': img['format'], - 'os': img['ostypename'], - })) + images.append(NodeImage( + id=img['id'], + name=img['name'], + driver=self.connection.driver, + extra={ + 'hypervisor': img['hypervisor'], + 'format': img['format'], + 'os': img['ostypename']})) return images def list_locations(self): @@ -363,6 +383,22 @@ def ex_list_disk_offerings(self): return diskOfferings + def ex_list_networks(self): + """List the available networks""" + + nets = self._sync_request('listNetworks')['network'] + + networks = [] + for net in nets: + networks.append(CloudStackNetwork( + net['displaytext'], + net['name'], + net['networkofferingid'], + net['id'], + net['zoneid'])) + + return networks + def create_volume(self, size, name, location, snapshot=None): # TODO Add snapshot handling for diskOffering in self.ex_list_disk_offerings(): diff --git a/libcloud/test/compute/fixtures/cloudstack/listNetworks_default.json b/libcloud/test/compute/fixtures/cloudstack/listNetworks_default.json index 270134783f..7f696f01e4 100644 --- a/libcloud/test/compute/fixtures/cloudstack/listNetworks_default.json +++ b/libcloud/test/compute/fixtures/cloudstack/listNetworks_default.json @@ -1 +1 @@ -{ "listnetworksresponse" : { "network" : [ {"id":860,"name":"Virtual Network","displaytext":"A dedicated virtualized network for your account. 
The broadcast domain is contained within a VLAN and all public network access is routed out by a virtual router.","broadcastdomaintype":"Vlan","traffictype":"Guest","zoneid":1,"networkofferingid":6,"networkofferingname":"DefaultVirtualizedNetworkOffering","networkofferingdisplaytext":"Virtual Vlan","networkofferingavailability":"Required","isshared":false,"issystem":false,"state":"Implemented","related":860,"broadcasturi":"vlan://1459","dns1":"1.1.1.1","dns2":"1.1.1.2","type":"Virtual","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","isdefault":true,"service":[{"name":"Gateway"},{"name":"Firewall","capability":[{"name":"MultipleIps","value":"true"},{"name":"TrafficStatistics","value":"per public ip"},{"name":"StaticNat","value":"true"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedSourceNatTypes","value":"per account"}]},{"name":"UserData"},{"name":"Dns"},{"name":"Dhcp"},{"name":"Lb","capability":[{"name":"TrafficStatistics","value":"per public ip"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedLbAlgorithms","value":"roundrobin,leastconn"}]}],"networkdomain":"cs363local","securitygroupenabled":false} ] } } +{ "listnetworksresponse" : {"count": 1, "network": [{"domain": "ROOT", "acltype": "Domain", "specifyipranges": true, "related": "00304a04-c7ea-4e77-a786-18bc64347bf7", "zoneid": "1128bd56-b4d9-4ac6-a7b9-c715b187ce11", "domainid": "4a8857b8-7235-4e31-a7ef-b8b44d180850", "displaytext": "guestNetworkForBasicZone", "id": "00304a04-c7ea-4e77-a786-18bc64347bf7", "canusefordeploy": true, "physicalnetworkid": "07f747f5-b445-487f-b2d7-81a5a512989e", "networkdomain": "cs1cloud.internal", "service": [{"name": "SecurityGroup"}, {"name": "UserData"}, {"name": "Dhcp"}], "state": "Setup", "type": "Shared", "zonename": "CH-GV2", "networkofferingavailability": "Optional", "networkofferingid": "45964a3a-8a1c-4438-a377-0ff1e264047a", "tags": [], "networkofferingdisplaytext": "Exoscale Offering for Shared Security 
group enabled networks", "subdomainaccess": true, "traffictype": "Guest", "restartrequired": false, "broadcastdomaintype": "Vlan", "name": "guestNetworkForBasicZone", "dns2": "80.245.17.230", "dns1": "80.245.17.229", "networkofferingname": "ExoscaleSharedNetworkOfferingWithSGService", "issystem": false}]} } \ No newline at end of file diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index 864cff453c..31562ff87e 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -14,12 +14,13 @@ # limitations under the License. import sys -import unittest from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import parse_qsl +from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver + try: import simplejson as json except ImportError: @@ -29,6 +30,7 @@ from libcloud.compute.types import DeploymentError, LibcloudError from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation +from libcloud.test import unittest from libcloud.test import MockHttpTestCase from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures @@ -90,6 +92,21 @@ def test_list_images_no_images_available(self): images = self.driver.list_images() self.assertEquals(0, len(images)) + def test_list_images(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listTemplates_default.json') + templates = fixture['listtemplatesresponse']['template'] + + images = self.driver.list_images() + for i, image in enumerate(images): + # NodeImage expects id to be a string, + # the CloudStack fixture has an int + tid = str(templates[i]['id']) + tname = templates[i]['name'] + self.assertIsInstance(image.driver, CloudStackNodeDriver) + self.assertEquals(image.id, tid) + self.assertEquals(image.name, tname) + def test_ex_list_disk_offerings(self): diskOfferings = self.driver.ex_list_disk_offerings() 
self.assertEquals(1, len(diskOfferings)) @@ -99,6 +116,23 @@ def test_ex_list_disk_offerings(self): self.assertEquals('Disk offer 1', diskOffering.name) self.assertEquals(10, diskOffering.size) + def test_ex_list_networks(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listNetworks_default.json') + fixture_networks = fixture['listnetworksresponse']['network'] + + networks = self.driver.ex_list_networks() + + for i, network in enumerate(networks): + self.assertEquals(network.id, fixture_networks[i]['id']) + self.assertEquals( + network.displaytext, fixture_networks[i]['displaytext']) + self.assertEquals(network.name, fixture_networks[i]['name']) + self.assertEquals( + network.networkofferingid, + fixture_networks[i]['networkofferingid']) + self.assertEquals(network.zoneid, fixture_networks[i]['zoneid']) + def test_create_volume(self): volumeName = 'vol-0' location = self.driver.list_locations()[0] @@ -167,8 +201,10 @@ def test_list_keypairs(self): self.assertEqual(keypairs[0]['fingerprint'], fingerprint) def test_create_keypair(self): - self.assertRaises(LibcloudError, self.driver.ex_create_keypair, - 'cs-keypair') + self.assertRaises( + LibcloudError, + self.driver.ex_create_keypair, + 'cs-keypair') def test_delete_keypair(self): res = self.driver.ex_delete_keypair('cs-keypair') From 5c7b98d31ec4cf5d80d0f66bf4ee21542cbddfb0 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 25 Jun 2013 09:48:45 +0200 Subject: [PATCH 094/143] Update changes. --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 42562299e2..72b5a6f12b 100644 --- a/CHANGES +++ b/CHANGES @@ -112,6 +112,10 @@ Changes with Apache Libcloud in deveploment: - Add a driver for Rackspace's new datacenter in Sydney, Australia. [Alex Gaynor, Tomaz Muraus] + - Add ex_list_networks method and missing tests for list_templates to the + CloudStack driver. 
(LIBCLOUD-349) + [Philipp Strube] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles From 30dfdb4848cf7c0cfb42c331afd657b01fbf2be1 Mon Sep 17 00:00:00 2001 From: Jayy Vis Date: Wed, 26 Jun 2013 02:47:24 +0530 Subject: [PATCH 095/143] LIBCLOUD-331: Adds the new error type ProviderError. Modifies InvalidCredsError to extend from it. Signed-off-by: Tomaz Muraus --- libcloud/common/types.py | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/libcloud/common/types.py b/libcloud/common/types.py index 2c31575fe7..f6b9494b16 100644 --- a/libcloud/common/types.py +++ b/libcloud/common/types.py @@ -13,13 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from libcloud.utils.py3 import httplib + __all__ = [ "LibcloudError", "MalformedResponseError", + "ProviderError", "InvalidCredsError", "InvalidCredsException", "LazyList" - ] +] class LibcloudError(Exception): @@ -61,12 +64,20 @@ def __repr__(self): + repr(self.body)) -class InvalidCredsError(LibcloudError): - """Exception used when invalid credentials are used on a provider.""" +class ProviderError(LibcloudError): + """ + Exception used when provider gives back + error response (HTTP 4xx, 5xx) for a request. 
- def __init__(self, value='Invalid credentials with the provider', + Specific sub types can be derieved for errors like + HTTP 401 : InvalidCredsError + HTTP 404 : NodeNotFoundError, ContainerDoesNotExistError + """ + + def __init__(self, value, http_code, driver=None): self.value = value + self.http_code = http_code self.driver = driver def __str__(self): @@ -76,6 +87,16 @@ def __repr__(self): return repr(self.value) +class InvalidCredsError(ProviderError): + """Exception used when invalid credentials are used on a provider.""" + + def __init__(self, value='Invalid credentials with the provider', + driver=None): + super(InvalidCredsError, self).__init__(value, + http_code=httplib.UNAUTHORIZED, + driver=driver) + + # Deprecated alias of L{InvalidCredsError} InvalidCredsException = InvalidCredsError From ed8350e038c4306616941a44c02e00484cdd419d Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 25 Jun 2013 23:40:05 +0200 Subject: [PATCH 096/143] Update changes. --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 72b5a6f12b..444c9c5b77 100644 --- a/CHANGES +++ b/CHANGES @@ -22,6 +22,10 @@ Changes with Apache Libcloud in deveploment: not provided. [Tomaz Muraus] + - Add the new error class ProviderError and modify InvalidCredsError to + inherit from it. (LIBCLOUD-331) + [Jayy Vis] + *) Misc - Add unittest2 library dependency for tests and update some tests to use From d9b9de8826e5846ab5db76815a0722a8bb7a6e3b Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 26 Jun 2013 16:04:03 +0200 Subject: [PATCH 097/143] Modify Rackspace DNS driver to use main auth endpoint and update the code which constructs the API endpoint url. 
--- libcloud/dns/drivers/rackspace.py | 12 +++++++++--- libcloud/test/dns/test_rackspace.py | 13 ++++++++++--- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/libcloud/dns/drivers/rackspace.py b/libcloud/dns/drivers/rackspace.py index dd30171bc7..f33dc2a5e0 100644 --- a/libcloud/dns/drivers/rackspace.py +++ b/libcloud/dns/drivers/rackspace.py @@ -25,7 +25,7 @@ from libcloud.common.base import PollingConnection from libcloud.common.types import LibcloudError from libcloud.utils.misc import merge_valid_keys, get_new_obj -from libcloud.common.rackspace import AUTH_URL_US, AUTH_URL_UK +from libcloud.common.rackspace import AUTH_URL_US from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection from libcloud.compute.drivers.openstack import OpenStack_1_1_Response @@ -71,7 +71,6 @@ class RackspaceDNSConnection(OpenStack_1_1_Connection, PollingConnection): """ Rackspace DNS Connection class. """ - responseCls = RackspaceDNSResponse XML_NAMESPACE = None poll_interval = 2.5 @@ -124,7 +123,14 @@ class RackspaceUSDNSConnection(RackspaceDNSConnection): class RackspaceUKDNSConnection(RackspaceDNSConnection): - auth_url = AUTH_URL_UK + auth_url = AUTH_URL_US + + def get_endpoint(self): + public_url = super(RackspaceUKDNSConnection, self).get_endpoint() + public_url = public_url.replace('https://dns.api', + 'https://lon.dns.api') + + return public_url class RackspaceDNSDriver(DNSDriver, OpenStackDriverMixin): diff --git a/libcloud/test/dns/test_rackspace.py b/libcloud/test/dns/test_rackspace.py index ab61a2035a..5ac698bea6 100644 --- a/libcloud/test/dns/test_rackspace.py +++ b/libcloud/test/dns/test_rackspace.py @@ -30,6 +30,7 @@ class RackspaceUSTests(unittest.TestCase): klass = RackspaceUSDNSDriver + region = '' def setUp(self): self.klass.connectionCls.conn_classes = ( @@ -70,8 +71,13 @@ def test_gets_auth_2_0_endpoint(self): driver = self.klass(*DNS_PARAMS_RACKSPACE, **kwargs) driver.connection._populate_hosts_and_request_paths() - 
self.assertEquals('https://dns.api.rackspacecloud.com/v1.0/11111', - driver.connection.get_endpoint()) + if self.region: + url = 'https://%s.dns.api.rackspacecloud.com/v1.0/11111' % \ + (self.region) + else: + url = 'https://dns.api.rackspacecloud.com/v1.0/11111' + + self.assertEquals(url, driver.connection.get_endpoint()) def test_list_record_types(self): record_types = self.driver.list_record_types() @@ -310,8 +316,9 @@ def test_to_full_record_name_name_not_provided(self): 'foo.bar') -class RackspaceUK1Tests(RackspaceUSTests): +class RackspaceUKTests(RackspaceUSTests): klass = RackspaceUKDNSDriver + region = 'lon' class RackspaceMockHttp(MockHttp): From d6544821096866cdb5ef5223d42352c3d308e238 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 26 Jun 2013 16:05:46 +0200 Subject: [PATCH 098/143] Modify Rackspace loadbalancer driver to use main auth endpoint and update the code which constructs the API endpoint url. --- libcloud/loadbalancer/drivers/rackspace.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/libcloud/loadbalancer/drivers/rackspace.py b/libcloud/loadbalancer/drivers/rackspace.py index 7838de1388..6483014b66 100644 --- a/libcloud/loadbalancer/drivers/rackspace.py +++ b/libcloud/loadbalancer/drivers/rackspace.py @@ -29,7 +29,7 @@ from libcloud.loadbalancer.types import State, MemberCondition from libcloud.common.openstack import OpenStackBaseConnection,\ OpenStackDriverMixin -from libcloud.common.rackspace import (AUTH_URL_US, AUTH_URL_UK) +from libcloud.common.rackspace import AUTH_URL_US class RackspaceResponse(JsonResponse): @@ -289,14 +289,24 @@ def get_endpoint(self): def _construct_loadbalancer_endpoint_from_servers_endpoint(self, ep): if 'publicURL' in ep: - loadbalancer_prefix = "%s.loadbalancers" % self._ex_force_region - return ep['publicURL'].replace("servers", loadbalancer_prefix) + public_url = ep['publicURL'] + + # Old, UK accont + public_url = public_url.replace('lon.servers', 'servers') + 
loadbalancer_prefix = '%s.loadbalancers' % (self._ex_force_region) + + return public_url.replace('servers', loadbalancer_prefix) else: raise LibcloudError('Could not find specified endpoint') class RackspaceUKConnection(RackspaceConnection): - auth_url = AUTH_URL_UK + auth_url = AUTH_URL_US + + def __init__(self, user_id, key, secure=True, ex_force_region='lon', + **kwargs): + super(RackspaceUKConnection, self).__init__(user_id, key, secure, + ex_force_region, **kwargs) class RackspaceLBDriver(Driver, OpenStackDriverMixin): From f3be0573b86250c32651556807f71193221fa842 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 26 Jun 2013 16:49:28 +0200 Subject: [PATCH 099/143] Correctly throw InvalidCredsError if user passes invalid credentials to the DigitalOcean driver. --- CHANGES | 4 ++++ libcloud/compute/drivers/digitalocean.py | 7 +++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 444c9c5b77..95703ee1ca 100644 --- a/CHANGES +++ b/CHANGES @@ -120,6 +120,10 @@ Changes with Apache Libcloud in deveploment: CloudStack driver. (LIBCLOUD-349) [Philipp Strube] + - Correctly throw InvalidCredsError if user passes invalid credentials to + the DigitalOcean driver. + [Tomaz muraus] + *) Storage - Fix an issue with double encoding the container name in the CloudFiles diff --git a/libcloud/compute/drivers/digitalocean.py b/libcloud/compute/drivers/digitalocean.py index cde0b184e9..0f4ee4083c 100644 --- a/libcloud/compute/drivers/digitalocean.py +++ b/libcloud/compute/drivers/digitalocean.py @@ -29,6 +29,9 @@ def parse_error(self): if self.status == httplib.FOUND and '/api/error' in self.body: # Hacky, but DigitalOcean error responses are awful raise InvalidCredsError(self.body) + elif self.status == httplib.UNAUTHORIZED: + body = self.parse_body() + raise InvalidCredsError(body['message']) class SSHKey(object): @@ -99,8 +102,8 @@ def create_node(self, name, size, image, location, ex_ssh_key_ids=None, """ Create a node. 
- @keyword ex_ssh_key_ids: A list of ssh key ids which will be added to - the server. (optional) + @keyword ex_ssh_key_ids: A list of ssh key ids which will be added + to the server. (optional) @type ex_ssh_key_ids: C{list} of C{str} @return: The newly created node. From 4925333d0a68b28b184e13f3abcb09eaced452e7 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 26 Jun 2013 18:52:31 +0200 Subject: [PATCH 100/143] Bump version. --- CHANGES | 2 +- libcloud/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 95703ee1ca..640bd2f4db 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,6 @@ -*- coding: utf-8 -*- -Changes with Apache Libcloud in deveploment: +Changes with Apache Libcloud 0.13.0: *) General diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 58fc9bb9ad..acba9a2c0b 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ['__version__', 'enable_debug'] -__version__ = '0.12.4' +__version__ = '0.13.0' import os import atexit From 9c0d11a4cf7e588ad87c7e57311bbb1d955cc2a7 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 26 Jun 2013 19:13:25 +0200 Subject: [PATCH 101/143] Add missing setUp method to some OpenStack tests. 
--- libcloud/test/compute/test_openstack.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index e77aefb735..1a9ad35829 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -77,6 +77,9 @@ def test_non_xml_content_type_handling(self): class OpenStackServiceCatalogTests(unittest.TestCase): # TODO refactor and move into libcloud/test/common + def setUp(self): + OpenStackBaseConnection.conn_classes = (OpenStackMockHttp, OpenStackMockHttp) + def test_connection_get_service_catalog(self): connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) connection.conn_classes = (OpenStackMockHttp, OpenStackMockHttp) @@ -100,6 +103,9 @@ def test_connection_get_service_catalog(self): class OpenStackAuthConnectionTests(unittest.TestCase): # TODO refactor and move into libcloud/test/common + def setUp(self): + OpenStackBaseConnection.conn_classes = (OpenStackMockHttp, OpenStackMockHttp) + def test_basic_authentication(self): tuples = [ ('1.0', OpenStackMockHttp), From a8e473d2aac6e06b027afc6cb861aae0e8cf3685 Mon Sep 17 00:00:00 2001 From: Bob Thompson Date: Wed, 3 Jul 2013 11:22:02 -0400 Subject: [PATCH 102/143] LIBCLOUD-357: Modified ElasticHosts to store drive UUID in 'extra' field. 
Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/elasticstack.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/libcloud/compute/drivers/elasticstack.py b/libcloud/compute/drivers/elasticstack.py index 7058dbd285..b957614ab2 100644 --- a/libcloud/compute/drivers/elasticstack.py +++ b/libcloud/compute/drivers/elasticstack.py @@ -469,6 +469,14 @@ def _to_node(self, data, ssh_password=None): if 'vnc:password' in data: extra['vnc:password'] = data['vnc:password'] + boot_device = data['boot'] + + if isinstance(boot_device, list): + for device in boot_device: + extra[device] = data[device] + else: + extra[boot_device] = data[boot_device] + if ssh_password: extra.update({'password': ssh_password}) From 360a93de0a5e27f0e5d345d78a63c8f89f251252 Mon Sep 17 00:00:00 2001 From: Bob Thompson Date: Fri, 5 Jul 2013 08:55:00 -0400 Subject: [PATCH 103/143] LIBCLOUD-357: Updated unit tests for ElasticStack to ensure driver UUID is stored in 'extra' field. Signed-off-by: Tomaz Muraus --- libcloud/test/compute/test_elasticstack.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/libcloud/test/compute/test_elasticstack.py b/libcloud/test/compute/test_elasticstack.py index af08dfbf24..0b3faf8a22 100644 --- a/libcloud/test/compute/test_elasticstack.py +++ b/libcloud/test/compute/test_elasticstack.py @@ -105,6 +105,7 @@ def test_list_nodes(self): self.assertEqual(node.public_ips[0], "1.2.3.4") self.assertEqual(node.public_ips[1], "1.2.3.5") self.assertEqual(node.extra['smp'], 1) + self.assertEqual(node.extra['ide:0:0'], "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3") def test_list_sizes(self): images = self.driver.list_sizes() @@ -194,20 +195,20 @@ class ElasticStackMockHttp(MockHttp): fixtures = ComputeFileFixtures('elastichosts') def _servers_info_UNAUTHORIZED(self, method, url, body, headers): - return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.NO_CONTENT]) + return (httplib.UNAUTHORIZED, body, {}, 
httplib.responses[httplib.NO_CONTENT]) def _servers_info_MALFORMED(self, method, url, body, headers): - body = "{malformed: '" - return (httplib.OK, body, {}, httplib.responses[httplib.NO_CONTENT]) + body = "{malformed: '" + return (httplib.OK, body, {}, httplib.responses[httplib.NO_CONTENT]) def _servers_info_PARSE_ERROR(self, method, url, body, headers): - return (505, body, {}, httplib.responses[httplib.NO_CONTENT]) + return (505, body, {}, httplib.responses[httplib.NO_CONTENT]) def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_reset(self, method, url, body, headers): - return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_destroy(self, method, url, body, headers): - return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _drives_create(self, method, url, body, headers): body = self.fixtures.load('drives_create.json') From 5e481e06175681b3be44992fe59d540b43e9db29 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 5 Jul 2013 15:45:29 +0200 Subject: [PATCH 104/143] Update changes. --- CHANGES | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGES b/CHANGES index 640bd2f4db..350598fac3 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,13 @@ -*- coding: utf-8 -*- +Changes with Apache Libcloud in development + + *) Compute + + - Modify ElasticHosts drive to store drive UUID in 'extra' field. + (LIBCLOUD-357) + [Bob Thompson] + Changes with Apache Libcloud 0.13.0: *) General From c79d0009e0fbb9e61f9cdd184826850c43198483 Mon Sep 17 00:00:00 2001 From: Bernard Kerckenaere Date: Fri, 28 Jun 2013 12:06:13 +0200 Subject: [PATCH 105/143] Issue LIBCLOUD-352: Add list_volumes to compute. 
Signed-off-by: Tomaz Muraus --- libcloud/compute/base.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 829ec37cbb..03f0ecf60c 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -770,6 +770,16 @@ def detach_volume(self, volume): raise NotImplementedError('detach not implemented for this driver') + def list_volumes(self): + """ + List storage volumes. + + @return: list of storageVolume objects + @rtype: C{list} of L{StorageVolume} + """ + raise NotImplementedError( + 'list_volumes not implemented for this driver') + def _wait_until_running(self, node, wait_period=3, timeout=600, ssh_interface='public_ips', force_ipv4=True): # This is here for backward compatibility and will be removed in the From 7a7b2f4593b8ad02c540cbed37c2473907e7d8eb Mon Sep 17 00:00:00 2001 From: Emanuele Rocca Date: Mon, 1 Jul 2013 20:36:19 +0200 Subject: [PATCH 106/143] Issue LIBCLOUD-354: Add support for volume-related functions to OpenNebula compute driver Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/opennebula.py | 148 ++++++++++++++++-- .../fixtures/opennebula_3_6/compute_15.xml | 17 ++ .../fixtures/opennebula_3_6/compute_5.xml | 22 +++ .../fixtures/opennebula_3_6/disk_10.xml | 7 + .../fixtures/opennebula_3_6/disk_15.xml | 7 + .../fixtures/opennebula_3_6/storage_5.xml | 13 ++ libcloud/test/compute/test_opennebula.py | 139 ++++++++++++++++ 7 files changed, 342 insertions(+), 11 deletions(-) create mode 100644 libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml create mode 100644 libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml create mode 100644 libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml create mode 100644 libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml create mode 100644 libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml diff --git a/libcloud/compute/drivers/opennebula.py b/libcloud/compute/drivers/opennebula.py index 
7700198094..da7af0704c 100644 --- a/libcloud/compute/drivers/opennebula.py +++ b/libcloud/compute/drivers/opennebula.py @@ -32,7 +32,7 @@ from libcloud.compute.base import NodeState, NodeDriver, Node, NodeLocation from libcloud.common.base import ConnectionUserAndKey, XmlResponse -from libcloud.compute.base import NodeImage, NodeSize +from libcloud.compute.base import NodeImage, NodeSize, StorageVolume from libcloud.common.types import InvalidCredsError from libcloud.compute.providers import Provider @@ -301,6 +301,8 @@ def __new__(cls, key, secret=None, api_version=DEFAULT_API_VERSION, cls = OpenNebula_3_0_NodeDriver elif api_version in ['3.2']: cls = OpenNebula_3_2_NodeDriver + elif api_version in ['3.6']: + cls = OpenNebula_3_6_NodeDriver elif api_version in ['3.8']: cls = OpenNebula_3_8_NodeDriver if 'plain_auth' not in kwargs: @@ -868,28 +870,35 @@ def _extract_images(self, compute): @type compute: L{ElementTree} @param compute: XML representation of a compute node. - @rtype: L{NodeImage} - @return: First disk attached to a compute node. + @rtype: C{list} of L{NodeImage} + @return: Disks attached to a compute node. """ disks = list() for element in compute.findall('DISK'): disk = element.find('STORAGE') - disk_id = disk.attrib['href'].partition('/storage/')[2] + image_id = disk.attrib['href'].partition('/storage/')[2] + + if 'id' in element.attrib: + disk_id = element.attrib['id'] + else: + disk_id = None disks.append( - NodeImage(id=disk_id, + NodeImage(id=image_id, name=disk.attrib.get('name', None), driver=self.connection.driver, extra={'type': element.findtext('TYPE'), + 'disk_id': disk_id, 'target': element.findtext('TARGET')})) - # @TODO: Return all disks when the Node type accepts multiple - # attached disks per node. - if len(disks) > 0: + # Return all disks when the Node type accepts multiple attached disks + # per node. 
+ if len(disks) > 1: + return disks + + if len(disks) == 1: return disks[0] - else: - return None def _extract_size(self, compute): """ @@ -1071,7 +1080,124 @@ def _get_attributes_values(self, attributes, element): return values -class OpenNebula_3_8_NodeDriver(OpenNebula_3_2_NodeDriver): +class OpenNebula_3_6_NodeDriver(OpenNebula_3_2_NodeDriver): + """ + OpenNebula.org node driver for OpenNebula.org v3.6. + """ + + def create_volume(self, size, name, location=None, snapshot=None): + storage = ET.Element('STORAGE') + + vol_name = ET.SubElement(storage, 'NAME') + vol_name.text = name + + vol_type = ET.SubElement(storage, 'TYPE') + vol_type.text = 'DATABLOCK' + + description = ET.SubElement(storage, 'DESCRIPTION') + description.text = 'Attached storage' + + public = ET.SubElement(storage, 'PUBLIC') + public.text = 'NO' + + persistent = ET.SubElement(storage, 'PERSISTENT') + persistent.text = 'YES' + + fstype = ET.SubElement(storage, 'FSTYPE') + fstype.text = 'ext3' + + vol_size = ET.SubElement(storage, 'SIZE') + vol_size.text = str(size) + + xml = ET.tostring(storage) + volume = self.connection.request('/storage', + { 'occixml': xml }, method='POST').object + + return self._to_volume(volume) + + def destroy_volume(self, volume): + url = '/storage/%s' % (str(volume.id)) + resp = self.connection.request(url, method='DELETE') + + return resp.status == httplib.NO_CONTENT + + def attach_volume(self, node, volume, device): + action = ET.Element('ACTION') + + perform = ET.SubElement(action, 'PERFORM') + perform.text = 'ATTACHDISK' + + params = ET.SubElement(action, 'PARAMS') + + ET.SubElement(params, + 'STORAGE', + {'href': '/storage/%s' % (str(volume.id))}) + + target = ET.SubElement(params, 'TARGET') + target.text = device + + xml = ET.tostring(action) + + url = '/compute/%s/action' % node.id + + resp = self.connection.request(url, method='POST', data=xml) + return resp.status == httplib.ACCEPTED + + def _do_detach_volume(self, node_id, disk_id): + action = 
ET.Element('ACTION') + + perform = ET.SubElement(action, 'PERFORM') + perform.text = 'DETACHDISK' + + params = ET.SubElement(action, 'PARAMS') + + ET.SubElement(params, + 'DISK', + {'id': disk_id}) + + xml = ET.tostring(action) + + url = '/compute/%s/action' % node_id + + resp = self.connection.request(url, method='POST', data=xml) + return resp.status == httplib.ACCEPTED + + def detach_volume(self, volume): + # We need to find the node using this volume + for node in self.list_nodes(): + if type(node.image) is not list: + # This node has only one associated image. It is not the one we + # are after. + continue + + for disk in node.image: + if disk.id == volume.id: + # Node found. We can now detach the volume + disk_id = disk.extra['disk_id'] + return self._do_detach_volume(node.id, disk_id) + + return False + + def list_volumes(self): + return self._to_volumes(self.connection.request('/storage').object) + + def _to_volume(self, storage): + return StorageVolume(id=storage.findtext('ID'), + name=storage.findtext('NAME'), + size=int(storage.findtext('SIZE')), + driver=self.connection.driver) + + def _to_volumes(self, object): + volumes = [] + for storage in object.findall('STORAGE'): + storage_id = storage.attrib['href'].partition('/storage/')[2] + + volumes.append(self._to_volume( + self.connection.request('/storage/%s' % storage_id).object)) + + return volumes + +class OpenNebula_3_8_NodeDriver(OpenNebula_3_6_NodeDriver): """ OpenNebula.org node driver for OpenNebula.org v3.8. 
""" diff --git a/libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml b/libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml new file mode 100644 index 0000000000..ce928ecfd0 --- /dev/null +++ b/libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml @@ -0,0 +1,17 @@ + + + 15 + Compute 15 Test + small + ACTIVE + + + FILE + hda + + + + 192.168.122.2 + 02:00:c0:a8:7a:02 + + diff --git a/libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml b/libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml new file mode 100644 index 0000000000..6767122d49 --- /dev/null +++ b/libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml @@ -0,0 +1,22 @@ + + + 5 + Compute 5 Test + small + ACTIVE + + + FILE + hda + + + + FILE + sda + + + + 192.168.122.2 + 02:00:c0:a8:7a:02 + + diff --git a/libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml b/libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml new file mode 100644 index 0000000000..1da6fa241b --- /dev/null +++ b/libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml @@ -0,0 +1,7 @@ + + + 10 + Debian 7.1 LAMP + 2048 + file:///images/debian/wheezy.img + diff --git a/libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml b/libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml new file mode 100644 index 0000000000..811369bf5d --- /dev/null +++ b/libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml @@ -0,0 +1,7 @@ + + + 15 + Debian Sid + 1024 + file:///images/debian/sid.img + diff --git a/libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml b/libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml new file mode 100644 index 0000000000..27aaf735ea --- /dev/null +++ b/libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml @@ -0,0 +1,13 @@ + + 5 + test-volume + + oneadmin + READY + DATABLOCK + Attached storage + 1000 + ext3 + NO + YES + diff --git a/libcloud/test/compute/test_opennebula.py b/libcloud/test/compute/test_opennebula.py index 9cee04d4ee..d978a5daa6 100644 
--- a/libcloud/test/compute/test_opennebula.py +++ b/libcloud/test/compute/test_opennebula.py @@ -631,6 +631,74 @@ def test_list_sizes(self): self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) +class OpenNebula_3_6_Tests(unittest.TestCase, OpenNebulaCaseMixin): + """ + OpenNebula.org test suite for OpenNebula v3.6. + """ + + def setUp(self): + """ + Setup test environment. + """ + OpenNebulaNodeDriver.connectionCls.conn_classes = ( + OpenNebula_3_6_MockHttp, OpenNebula_3_6_MockHttp) + self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.6',)) + + def test_create_volume(self): + new_volume = self.driver.create_volume(1000, 'test-volume') + + self.assertEquals(new_volume.id, '5') + self.assertEquals(new_volume.size, 1000) + self.assertEquals(new_volume.name, 'test-volume') + + def test_destroy_volume(self): + images = self.driver.list_images() + + self.assertEqual(len(images), 2) + image = images[0] + + ret = self.driver.destroy_volume(image) + self.assertTrue(ret) + + def test_attach_volume(self): + nodes = self.driver.list_nodes() + node = nodes[0] + + images = self.driver.list_images() + image = images[0] + + ret = self.driver.attach_volume(node, image, 'sda') + self.assertTrue(ret) + + def test_detach_volume(self): + images = self.driver.list_images() + image = images[1] + + ret = self.driver.detach_volume(image) + self.assertTrue(ret) + + nodes = self.driver.list_nodes() + # node with only a single associated image + node = nodes[1] + + ret = self.driver.detach_volume(node.image) + self.assertFalse(ret) + + def test_list_volumes(self): + volumes = self.driver.list_volumes() + + self.assertEqual(len(volumes), 2) + + volume = volumes[0] + self.assertEqual(volume.id, '5') + self.assertEqual(volume.size, 2048) + self.assertEqual(volume.name, 'Ubuntu 9.04 LAMP') + + volume = volumes[1] + self.assertEqual(volume.id, '15') + self.assertEqual(volume.size, 1024) + self.assertEqual(volume.name, 'Debian Sid') + class 
OpenNebula_3_8_Tests(unittest.TestCase, OpenNebulaCaseMixin): """ OpenNebula.org test suite for OpenNebula v3.8. @@ -1069,6 +1137,77 @@ def _instance_type(self, method, url, body, headers): body = self.fixtures_3_2.load('instance_type_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) +class OpenNebula_3_6_MockHttp(OpenNebula_3_2_MockHttp): + """ + Mock HTTP server for testing v3.6 of the OpenNebula.org compute driver. + """ + + fixtures_3_6 = ComputeFileFixtures('opennebula_3_6') + + def _storage(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('storage_collection.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'POST': + body = self.fixtures_3_6.load('storage_5.xml') + return (httplib.CREATED, body, {}, + httplib.responses[httplib.CREATED]) + + def _compute_5(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures_3_6.load('compute_5.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _compute_5_action(self, method, url, body, headers): + body = self.fixtures_3_6.load('compute_5.xml') + if method == 'POST': + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'GET': + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _compute_15(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures_3_6.load('compute_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + 
def _storage_10(self, method, url, body, headers): + """ + Storage entry resource. + """ + if method == 'GET': + body = self.fixtures_3_6.load('disk_10.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _storage_15(self, method, url, body, headers): + """ + Storage entry resource. + """ + if method == 'GET': + body = self.fixtures_3_6.load('disk_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class OpenNebula_3_8_MockHttp(OpenNebula_3_2_MockHttp): """ From 4c7a1de0f5e69b7afdf707c2c0020c180a9827fd Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 8 Jul 2013 12:55:22 +0200 Subject: [PATCH 107/143] Update changes. --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 350598fac3..158d552ce3 100644 --- a/CHANGES +++ b/CHANGES @@ -8,6 +8,10 @@ Changes with Apache Libcloud in development (LIBCLOUD-357) [Bob Thompson] + - Add support for volume related functions to OpenNebula driver. + (LIBCLOUD-354) + [Emanuele Rocca] + Changes with Apache Libcloud 0.13.0: *) General From 36f8a94d2f9671bda2eb68245de7a700dfbeb98c Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 8 Jul 2013 12:59:22 +0200 Subject: [PATCH 108/143] Add explicit 'return None' to the function. --- libcloud/compute/drivers/opennebula.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/opennebula.py b/libcloud/compute/drivers/opennebula.py index da7af0704c..cf30e74b98 100644 --- a/libcloud/compute/drivers/opennebula.py +++ b/libcloud/compute/drivers/opennebula.py @@ -896,9 +896,10 @@ def _extract_images(self, compute): # per node. 
if len(disks) > 1: return disks - - if len(disks) == 1: + elif len(disks) == 1: return disks[0] + else: + return None def _extract_size(self, compute): """ From 2a2b5041ba72633c09b588d7b68f556175fca715 Mon Sep 17 00:00:00 2001 From: Bernard Kerckenaere Date: Fri, 28 Jun 2013 15:14:59 +0200 Subject: [PATCH 109/143] Issue LIBCLOUD-353: Implement storageVolume methods for openstack driver. Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/openstack.py | 87 ++++++++++++++++++- libcloud/test/compute/__init__.py | 12 ++- .../fixtures/openstack_v1.1/_os_volumes.json | 39 +++++++++ ..._cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json | 22 +++++ .../openstack_v1.1/_os_volumes_create.json | 17 ++++ .../_servers_12065_os_volume_attachments.json | 8 ++ libcloud/test/compute/test_openstack.py | 80 ++++++++++++++++- 7 files changed, 261 insertions(+), 4 deletions(-) create mode 100644 libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes.json create mode 100644 libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json create mode 100644 libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_create.json create mode 100644 libcloud/test/compute/fixtures/openstack_v1.1/_servers_12065_os_volume_attachments.json diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 8c2401a96e..4904443f4e 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -34,10 +34,10 @@ from libcloud.common.openstack import OpenStackBaseConnection from libcloud.common.openstack import OpenStackDriverMixin -from libcloud.common.types import MalformedResponseError +from libcloud.common.types import MalformedResponseError, ProviderError from libcloud.compute.types import NodeState, Provider from libcloud.compute.base import NodeSize, NodeImage -from libcloud.compute.base import NodeDriver, Node, NodeLocation +from libcloud.compute.base import NodeDriver, Node, NodeLocation, 
StorageVolume from libcloud.pricing import get_size_price from libcloud.common.base import Response from libcloud.utils.xml import findall @@ -59,6 +59,10 @@ DEFAULT_API_VERSION = '1.1' +class OpenStackException(ProviderError): + pass + + class OpenStackResponse(Response): node_driver = None @@ -206,6 +210,67 @@ def list_nodes(self): return self._to_nodes( self.connection.request('/servers/detail').object) + def create_volume(self, size, name, location=None, snapshot=None): + if snapshot: + raise NotImplementedError( + "create_volume does not yet support create from snapshot") + return self.connection.request('/os-volumes', + method='POST', + data={ + 'volume': { + 'display_name': name, + 'display_description': name, + 'size': size, + 'volume_type': None, + 'metadata': { + 'contents': name, + }, + 'availability_zone': location, + } + }).success() + + def destroy_volume(self, volume): + return self.connection.request('/os-volumes/%s' % volume.id, + method='DELETE').success() + + def attach_volume(self, node, volume, device="auto"): + # when "auto" or None is provided for device, openstack will let + # the guest OS pick the next available device (fi. 
/dev/vdb) + return self.connection.request( + '/servers/%s/os-volume_attachments' % node.id, + method='POST', + data={ + 'volumeAttachment': { + 'volumeId': volume.id, + 'device': device, + } + }).success() + + def detach_volume(self, volume, ex_node=None): + # when ex_node is not provided, volume is detached from all nodes + failed_nodes = [] + for attachment in volume.extra['attachments']: + if not ex_node or ex_node.id == attachment['serverId']: + if not self.connection.request( + '/servers/%s/os-volume_attachments/%s' % + (attachment['serverId'], attachment['id']), + method='DELETE').success(): + failed_nodes.append(attachment['serverId']) + if failed_nodes: + raise OpenStackException( + 'detach_volume failed for nodes with id: %s' % + ', '.join(failed_nodes), 500, self + ) + return True + + def list_volumes(self): + return self._to_volumes( + self.connection.request('/os-volumes').object) + + def ex_get_volume(self, volumeId): + return self._to_volume( + self.connection.request('/os-volumes/%s' % volumeId).object) + def list_images(self, location=None, ex_only_active=True): """ @inherits: L{NodeDriver.list_images} @@ -1153,6 +1218,10 @@ def _to_nodes(self, obj): servers = obj['servers'] return [self._to_node(server) for server in servers] + def _to_volumes(self, obj): + volumes = obj['volumes'] + return [self._to_volume(volume) for volume in volumes] + def _to_sizes(self, obj): flavors = obj['flavors'] return [self._to_size(flavor) for flavor in flavors] @@ -1658,6 +1727,20 @@ def _to_node(self, api_node): ), ) + def _to_volume(self, api_node): + if 'volume' in api_node: + api_node = api_node['volume'] + return StorageVolume( + id=api_node['id'], + name=api_node['displayName'], + size=api_node['size'], + driver=self, + extra={ + 'description': api_node['displayDescription'], + 'attachments': [att for att in api_node['attachments'] if att], + } + ) + def _to_size(self, api_flavor, price=None, bandwidth=None): # if provider-specific subclasses can get better 
values for # price/bandwidth, then can pass them in when they super(). diff --git a/libcloud/test/compute/__init__.py b/libcloud/test/compute/__init__.py index ae754cfbf0..1ac4a11787 100644 --- a/libcloud/test/compute/__init__.py +++ b/libcloud/test/compute/__init__.py @@ -13,13 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from libcloud.compute.base import Node, NodeImage, NodeLocation +from libcloud.compute.base import Node, NodeImage, NodeLocation, StorageVolume from libcloud.pricing import get_pricing class TestCaseMixin(object): should_list_locations = True should_have_pricing = False + should_list_volumes = False def test_list_nodes_response(self): nodes = self.driver.list_nodes() @@ -46,6 +47,15 @@ def test_list_images_response(self): for image in images: self.assertTrue(isinstance(image, NodeImage)) + def test_list_volumes_response(self): + if not self.should_list_volumes: + return None + + volumes = self.driver.list_volumes() + self.assertTrue(isinstance(volumes, list)) + for volume in volumes: + self.assertTrue(isinstance(volume, StorageVolume)) + def test_list_locations_response(self): if not self.should_list_locations: return None diff --git a/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes.json b/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes.json new file mode 100644 index 0000000000..d92e3e84b9 --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes.json @@ -0,0 +1,39 @@ +{ + "volumes": [ + { + "attachments": [ + { + "device": "/dev/vdb", + "id": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + "serverId": "12065", + "volumeId": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d" + } + ], + "availabilityZone": "nova", + "createdAt": "2013-06-24T11:20:13.000000", + "displayDescription": "", + "displayName": "test_volume_2", + "id": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + "metadata": {}, + "size": 2, + "snapshotId": null, + "status": "available", + 
"volumeType": "None" + }, + { + "attachments": [ + {} + ], + "availabilityZone": "nova", + "createdAt": "2013-06-21T12:39:02.000000", + "displayDescription": "some description", + "displayName": "test_volume", + "id": "cfcec3bc-b736-4db5-9535-4c24112691b5", + "metadata": {}, + "size": 50, + "snapshotId": null, + "status": "available", + "volumeType": "None" + } + ] +} diff --git a/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json b/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json new file mode 100644 index 0000000000..b2c580fa98 --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json @@ -0,0 +1,22 @@ +{ + "volume": { + "attachments": [ + { + "device": "/dev/vdb", + "id": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + "serverId": "12065", + "volumeId": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d" + } + ], + "availabilityZone": "nova", + "createdAt": "2013-06-24T11:20:13.000000", + "displayDescription": "", + "displayName": "test_volume_2", + "id": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + "metadata": {}, + "size": 2, + "snapshotId": null, + "status": "in-use", + "volumeType": "None" + } +} diff --git a/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_create.json b/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_create.json new file mode 100644 index 0000000000..d1a0cdf383 --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_create.json @@ -0,0 +1,17 @@ +{ + "volume": { + "attachments": [ + {} + ], + "availabilityZone": "nova", + "createdAt": "2013-06-28T12:22:39.616660", + "displayDescription": null, + "displayName": "test", + "id": "43b7db44-0497-40fa-b817-c906f13bbea3", + "metadata": {}, + "size": 1, + "snapshotId": null, + "status": "creating", + "volumeType": "None" + } +} diff --git 
a/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12065_os_volume_attachments.json b/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12065_os_volume_attachments.json new file mode 100644 index 0000000000..c381384ac5 --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12065_os_volume_attachments.json @@ -0,0 +1,8 @@ +{ + "volumeAttachment": { + "device": "/dev/vdb", + "id": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + "serverId": "12065", + "volumeId": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d" + } +} diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index 1a9ad35829..5216c2d8dc 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -39,7 +39,7 @@ OpenStack_1_0_NodeDriver, OpenStack_1_0_Response, OpenStack_1_1_NodeDriver, OpenStackSecurityGroup, OpenStackSecurityGroupRule ) -from libcloud.compute.base import Node, NodeImage, NodeSize +from libcloud.compute.base import Node, NodeImage, NodeSize, StorageVolume from libcloud.pricing import set_pricing, clear_pricing_data from libcloud.test import MockResponse, MockHttpTestCase, XML_HEADERS @@ -222,6 +222,7 @@ def _get_mock_connection(self, mock_http_class): class OpenStack_1_0_Tests(unittest.TestCase, TestCaseMixin): should_list_locations = False + should_list_volumes = False driver_klass = OpenStack_1_0_NodeDriver driver_args = OPENSTACK_PARAMS @@ -525,6 +526,7 @@ def test_list_sizes_with_specified_pricing(self): class OpenStack_1_0_FactoryMethodTests(OpenStack_1_0_Tests): should_list_locations = False + should_list_volumes = False driver_klass = OpenStack_1_0_NodeDriver driver_type = get_driver(Provider.OPENSTACK) @@ -694,6 +696,7 @@ def _v1_1_auth_INTERNAL_SERVER_ERROR(self, method, url, body, headers): class OpenStack_1_1_Tests(unittest.TestCase, TestCaseMixin): should_list_locations = False + should_list_volumes = True driver_klass = OpenStack_1_1_NodeDriver driver_type = 
OpenStack_1_1_NodeDriver @@ -819,6 +822,26 @@ def test_list_nodes(self): self.assertEqual(node.extra['updated'], '2011-10-11T00:50:04Z') self.assertEqual(node.extra['created'], '2011-10-11T00:51:39Z') + def test_list_volumes(self): + volumes = self.driver.list_volumes() + self.assertEqual(len(volumes), 2) + volume = volumes[0] + + self.assertEqual('cd76a3a1-c4ce-40f6-9b9f-07a61508938d', volume.id) + self.assertEqual('test_volume_2', volume.name) + self.assertEqual(2, volume.size) + + self.assertEqual(volume.extra['description'], '') + self.assertEqual(volume.extra['attachments'][0]['id'], 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d') + + volume = volumes[1] + self.assertEqual('cfcec3bc-b736-4db5-9535-4c24112691b5', volume.id) + self.assertEqual('test_volume', volume.name) + self.assertEqual(50, volume.size) + + self.assertEqual(volume.extra['description'], 'some description') + self.assertEqual(volume.extra['attachments'], []) + def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 8, 'Wrong sizes count') @@ -887,6 +910,24 @@ def test_destroy_node(self): def test_reboot_node(self): self.assertTrue(self.node.reboot()) + def test_create_volume(self): + self.assertEqual(self.driver.create_volume(1, 'test'), True) + + def test_destroy_volume(self): + volume = self.driver.ex_get_volume('cd76a3a1-c4ce-40f6-9b9f-07a61508938d') + self.assertEqual(self.driver.destroy_volume(volume), True) + + def test_attach_volume(self): + node = self.driver.list_nodes()[0] + volume = self.driver.ex_get_volume('cd76a3a1-c4ce-40f6-9b9f-07a61508938d') + self.assertEqual(self.driver.attach_volume(node, volume, '/dev/sdb'), True) + + def test_detach_volume(self): + node = self.driver.list_nodes()[0] + volume = self.driver.ex_get_volume('cd76a3a1-c4ce-40f6-9b9f-07a61508938d') + self.assertEqual(self.driver.attach_volume(node, volume, '/dev/sdb'), True) + self.assertEqual(self.driver.detach_volume(volume), True) + def test_ex_set_password(self): try: 
self.driver.ex_set_password(self.node, 'New1&53jPass') @@ -1098,6 +1139,7 @@ def test_ex_delete_security_group_rule(self): class OpenStack_1_1_FactoryMethodTests(OpenStack_1_1_Tests): should_list_locations = False + should_list_volumes = True driver_klass = OpenStack_1_1_NodeDriver driver_type = get_driver(Provider.OPENSTACK) @@ -1287,6 +1329,42 @@ def _v1_1_slug_os_security_group_rules_2(self, method, url, body, headers): else: raise NotImplementedError() + def _v1_1_slug_os_volumes(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_os_volumes.json') + elif method == "POST": + body = self.fixtures.load('_os_volumes_create.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json') + elif method == "DELETE": + body = '' + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_12065_os_volume_attachments(self, method, url, body, headers): + if method == "POST": + body = self.fixtures.load('_servers_12065_os_volume_attachments.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_12065_os_volume_attachments_cd76a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, headers): + if method == "DELETE": + body = '' + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + # This exists because the nova compute url in devstack has v2 in there but the v1.1 fixtures # work fine. 
From b3a7467bf8a4d35ef2aaaab57c44c26fa44034b8 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 11 Jul 2013 16:10:17 +0200 Subject: [PATCH 110/143] Update changes. --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 158d552ce3..9ca8cebbbd 100644 --- a/CHANGES +++ b/CHANGES @@ -12,6 +12,10 @@ Changes with Apache Libcloud in development (LIBCLOUD-354) [Emanuele Rocca] + - Add methods for managing store volumes to the OpenStack driver. + (LIBCLOUD-353) + [Bernard Kerckenaere] + Changes with Apache Libcloud 0.13.0: *) General From c1980ca76a00dbc5c92b90eda915a4390ff4d65c Mon Sep 17 00:00:00 2001 From: Rick Wright Date: Fri, 12 Jul 2013 12:57:20 -0700 Subject: [PATCH 111/143] Add new driver for Google Compute Engine. Signed-off-by: Tomaz Muraus --- demos/gce_demo.py | 282 +++ demos/secrets.py-dist | 3 + libcloud/common/google.py | 520 +++++ libcloud/compute/drivers/__init__.py | 1 + libcloud/compute/drivers/gce.py | 1794 +++++++++++++++++ libcloud/compute/providers.py | 2 + libcloud/compute/types.py | 2 + libcloud/test/common/test_google.py | 241 +++ .../fixtures/gce/aggregated_addresses.json | 71 + .../fixtures/gce/aggregated_disks.json | 81 + .../fixtures/gce/aggregated_instances.json | 414 ++++ .../fixtures/gce/aggregated_machineTypes.json | 1683 ++++++++++++++++ .../fixtures/gce/global_firewalls.json | 88 + .../gce/global_firewalls_lcfirewall.json | 19 + .../global_firewalls_lcfirewall_delete.json | 14 + .../gce/global_firewalls_lcfirewall_put.json | 14 + .../fixtures/gce/global_firewalls_post.json | 13 + .../compute/fixtures/gce/global_images.json | 22 + .../fixtures/gce/global_images.json.save | 22 + .../compute/fixtures/gce/global_networks.json | 34 + .../fixtures/gce/global_networks_default.json | 9 + .../gce/global_networks_lcnetwork.json | 9 + .../gce/global_networks_lcnetwork_delete.json | 14 + ...networks_libcloud-demo-europe-network.json | 9 + ...global_networks_libcloud-demo-network.json | 9 + 
.../fixtures/gce/global_networks_post.json | 13 + ...on_global_firewalls_lcfirewall_delete.json | 15 + ...ation_global_firewalls_lcfirewall_put.json | 15 + ...tions_operation_global_firewalls_post.json | 15 + ...tion_global_networks_lcnetwork_delete.json | 15 + ...ations_operation_global_networks_post.json | 15 + ...s-central1_addresses_lcaddress_delete.json | 15 + ...on_regions_us-central1_addresses_post.json | 15 + ...n_zones_europe-west1-a_instances_post.json | 25 + ...nes_us-central1-a_disks_lcdisk_delete.json | 15 + ...ration_zones_us-central1-a_disks_post.json | 16 + ...entral1-a_instances_lcnode-000_delete.json | 16 + ...entral1-a_instances_lcnode-001_delete.json | 16 + ...a_instances_node-name_attachDisk_post.json | 16 + ...central1-a_instances_node-name_delete.json | 16 + ...a_instances_node-name_detachDisk_post.json | 16 + ...ral1-a_instances_node-name_reset_post.json | 15 + ...l1-a_instances_node-name_setTags_post.json | 16 + ...on_zones_us-central1-a_instances_post.json | 16 + .../test/compute/fixtures/gce/project.json | 74 + .../projects_debian-cloud_global_images.json | 157 ++ .../gce/regions_us-central1_addresses.json | 29 + ...gions_us-central1_addresses_lcaddress.json | 11 + ...s-central1_addresses_lcaddress_delete.json | 15 + .../regions_us-central1_addresses_post.json | 14 + libcloud/test/compute/fixtures/gce/zones.json | 207 ++ .../gce/zones_europe-west1-a_instances.json | 145 ++ .../zones_europe-west1-a_instances_post.json | 15 + ...pe-west1-a_machineTypes_n1-standard-1.json | 14 + .../fixtures/gce/zones_us-central1-a.json | 40 + .../gce/zones_us-central1-a_disks.json | 37 + .../gce/zones_us-central1-a_disks_lcdisk.json | 10 + ...nes_us-central1-a_disks_lcdisk_delete.json | 15 + .../gce/zones_us-central1-a_disks_post.json | 14 + .../gce/zones_us-central1-a_instances.json | 232 +++ ...es_us-central1-a_instances_lcnode-000.json | 42 + ...entral1-a_instances_lcnode-000_delete.json | 15 + ...es_us-central1-a_instances_lcnode-001.json | 42 + 
...entral1-a_instances_lcnode-001_delete.json | 15 + ...nes_us-central1-a_instances_node-name.json | 42 + ...a_instances_node-name_attachDisk_post.json | 15 + ...central1-a_instances_node-name_delete.json | 15 + ...a_instances_node-name_detachDisk_post.json | 15 + ...ral1-a_instances_node-name_reset_post.json | 15 + ...l1-a_instances_node-name_setTags_post.json | 15 + .../zones_us-central1-a_instances_post.json | 14 + .../gce/zones_us-central1-a_machineTypes.json | 374 ++++ ...central1-a_machineTypes_n1-standard-1.json | 14 + libcloud/test/compute/test_gce.py | 703 +++++++ libcloud/test/secrets.py-dist | 3 + 75 files changed, 8039 insertions(+) create mode 100755 demos/gce_demo.py create mode 100644 libcloud/common/google.py create mode 100644 libcloud/compute/drivers/gce.py create mode 100644 libcloud/test/common/test_google.py create mode 100644 libcloud/test/compute/fixtures/gce/aggregated_addresses.json create mode 100644 libcloud/test/compute/fixtures/gce/aggregated_disks.json create mode 100644 libcloud/test/compute/fixtures/gce/aggregated_instances.json create mode 100644 libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json create mode 100644 libcloud/test/compute/fixtures/gce/global_firewalls.json create mode 100644 libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json create mode 100644 libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json create mode 100644 libcloud/test/compute/fixtures/gce/global_firewalls_post.json create mode 100644 libcloud/test/compute/fixtures/gce/global_images.json create mode 100644 libcloud/test/compute/fixtures/gce/global_images.json.save create mode 100644 libcloud/test/compute/fixtures/gce/global_networks.json create mode 100644 libcloud/test/compute/fixtures/gce/global_networks_default.json create mode 100644 libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json create mode 
100644 libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.json create mode 100644 libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json create mode 100644 libcloud/test/compute/fixtures/gce/global_networks_post.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json create mode 100644 
libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json create mode 100644 libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.json create mode 100644 libcloud/test/compute/fixtures/gce/project.json create mode 100644 libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json create mode 100644 libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json create mode 100644 libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.json create mode 100644 libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json create mode 100644 libcloud/test/compute/fixtures/gce/zones.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json create mode 100644 
libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.json create mode 100644 libcloud/test/compute/test_gce.py diff --git a/demos/gce_demo.py b/demos/gce_demo.py new file mode 100755 index 0000000000..92a31b2008 --- /dev/null +++ b/demos/gce_demo.py @@ -0,0 +1,282 @@ +#!/usr/bin/env python +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This example performs several tasks on Google Compute Engine. It can be run +# directly or can be imported into an interactive python session. This can +# also serve as an integration test for the GCE Node Driver. +# +# To run interactively: +# - Make sure you have valid values in secrets.py +# (For more information about setting up your credentials, see the +# libcloud/common/google.py docstring) +# - Run 'python' in this directory, then: +# import gce_demo +# gce = gce_demo.get_gce_driver() +# gce.list_nodes() +# etc. +# - Or, to run the full demo from the interactive python shell: +# import gce_demo +# gce_demo.CLEANUP = False # optional +# gce_demo.MAX_NODES = 4 # optional +# gce_demo.DATACENTER = 'us-central1-a' # optional +# gce_demo.main() + +import os.path +import sys + +try: + import secrets +except ImportError: + secrets = None + +# Add parent dir of this file's dir to sys.path (OS-agnostically) +sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), + os.path.pardir))) + +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +# Maximum number of 1-CPU nodes to allow to run simultaneously +MAX_NODES = 5 + +# String that all resource names created by the demo will start with +# WARNING: Any resource that has a matching name will be destroyed. 
+DEMO_BASE_NAME = 'libcloud-demo' + +# Datacenter to create resources in +DATACENTER = 'us-central1-a' + +# Clean up resources at the end (can be set to false in order to +# inspect resources at the end of the run). Resources will be cleaned +# at the beginning regardless. +CLEANUP = True + +args = getattr(secrets, 'GCE_PARAMS', ()) +kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) + + +# ==== HELPER FUNCTIONS ==== +def get_gce_driver(): + driver = get_driver(Provider.GCE)(*args, datacenter=DATACENTER, **kwargs) + return driver + + +def display(title, resource_list): + """ + Display a list of resources. + + @param title: String to be printed at the heading of the list. + @type title: C{str} + + @param resource_list: List of resources to display + @type resource_list: Any C{object} with a C{name} attribute + """ + print('%s:' % title) + for item in resource_list[:10]: + print(' %s' % item.name) + + +def clean_up(base_name, node_list=None, resource_list=None): + """ + Destroy all resources that have a name beginning with 'base_name'. 
+ + @param base_name: String with the first part of the name of resources + to destroy + @type base_name: C{str} + + @keyword node_list: List of nodes to consider for deletion + @type node_list: C{list} of L{Node} + + @keyword resource_list: List of resources to consider for deletion + @type resource_list: C{list} of I{Resource Objects} + """ + if node_list is None: + node_list = [] + if resource_list is None: + resource_list = [] + # Use ex_destroy_multiple_nodes to destroy nodes + del_nodes = [] + for node in node_list: + if node.name.startswith(base_name): + del_nodes.append(node) + + result = gce.ex_destroy_multiple_nodes(del_nodes) + for i, success in enumerate(result): + if success: + print(' Deleted %s' % del_nodes[i].name) + else: + print(' Failed to delete %s' % del_nodes[i].name) + + # Destroy everything else with just the destroy method + for resource in resource_list: + if resource.name.startswith(base_name): + if resource.destroy(): + print(' Deleted %s' % resource.name) + else: + print(' Failed to Delete %s' % resource.name) + + +# ==== DEMO CODE STARTS HERE ==== +def main(): + global gce + gce = get_gce_driver() + # Get project info and print name + project = gce.ex_get_project() + print('Project: %s' % project.name) + + # == Get Lists of Everything and Display the lists (up to 10) == + # These can either just return values for the current datacenter (zone) + # or for everything. + all_nodes = gce.list_nodes(ex_zone='all') + display('Nodes', all_nodes) + + all_addresses = gce.ex_list_addresses(region='all') + display('Addresses', all_addresses) + + all_volumes = gce.list_volumes(ex_zone='all') + display('Volumes', all_volumes) + + # This can return everything, but there is a large amount of overlap, + # so we'll just get the sizes from the current zone. 
+ sizes = gce.list_sizes() + display('Sizes', sizes) + + # These are global + firewalls = gce.ex_list_firewalls() + display('Firewalls', firewalls) + + networks = gce.ex_list_networks() + display('Networks', networks) + + images = gce.list_images() + display('Images', images) + + locations = gce.list_locations() + display('Locations', locations) + + zones = gce.ex_list_zones() + display('Zones', zones) + + # == Clean up any old demo resources == + print('Cleaning up any "%s" resources:' % DEMO_BASE_NAME) + clean_up(DEMO_BASE_NAME, all_nodes, + all_addresses + all_volumes + firewalls + networks) + + # == Create Node with non-persistent disk == + if MAX_NODES > 1: + print('Creating Node with non-persistent disk:') + name = '%s-np-node' % DEMO_BASE_NAME + node_1 = gce.create_node(name, 'n1-standard-1', 'debian-7', + ex_tags=['libcloud']) + print(' Node %s created' % name) + + # == Create, and attach a disk == + print('Creating a new disk:') + disk_name = '%s-attach-disk' % DEMO_BASE_NAME + volume = gce.create_volume(1, disk_name) + if volume.attach(node_1): + print (' Attached %s to %s' % (volume.name, node_1.name)) + + if CLEANUP: + # == Detach the disk == + if gce.detach_volume(volume, ex_node=node_1): + print(' Detached %s from %s' % (volume.name, node_1.name)) + + # == Create Node with persistent disk == + print('Creating Node with Persistent disk:') + name = '%s-persist-node' % DEMO_BASE_NAME + # Use objects this time instead of names + # Get latest Debian 7 image + image = gce.ex_get_image('debian-7') + # Get Machine Size + size = gce.ex_get_size('n1-standard-1') + # Create Disk. 
Size is None to just take default of image + volume_name = '%s-boot-disk' % DEMO_BASE_NAME + volume = gce.create_volume(None, volume_name, image=image) + # Create Node with Disk + node_2 = gce.create_node(name, size, image, ex_tags=['libcloud'], + ex_boot_disk=volume) + print(' Node %s created with attached disk %s' % (node_2.name, + volume.name)) + + # == Update Tags for Node == + print('Updating Tags for %s' % node_2.name) + tags = node_2.extra['tags'] + tags.append('newtag') + if gce.ex_set_node_tags(node_2, tags): + print(' Tags updated for %s' % node_2.name) + check_node = gce.ex_get_node(node_2.name) + print(' New tags: %s' % check_node.extra['tags']) + + # == Create Multiple nodes at once == + base_name = '%s-multiple-nodes' % DEMO_BASE_NAME + number = MAX_NODES - 2 + if number > 0: + print('Creating Multiple Nodes (%s):' % number) + multi_nodes = gce.ex_create_multiple_nodes(base_name, size, image, + number, + ex_tags=['libcloud']) + for node in multi_nodes: + print(' Node %s created.' 
% node.name) + + # == Create a Network == + print('Creating Network:') + name = '%s-network' % DEMO_BASE_NAME + cidr = '10.10.0.0/16' + network_1 = gce.ex_create_network(name, cidr) + print(' Network %s created' % network_1.name) + + # == Create a Firewall == + print('Creating a Firewall:') + name = '%s-firewall' % DEMO_BASE_NAME + allowed = [{'IPProtocol': 'tcp', + 'ports': ['3141']}] + firewall_1 = gce.ex_create_firewall(name, allowed, network=network_1, + source_tags=['libcloud']) + print(' Firewall %s created' % firewall_1.name) + + # == Create a Static Address == + print('Creating an Address:') + name = '%s-address' % DEMO_BASE_NAME + address_1 = gce.ex_create_address(name) + print(' Address %s created with IP %s' % (address_1.name, + address_1.address)) + + # == List Updated Resources in current zone/region == + print('Updated Resources in current zone/region:') + nodes = gce.list_nodes() + display('Nodes', nodes) + + addresses = gce.ex_list_addresses() + display('Addresses', addresses) + + volumes = gce.list_volumes() + display('Volumes', volumes) + + firewalls = gce.ex_list_firewalls() + display('Firewalls', firewalls) + + networks = gce.ex_list_networks() + display('Networks', networks) + + if CLEANUP: + print('Cleaning up %s resources created.' 
% DEMO_BASE_NAME) + clean_up(DEMO_BASE_NAME, nodes, + addresses + volumes + firewalls + networks) + +if __name__ == '__main__': + main() diff --git a/demos/secrets.py-dist b/demos/secrets.py-dist index f7e3fc668f..82c3de168c 100644 --- a/demos/secrets.py-dist +++ b/demos/secrets.py-dist @@ -22,6 +22,9 @@ DREAMHOST_PARAMS = ('key',) EC2_PARAMS = ('access_id', 'secret') ECP_PARAMS = ('user_name', 'password') GANDI_PARAMS = ('user',) +GCE_PARAMS = ('email_address', 'key') # Service Account Authentication +#GCE_PARAMS = ('client_id', 'client_secret') # Installed App Authentication +GCE_KEYWORD_PARAMS = {'project': 'project_name'} HOSTINGCOM_PARAMS = ('user', 'secret') IBM_PARAMS = ('user', 'secret') # OPENSTACK_PARAMS = ('user_name', 'api_key', secure_bool, 'host', port_int) diff --git a/libcloud/common/google.py b/libcloud/common/google.py new file mode 100644 index 0000000000..5c6e5241cf --- /dev/null +++ b/libcloud/common/google.py @@ -0,0 +1,520 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Module for Google Connection and Authentication classes. 
+
+Information about setting up your Google OAUTH2 credentials:
+
+For libcloud, there are two basic methods for authenticating to Google using
+OAUTH2: Service Accounts and Client IDs for Installed Applications.
+
+Both are initially set up from the
+U{API Console <https://code.google.com/apis/console#access>}
+
+Setting up Service Account authentication (note that you need the PyCrypto
+package installed to use this):
+    - Go to the API Console
+    - Click on "Create another client ID..."
+    - Select "Service account" and click on "Create client ID"
+    - Download the Private Key
+    - The key that you download is a PKCS12 key. It needs to be converted to
+      the PEM format.
+    - Convert the key using OpenSSL (the default password is 'notasecret'):
+      C{openssl pkcs12 -in YOURPRIVKEY.p12 -nodes -nocerts
+      | openssl rsa -out PRIV.pem}
+    - Move the .pem file to a safe location.
+    - To Authenticate, you will need to pass the Service Account's "Email
+      address" in as the user_id and the path to the .pem file as the key.
+
+Setting up Installed Application authentication:
+    - Go to the API Console
+    - Click on "Create another client ID..."
+    - Select "Installed application" and click on "Create client ID"
+    - To Authenticate, pass in the "Client ID" as the user_id and the "Client
+      secret" as the key
+    - The first time that you do this, libcloud will give you a URL to
+      visit. Copy and paste the URL into a browser.
+    - When you go to the URL it will ask you to log in (if you aren't already)
+      and ask you if you want to allow the project access to your account.
+    - Click on Accept and you will be given a code.
+    - Paste that code at the prompt given to you by the Google libcloud
+      connection.
+    - At that point, a token & refresh token will be stored in your home
+      directory and will be used for authentication.
+
+Please remember to secure your keys and access tokens.
+""" +from __future__ import with_statement + +try: + import simplejson as json +except ImportError: + import json + +import base64 +import calendar +import errno +import time +import datetime +import os +import socket + +from libcloud.utils.py3 import urlencode, urlparse, PY3 +from libcloud.common.base import (ConnectionUserAndKey, JsonResponse, + PollingConnection) +from libcloud.compute.types import (InvalidCredsError, + MalformedResponseError, + LibcloudError) + +try: + from Crypto.Hash import SHA256 + from Crypto.PublicKey import RSA + from Crypto.Signature import PKCS1_v1_5 +except ImportError: + # The pycrypto library is unavailable + SHA256 = None + RSA = None + PKCS1_v1_5 = None + +TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ' + + +class GoogleAuthError(LibcloudError): + """Generic Error class for various authentication errors.""" + def __init__(self, value): + self.value = value + + def __repr__(self): + return repr(self.value) + + +class GoogleResponse(JsonResponse): + pass + + +class GoogleBaseDriver(object): + name = "Google API" + + +class GoogleBaseAuthConnection(ConnectionUserAndKey): + """ + Base class for Google Authentication. Should be subclassed for specific + types of authentication. + """ + driver = GoogleBaseDriver + responseCls = GoogleResponse + name = 'Google Auth' + host = 'accounts.google.com' + auth_path = '/o/oauth2/auth' + + def __init__(self, user_id, key, scope, + redirect_uri='urn:ietf:wg:oauth:2.0:oob', + login_hint=None, **kwargs): + """ + @param user_id: The email address (for service accounts) or Client ID + (for installed apps) to be used for authentication. + @type user_id: C{str} + + @param key: The RSA Key (for service accounts) or file path containing + key or Client Secret (for installed apps) to be used for + authentication. + @type key: C{str} + + @param scope: A list of urls defining the scope of authentication + to grant. + @type scope: C{list} + + @keyword redirect_uri: The Redirect URI for the authentication + request. 
See Google OAUTH2 documentation for + more info. + @type redirect_uri: C{str} + + @keyword login_hint: Login hint for authentication request. Useful + for Installed Application authentication. + @type login_hint: C{str} + """ + + self.scope = " ".join(scope) + self.redirect_uri = redirect_uri + self.login_hint = login_hint + + super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs) + + def _now(self): + return datetime.datetime.utcnow() + + def add_default_headers(self, headers): + headers['Content-Type'] = "application/x-www-form-urlencoded" + headers['Host'] = self.host + return headers + + def _token_request(self, request_body): + """ + Return an updated token from a token request body. + + @param request_body: A dictionary of values to send in the body of the + token request. + @type request_body: C{dict} + + @return: A dictionary with updated token information + @rtype: C{dict} + """ + data = urlencode(request_body) + now = self._now() + response = self.request('/o/oauth2/token', method='POST', data=data) + token_info = response.object + if 'expires_in' in token_info: + expire_time = now + datetime.timedelta( + seconds=token_info['expires_in']) + token_info['expire_time'] = expire_time.strftime(TIMESTAMP_FORMAT) + return token_info + + +class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection): + """Authentication connection for "Installed Application" authentication.""" + def get_code(self): + """ + Give the user a URL that they can visit to authenticate and obtain a + code. This method will ask for that code that the user can paste in. 
+ + @return: Code supplied by the user after authenticating + @rtype: C{str} + """ + auth_params = {'response_type': 'code', + 'client_id': self.user_id, + 'redirect_uri': self.redirect_uri, + 'scope': self.scope, + 'state': 'Libcloud Request'} + if self.login_hint: + auth_params['login_hint'] = self.login_hint + + data = urlencode(auth_params) + + url = 'https://%s%s?%s' % (self.host, self.auth_path, data) + print('Please Go to the following URL and sign in:') + print(url) + if PY3: + code = input('Enter Code:') + else: + code = raw_input('Enter Code:') + return code + + def get_new_token(self): + """ + Get a new token. Generally used when no previous token exists or there + is no refresh token + + @return: Dictionary containing token information + @rtype: C{dict} + """ + # Ask the user for a code + code = self.get_code() + + token_request = {'code': code, + 'client_id': self.user_id, + 'client_secret': self.key, + 'redirect_uri': self.redirect_uri, + 'grant_type': 'authorization_code'} + + return self._token_request(token_request) + + def refresh_token(self, token_info): + """ + Use the refresh token supplied in the token info to get a new token. + + @param token_info: Dictionary containing current token information + @type token_info: C{dict} + + @return: A dictionary containing updated token information. 
+        @rtype: C{dict}
+        """
+        if 'refresh_token' not in token_info:
+            return self.get_new_token()
+        refresh_request = {'refresh_token': token_info['refresh_token'],
+                           'client_id': self.user_id,
+                           'client_secret': self.key,
+                           'grant_type': 'refresh_token'}
+
+        new_token = self._token_request(refresh_request)
+        if 'refresh_token' not in new_token:
+            new_token['refresh_token'] = token_info['refresh_token']
+        return new_token
+
+
+class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):
+    """Authentication class for "Service Account" authentication."""
+    def __init__(self, user_id, key, *args, **kwargs):
+        """
+        Check to see if PyCrypto is available, and convert key file path into a
+        key string if the key is in a file.
+
+        @param user_id: Email address to be used for Service Account
+                        authentication.
+        @type user_id: C{str}
+
+        @param key: The RSA Key or path to file containing the key.
+        @type key: C{str}
+        """
+        if SHA256 is None:
+            raise GoogleAuthError('PyCrypto library required for '
+                                  'Service Account Authentication.')
+        # Check to see if 'key' is a file and read the file if it is.
+        keypath = os.path.expanduser(key)
+        is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
+        if is_file_path:
+            with open(keypath, 'r') as f:
+                key = f.read()
+        super(GoogleServiceAcctAuthConnection, self).__init__(
+            user_id, key, *args, **kwargs)
+
+    def get_new_token(self):
+        """
+        Get a new token using the email address and RSA Key.
+ + @return: Dictionary containing token information + @rtype: C{dict} + """ + # The header is always the same + header = {'alg': 'RS256', 'typ': 'JWT'} + header_enc = base64.urlsafe_b64encode(json.dumps(header)) + + # Construct a claim set + claim_set = {'iss': self.user_id, + 'scope': self.scope, + 'aud': 'https://accounts.google.com/o/oauth2/token', + 'exp': int(time.time()) + 3600, + 'iat': int(time.time())} + claim_set_enc = base64.urlsafe_b64encode(json.dumps(claim_set)) + + # The message contains both the header and claim set + message = '%s.%s' % (header_enc, claim_set_enc) + # Then the message is signed using the key supplied + key = RSA.importKey(self.key) + hash_func = SHA256.new(message) + signer = PKCS1_v1_5.new(key) + signature = base64.urlsafe_b64encode(signer.sign(hash_func)) + + # Finally the message and signature are sent to get a token + jwt = '%s.%s' % (message, signature) + request = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer', + 'assertion': jwt} + + return self._token_request(request) + + def refresh_token(self, token_info): + """ + Refresh the current token. + + Service Account authentication doesn't supply a "refresh token" so + this simply gets a new token using the email address/key. + + @param token_info: Dictionary contining token information. + (Not used, but here for compatibility) + @type token_info: C{dict} + + @return: A dictionary containing updated token information. + @rtype: C{dict} + """ + return self.get_new_token() + + +class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection): + """Base connection class for interacting with Google APIs.""" + driver = GoogleBaseDriver + responseCls = GoogleResponse + host = 'www.googleapis.com' + poll_interval = 2.0 + timeout = 120 + + def __init__(self, user_id, key, auth_type=None, + credential_file=None, **kwargs): + """ + Determine authentication type, set up appropriate authentication + connection and get initial authentication information. 
+ + @param user_id: The email address (for service accounts) or Client ID + (for installed apps) to be used for authentication. + @type user_id: C{str} + + @param key: The RSA Key (for service accounts) or file path containing + key or Client Secret (for installed apps) to be used for + authentication. + @type key: C{str} + + @keyword auth_type: Accepted values are "SA" or "IA" + ("Service Account" or "Installed Application"). + If not supplied, auth_type will be guessed based + on value of user_id. + @type auth_type: C{str} + + @keyword credential_file: Path to file for caching authentication + information. + @type credential_file: C{str} + """ + self.credential_file = credential_file or '~/.gce_libcloud_auth' + + if auth_type is None: + # Try to guess. Service accounts use an email address + # as the user id. + if '@' in user_id: + auth_type = 'SA' + else: + auth_type = 'IA' + if 'scope' in kwargs: + self.scope = kwargs['scope'] + kwargs.pop('scope', None) + self.token_info = self._get_token_info_from_file() + if auth_type == 'SA': + self.auth_conn = GoogleServiceAcctAuthConnection( + user_id, key, self.scope, **kwargs) + elif auth_type == 'IA': + self.auth_conn = GoogleInstalledAppAuthConnection( + user_id, key, self.scope, **kwargs) + else: + raise GoogleAuthError('auth_type should be \'SA\' or \'IA\'') + + if self.token_info is None: + self.token_info = self.auth_conn.get_new_token() + self._write_token_info_to_file() + + self.token_expire_time = datetime.datetime.strptime( + self.token_info['expire_time'], TIMESTAMP_FORMAT) + + super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs) + + def _now(self): + return datetime.datetime.utcnow() + + def add_default_headers(self, headers): + """ + @inherits: L{Connection.add_default_headers} + """ + headers['Content-Type'] = "application/json" + headers['Host'] = self.host + return headers + + def pre_connect_hook(self, params, headers): + """ + Check to make sure that token hasn't expired. 
If it has, get an + updated token. Also, add the token to the headers. + + @inherits: L{Connection.pre_connect_hook} + """ + now = self._now() + if self.token_expire_time < now: + self.token_info = self.auth_conn.refresh_token(self.token_info) + self.token_expire_time = datetime.datetime.strptime( + self.token_info['expire_time'], TIMESTAMP_FORMAT) + self._write_token_info_to_file() + headers['Authorization'] = 'Bearer %s' % ( + self.token_info['access_token']) + + return params, headers + + def encode_data(self, data): + """Encode data to JSON""" + return json.dumps(data) + + def request(self, *args, **kwargs): + """ + @inherits: L{Connection.request} + """ + # Adds some retry logic for the occasional + # "Connection Reset by peer" error. + retries = 4 + tries = 0 + while tries < (retries - 1): + try: + return super(GoogleBaseConnection, self).request( + *args, **kwargs) + except socket.error: + e = sys.exc_info()[1] + if e.errno == errno.ECONNRESET: + tries = tries + 1 + else: + raise e + # One more time, then give up. + return super(GoogleBaseConnecion, self).request(*args, **kwargs) + + def _get_token_info_from_file(self): + """ + Read credential file and return token information. + + @return: Token information dictionary, or None + @rtype: C{dict} or C{None} + """ + token_info = None + filename = os.path.realpath(os.path.expanduser(self.credential_file)) + + try: + with open(filename, 'r') as f: + data = f.read() + token_info = json.loads(data) + except IOError: + pass + return token_info + + def _write_token_info_to_file(self): + """ + Write token_info to credential file. + """ + filename = os.path.realpath(os.path.expanduser(self.credential_file)) + data = json.dumps(self.token_info) + with open(filename, 'w') as f: + f.write(data) + + def has_completed(self, response): + """ + Determine if operation has completed based on response. 
+ + @param response: JSON response + @type response: I{responseCls} + + @return: True if complete, False otherwise + @rtype: C{bool} + """ + if response.object['status'] == 'DONE': + return True + else: + return False + + def get_poll_request_kwargs(self, response, context, request_kwargs): + """ + @inherits: L{PollingConnection.get_poll_request_kwargs} + """ + return {'action': response.object['selfLink']} + + def morph_action_hook(self, action): + """ + Update action to correct request path. + + In many places, the Google API returns a full URL to a resource. + This will strip the scheme and host off of the path and just return + the request. Otherwise, it will append the base request_path to + the action. + + @param action: The action to be called in the http request + @type action: C{str} + + @return: The modified request based on the action + @rtype: C{str} + """ + if action.startswith('https://'): + u = urlparse.urlsplit(action) + request = urlparse.urlunsplit(('', '', u[2], u[3], u[4])) + else: + request = self.request_path + action + return request diff --git a/libcloud/compute/drivers/__init__.py b/libcloud/compute/drivers/__init__.py index 68f273a823..9c6c078391 100644 --- a/libcloud/compute/drivers/__init__.py +++ b/libcloud/compute/drivers/__init__.py @@ -27,6 +27,7 @@ 'elasticstack', 'elastichosts', 'cloudsigma', + 'gce', 'gogrid', 'hostvirtual', 'ibm_sce', diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py new file mode 100644 index 0000000000..c46b66de06 --- /dev/null +++ b/libcloud/compute/drivers/gce.py @@ -0,0 +1,1794 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Module for Google Compute Engine Driver. +""" +from __future__ import with_statement + +import datetime +import time +import sys +import os +import getpass + +from libcloud.common.google import GoogleResponse +from libcloud.common.google import GoogleBaseConnection + +from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation +from libcloud.compute.base import NodeSize, StorageVolume, UuidMixin +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState, LibcloudError + +API_VERSION = 'v1beta15' +DEFAULT_TASK_COMPLETION_TIMEOUT = 180 + + +def timestamp_to_datetime(timestamp): + """ + Return a datetime object that corresponds to the time in an RFC3339 + timestamp. 
+ + @param timestamp: RFC3339 timestamp string + @type timestamp: C{str} + + @return: Datetime object corresponding to timestamp + @rtype: C{datetime} + """ + # We remove timezone offset and microseconds (Python 2.5 strptime doesn't + # support %f) + ts = datetime.datetime.strptime(timestamp[:-10], '%Y-%m-%dT%H:%M:%S') + tz_hours = int(timestamp[-5:-3]) + tz_mins = int(timestamp[-2:]) * int(timestamp[-6:-5] + '1') + tz_delta = datetime.timedelta(hours=tz_hours, minutes=tz_mins) + return ts + tz_delta + + +class GCEError(LibcloudError): + """Base class for general GCE Errors""" + def __init__(self, code, value): + self.code = code + self.value = value + + def __repr__(self): + return repr(self.code) + ": " + repr(self.value) + + +class GCEKnownError(GCEError): + """Base class for GCE Errors that can be classified""" + def __init__(self, value): + self.value = value + + def __repr__(self): + return repr(self.value) + + +class QuotaExceededError(GCEKnownError): + pass + + +class ResourceExistsError(GCEKnownError): + pass + + +class GCEResponse(GoogleResponse): + pass + + +class GCEConnection(GoogleBaseConnection): + """Connection class for the GCE driver.""" + host = 'www.googleapis.com' + responseCls = GCEResponse + + def __init__(self, user_id, key, secure, auth_type=None, + credential_file=None, project=None, **kwargs): + self.scope = ['https://www.googleapis.com/auth/compute'] + super(GCEConnection, self).__init__(user_id, key, secure=secure, + auth_type=auth_type, + credential_file=credential_file, + **kwargs) + self.request_path = '/compute/%s/projects/%s' % (API_VERSION, + project) + + +class GCEAddress(UuidMixin): + """A GCE Static address.""" + def __init__(self, id, name, address, region, driver, extra=None): + self.id = str(id) + self.name = name + self.address = address + self.region = region + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def __repr__(self): + return '' % ( + self.id, self.name, self.address) + + def 
destroy(self): + """ + Destroy this address. + + @return: True if successful + @rtype: C{bool} + """ + return self.driver.ex_destroy_address(address=self) + + +class GCEFailedNode(object): + """Dummy Node object for nodes that are not created.""" + def __init__(self, name, error): + self.name = name + self.error = error + + def __repr__(self): + return '' % ( + self.name, self.error['code']) + + +class GCEFirewall(UuidMixin): + """A GCE Firewall rule class.""" + def __init__(self, id, name, allowed, network, source_ranges, source_tags, + driver, extra=None): + self.id = str(id) + self.name = name + self.network = network + self.allowed = allowed + self.source_ranges = source_ranges + self.source_tags = source_tags + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def __repr__(self): + return '' % ( + self.id, self.name, self.network.name) + + def destroy(self): + """ + Destroy this firewall. + + @return: True if successful + @rtype: C{bool} + """ + return self.driver.ex_destroy_firewall(firewall=self) + + +class GCENetwork(UuidMixin): + """A GCE Network object class.""" + def __init__(self, id, name, cidr, driver, extra=None): + self.id = str(id) + self.name = name + self.cidr = cidr + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def __repr__(self): + return '' % ( + self.id, self.name, self.cidr) + + def destroy(self): + """ + Destroy this newtwork + + @return: True if successful + @rtype: C{bool} + """ + return self.driver.ex_destroy_network(network=self) + + +class GCENodeSize(NodeSize): + """A GCE Node Size (MachineType) class.""" + def __init__(self, id, name, ram, disk, bandwidth, price, driver, + extra=None): + self.extra = extra + super(GCENodeSize, self).__init__(id, name, ram, disk, bandwidth, + price, driver) + + +class GCEProject(UuidMixin): + """GCE Project information.""" + def __init__(self, id, name, metadata, quotas, driver, extra=None): + self.id = str(id) + self.name = name + self.metadata = 
metadata + self.quotas = quotas + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def __repr__(self): + return '' % (self.id, self.name) + + +class GCEZone(NodeLocation): + """Subclass of NodeLocation to provide additional information.""" + def __init__(self, id, name, status, maintenance_windows, quotas, + deprecated, driver, extra=None): + self.status = status + self.maintenance_windows = maintenance_windows + self.quotas = quotas + self.deprecated = deprecated + self.extra = extra + country = name.split('-')[0] + super(GCEZone, self).__init__(id=str(id), name=name, country=country, + driver=driver) + + def _now(self): + """ + Returns current UTC time. + + Can be overridden in unittests. + """ + return datetime.datetime.utcnow() + + def _get_next_maint(self): + """ + Returns the next Maintenance Window. + + @return: A dictionary containing maintenance window info + The dictionary contains 4 keys with values of type C{str} + - C{name}: The name of the maintence window + - C{description}: Description of the maintenance window + - C{beginTime}: RFC3339 Timestamp + - C{endTime}: RFC3339 Timestamp + @rtype: C{dict} + """ + begin = None + next_window = None + if len(self.maintenance_windows) == 1: + return self.maintenance_windows[0] + for mw in self.maintenance_windows: + begin_next = timestamp_to_datetime(mw['beginTime']) + if (not begin) or (begin_next < begin): + begin = begin_next + next_window = mw + return next_window + + def _get_time_until_mw(self): + """ + Returns time until next maintenance window. + + @return: Time until next maintenance window + @rtype: C{datetime.timedelta} + """ + next_window = self._get_next_maint() + now = self._now() + next_begin = timestamp_to_datetime(next_window['beginTime']) + return next_begin - now + + def _get_next_mw_duration(self): + """ + Returns the duration of the next maintenance window. 
+ + @return: Duration of next maintenance window + @rtype: C{datetime.timedelta} + """ + next_window = self._get_next_maint() + next_begin = timestamp_to_datetime(next_window['beginTime']) + next_end = timestamp_to_datetime(next_window['endTime']) + return next_end - next_begin + + @property + def time_until_mw(self): + """ + Returns the time until the next Maintenance Window as a + datetime.timedelta object. + """ + return self._get_time_until_mw() + + @property + def next_mw_duration(self): + """ + Returns the duration of the next Maintenance Window as a + datetime.timedelta object. + """ + return self._get_next_mw_duration() + + def __repr__(self): + return '' % (self.id, self.name, + self.status) + + +class GCENodeDriver(NodeDriver): + """ + Base class for GCE Node Driver. + """ + connectionCls = GCEConnection + api_name = 'googleapis' + name = "Google Compute Engine" + type = Provider.GCE + website = 'https://www.googleapis.com/' + + NODE_STATE_MAP = { + "PROVISIONING": NodeState.PENDING, + "STAGING": NodeState.PENDING, + "RUNNING": NodeState.RUNNING, + "STOPPED": NodeState.TERMINATED, + "TERMINATED": NodeState.TERMINATED + } + + def __init__(self, user_id, key, datacenter=None, project=None, + auth_type=None, **kwargs): + """ + @param user_id: The email address (for service accounts) or Client ID + (for installed apps) to be used for authentication. + @type user_id: C{str} + + @param key: The RSA Key (for service accounts) or file path containing + key or Client Secret (for installed apps) to be used for + authentication. + @type key: C{str} + + @keyword datacenter: The name of the datacenter (zone) used for + operations. + @type datacenter: C{str} + + @keyword project: Your GCE project name. (required) + @type project: C{str} + + @keyword auth_type: Accepted values are "SA" or "IA" + ("Service Account" or "Installed Application"). + If not supplied, auth_type will be guessed based + on value of user_id. 
+ @type auth_type: C{str} + """ + self.auth_type = auth_type + self.project = project + if not self.project: + raise ValueError('Project name must be specified using ' + '"project" keyword.') + super(GCENodeDriver, self).__init__(user_id, key, **kwargs) + + # Cache Zone information to reduce API calls and increase speed + self.base_path = '/compute/%s/projects/%s' % (API_VERSION, + self.project) + self.zone_list = self.ex_list_zones() + self.zone_dict = {} + for zone in self.zone_list: + self.zone_dict[zone.name] = zone + if datacenter: + self.zone = self.ex_get_zone(datacenter) + else: + self.zone = None + + def _ex_connection_class_kwargs(self): + return {'auth_type': self.auth_type, + 'project': self.project} + + def _categorize_error(self, error): + """ + Parse error message returned from GCE operation and raise the + appropriate Exception. + + @param error: Error dictionary from a GCE Operations response + @type error: C{dict} + """ + err = error['errors'][0] + message = err['message'] + code = err['code'] + if code == 'QUOTA_EXCEEDED': + raise QuotaExceededError(message) + elif code == 'RESOURCE_ALREADY_EXISTS': + raise ResourceExistsError(message) + else: + raise GCEError(code, message) + + def _find_zone(self, name, res_type, region=False): + """ + Find the zone for a named resource. + + @param name: Name of resource to find + @type name: C{str} + + @param res_type: Type of resource to find. + Examples include: 'disks', 'instances' or 'addresses' + @type res_type: C{str} + + @keyword region: If True, find a region instead of a zone. + @keyword region: C{bool} + + @return: Name of zone (or region) that the resource is in. 
+ @rtype: C{str} + """ + request = '/aggregated/%s' % res_type + res_list = self.connection.request(request).object + for k, v in res_list['items'].items(): + for res in v.get(res_type, []): + if res['name'] == name: + if region: + return k.replace('regions/', '') + else: + return k.replace('zones/', '') + + def _match_images(self, project, partial_name): + """ + Find the latest image, given a partial name. + + For example, providing 'debian-7' will return the image object for the + most recent image with a name that starts with 'debian-7' in the + supplied project. If no project is given, it will search your own + project. + + @param project: The name of the project to search for images. + Examples include: 'debian-cloud' and 'centos-cloud'. + @type project: C{str} or C{None} + + @param partial_name: The full name or beginning of a name for an + image. + @type partial_name: C{str} + + @return: The latest image object that maches the partial name. + @rtype: L{NodeImage} + """ + project_images = self.list_images(project) + partial_match = [] + for image in project_images: + if image.name == partial_name: + return image + if image.name.startswith(partial_name): + ts = timestamp_to_datetime(image.extra['creationTimestamp']) + if not partial_match or partial_match[0] < ts: + partial_match = [ts, image] + + if partial_match: + return partial_match[1] + + def ex_list_addresses(self, region=None): + """ + Return a list of static addreses for a region or all. + + @keyword region: The region to return addresses from. For example: + 'us-central1'. If None, will return addresses from + region of self.zone. If 'all', will return all + addresses. + @type region: C{str} or C{None} + + @return: A list of static address objects. 
+ @rtype: C{list} of L{GCEAddress} + """ + list_addresses = [] + if region is None and self.zone: + region = '-'.join(self.zone.name.split('-')[:-1]) + elif region == 'all': + region = None + + if region is None: + request = '/aggregated/addresses' + else: + request = '/regions/%s/addresses' % region + + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated result returns dictionaries for each region + if region is None: + for v in response['items'].values(): + region_addresses = [self._to_address(a) for a in + v.get('addresses', [])] + list_addresses.extend(region_addresses) + else: + list_addresses = [self._to_address(a) for a in + response['items']] + return list_addresses + + def ex_list_firewalls(self): + """ + Return the list of firewalls. + + @return: A list of firewall objects. + @rtype: C{list} of L{GCEFirewall} + """ + list_firewalls = [] + request = '/global/firewalls' + response = self.connection.request(request, method='GET').object + list_firewalls = [self._to_firewall(f) for f in + response.get('items', [])] + return list_firewalls + + def list_images(self, ex_project=None): + """ + Return a list of image objects for a project. + + @keyword ex_project: Optional alternate project name. 
+ @type ex_project: C{str} or C{None} + + @return: List of NodeImage objects + @rtype: C{list} of L{NodeImage} + """ + list_images = [] + request = '/global/images' + if ex_project is None: + response = self.connection.request(request, method='GET').object + else: + # Save the connection request_path + save_request_path = self.connection.request_path + # Override the connection request path + new_request_path = save_request_path.replace(self.project, + ex_project) + self.connection.request_path = new_request_path + response = self.connection.request(request, method='GET').object + # Restore the connection request_path + self.connection.request_path = save_request_path + list_images = [self._to_node_image(i) for i in + response.get('items', [])] + return list_images + + def list_locations(self): + """ + Return a list of locations (zones). + + The L{ex_list_zones} method returns more comprehensive results, but + this is here for compatibility. + + @return: List of NodeLocation objects + @rtype: C{list} of L{NodeLocation} + """ + list_locations = [] + request = '/zones' + response = self.connection.request(request, method='GET').object + list_locations = [self._to_node_location(l) for l in response['items']] + return list_locations + + def ex_list_networks(self): + """ + Return the list of networks. + + @return: A list of network objects. + @rtype: C{list} of L{GCENetwork} + """ + list_networks = [] + request = '/global/networks' + response = self.connection.request(request, method='GET').object + list_networks = [self._to_network(n) for n in + response.get('items', [])] + return list_networks + + def list_nodes(self, ex_zone=None): + """ + Return a list of nodes in the current zone or all zones. 
+ + @keyword ex_zone: Optional zone name or 'all' + @type ex_zone: C{str} or L{GCEZone} or L{NodeLocation} or C{None} + + @return: List of Node objects + @rtype: C{list} of L{Node} + """ + list_nodes = [] + # Use provided zone or default zone + zone = ex_zone or self.zone + # Setting ex_zone to 'all' overrides the default zone + if zone == 'all': + zone = None + if zone is None: + request = '/aggregated/instances' + elif hasattr(zone, 'name'): + request = '/zones/%s/instances' % zone.name + else: + request = '/zones/%s/instances' % zone + + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated response returns a dict for each zone + if zone is None: + for v in response['items'].values(): + zone_nodes = [self._to_node(i) for i in + v.get('instances', [])] + list_nodes.extend(zone_nodes) + else: + list_nodes = [self._to_node(i) for i in response['items']] + return list_nodes + + def list_sizes(self, location=None): + """ + Return a list of sizes (machineTypes) in a zone. 
+ + @keyword location: Location or Zone for sizes + @type location: C{str} or L{GCEZone} or L{NodeLocation} or C{None} + + @return: List of GCENodeSize objects + @rtype: C{list} of L{GCENodeSize} + """ + list_sizes = [] + location = location or self.zone + if location == 'all': + location = None + if location is None: + request = '/aggregated/machineTypes' + elif hasattr(location, 'name'): + request = '/zones/%s/machineTypes' % location.name + else: + request = '/zones/%s/machineTypes' % location + + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated response returns a dict for each zone + if location is None: + for v in response['items'].values(): + zone_sizes = [self._to_node_size(s) for s in + v.get('machineTypes', [])] + list_sizes.extend(zone_sizes) + else: + list_sizes = [self._to_node_size(s) for s in response['items']] + return list_sizes + + def list_volumes(self, ex_zone=None): + """ + Return a list of volumes for a zone or all. + + Will return list from provided zone, or from the default zone unless + given the value of 'all'. + + @keyword region: The zone to return volumes from. + @type region: C{str} or L{GCEZone} or L{NodeLocation} or C{None} + + @return: A list of volume objects. 
+ @rtype: C{list} of L{StorageVolume} + """ + list_volumes = [] + zone = ex_zone or self.zone + if zone == 'all': + zone = None + if zone is None: + request = '/aggregated/disks' + elif hasattr(zone, 'name'): + request = '/zones/%s/disks' % zone.name + else: + request = '/zones/%s/disks' % zone + + response = self.connection.request(request, method='GET').object + if 'items' in response: + # The aggregated response returns a dict for each zone + if zone is None: + for v in response['items'].values(): + zone_volumes = [self._to_storage_volume(d) for d in + v.get('disks', [])] + list_volumes.extend(zone_volumes) + else: + list_volumes = [self._to_storage_volume(d) for d in + response['items']] + return list_volumes + + def ex_list_zones(self): + """ + Return the list of zones. + + @return: A list of zone objects. + @rtype: C{list} of L{GCEZone} + """ + list_zones = [] + request = '/zones' + response = self.connection.request(request, method='GET').object + list_zones = [self._to_zone(z) for z in response['items']] + return list_zones + + def ex_create_address(self, name, region=None): + """ + Create a static address in a region. + + @param name: Name of static address + @type name: C{str} + + @param region: Name of region for the addres (e.g. 'us-central1') + @type region: C{str} + + @return: Static Address object + @rtype: L{GCEAddress} + """ + if region is None and self.zone: + region = '-'.join(self.zone.name.split('-')[:-1]) + elif region is None: + raise GCEError('REGION_NOT_SPECIFIED', + 'Region must be provided for an address') + address_data = {'name': name} + request = '/regions/%s/addresses' % region + response = self.connection.async_request(request, method='POST', + data=address_data).object + if 'error' in response: + self._categorize_error(response['error']) + return self.ex_get_address(name, region=region) + + def ex_create_firewall(self, name, allowed, network='default', + source_ranges=None, source_tags=None): + """ + Create a firewall on a network. 
+ + Firewall rules should be supplied in the "allowed" field. This is a + list of dictionaries formated like so ("ports" is optional):: + [{"IPProtocol": "", + "ports": [ ""}] + + For example, to allow tcp on port 8080 and udp on all ports, 'allowed' + would be:: + [{"IPProtocol": "tcp", + "ports": ["8080"]}, + {"IPProtocol": "udp"}] + See U{Firewall Reference} for more information. + + @param name: Name of the firewall to be created + @type name: C{str} + + @param allowed: List of dictionaries with rules + @type allowed: C{list} of C{dict} + + @keyword network: The network that the firewall applies to. + @type network: C{str} or L{GCENetwork} + + @keyword source_ranges: A list of IP ranges in CIDR format that the + firewall should apply to. + @type source_ranges: C{list} of C{str} + + @keyword source_tags: A list of instance tags which the rules apply + @type source_tags: C{list} of C{str} + + @return: Firewall object + @rtype: L{GCEFirewall} + """ + firewall_data = {} + if not hasattr(network, 'name'): + nw = self.ex_get_network(network) + else: + nw = network + + firewall_data['name'] = name + firewall_data['allowed'] = allowed + firewall_data['network'] = nw.extra['selfLink'] + if source_ranges is not None: + firewall_data['sourceRanges'] = source_ranges + if source_tags is not None: + firewall_data['sourceTags'] = source_tags + + request = '/global/firewalls' + + response = self.connection.async_request(request, method='POST', + data=firewall_data).object + if 'error' in response: + self._categorize_error(response['error']) + return self.ex_get_firewall(name) + + def ex_create_network(self, name, cidr): + """ + Create a network. + + @param name: Name of network to be created + @type name: C{str} + + @param cidr: Address range of network in CIDR format. 
+ @type cidr: C{str} + + @return: Network object + @rtype: L{GCENetwork} + """ + network_data = {} + network_data['name'] = name + network_data['IPv4Range'] = cidr + + request = '/global/networks' + + response = self.connection.async_request(request, method='POST', + data=network_data).object + if 'error' in response: + self._categorize_error(response['error']) + + return self.ex_get_network(name) + + def _create_node_req(self, name, size, image, location, network, + tags=None, metadata=None, boot_disk=None): + """ + Returns a request and body to create a new node. This is a helper + method to suppor both L{create_node} and L{ex_create_multiple_nodes}. + + @param name: The name of the node to create. + @type name: C{str} + + @param size: The machine type to use. + @type size: L{GCENodeSize} + + @param image: The image to use to create the node (or, if using a + persistent disk, the image the disk was created from). + @type image: L{NodeImage} + + @param location: The location (zone) to create the node in. + @type location: L{NodeLocation} or L{GCEZone} + + @param network: The network to associate with the node. + @type network: L{GCENetwork} + + @keyword tags: A list of tags to assiciate with the node. + @type tags: C{list} of C{str} + + @keyword metadata: Metadata dictionary for instance. + @type metadata: C{dict} + + @keyword boot_disk: Persistent boot disk to attach + @type L{StorageVolume} + + @return: A tuple containing a request string and a node_data dict. 
+ @rtype: C{tuple} of C{str} and C{dict} + """ + node_data = {} + node_data['machineType'] = size.extra['selfLink'] + node_data['name'] = name + if tags: + node_data['tags'] = {'items': tags} + if metadata: + node_data['metadata'] = metadata + if boot_disk: + disks = [{'kind': 'compute#attachedDisk', + 'boot': True, + 'type': 'PERSISTENT', + 'mode': 'READ_WRITE', + 'deviceName': boot_disk.name, + 'zone': boot_disk.extra['zone'].extra['selfLink'], + 'source': boot_disk.extra['selfLink']}] + node_data['disks'] = disks + node_data['kernel'] = image.extra['preferredKernel'] + else: + node_data['image'] = image.extra['selfLink'] + + ni = [{'kind': 'compute#instanceNetworkInterface', + 'accessConfigs': [{'name': 'External NAT', + 'type': 'ONE_TO_ONE_NAT'}], + 'network': network.extra['selfLink']}] + node_data['networkInterfaces'] = ni + + request = '/zones/%s/instances' % location.name + + return request, node_data + + def create_node(self, name, size, image, location=None, + ex_network='default', ex_tags=None, ex_metadata=None, + ex_boot_disk=None): + """ + Create a new node and return a node object for the node. + + @param name: The name of the node to create. + @type name: C{str} + + @param size: The machine type to use. + @type size: C{str} or L{GCENodeSize} + + @param image: The image to use to create the node (or, if attaching + a persistent disk, the image used to create the disk) + @type image: C{str} or L{NodeImage} + + @keyword location: The location (zone) to create the node in. + @type location: C{str} or L{NodeLocation} or L{GCEZone} or C{None} + + @keyword ex_network: The network to associate with the node. + @type ex_network: C{str} or L{GCENetwork} + + @keyword ex_tags: A list of tags to assiciate with the node. + @type ex_tags: C{list} of C{str} or C{None} + + @keyword ex_metadata: Metadata dictionary for instance. + @type ex_metadata: C{dict} or C{None} + + @keyword ex_boot_disk: The boot disk to attach to the instance. 
+ @type ex_boot_disk: L{StorageVolume} + + @return: A Node object for the new node. + @rtype: L{Node} + """ + location = location or self.zone + if not hasattr(location, 'name'): + location = self.ex_get_zone(location) + if not hasattr(size, 'name'): + size = self.ex_get_size(size, location) + if not hasattr(ex_network, 'name'): + ex_network = self.ex_get_network(ex_network) + if not hasattr(image, 'name'): + image = self.ex_get_image(image) + + request, node_data = self._create_node_req(name, size, image, + location, ex_network, + ex_tags, ex_metadata, + ex_boot_disk) + response = self.connection.async_request(request, method='POST', + data=node_data).object + if 'error' in response: + self._categorize_error(response['error']) + + return self.ex_get_node(name, location.name) + + def ex_create_multiple_nodes(self, base_name, size, image, number, + location=None, ex_network='default', + ex_tags=None, ex_metadata=None, + ignore_errors=True, + timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): + """ + Create multiple nodes and return a list of Node objects. + + Nodes will be named with the base name and a number. For example, if + the base name is 'libcloud' and you create 3 nodes, they will be + named:: + libcloud-000 + libcloud-001 + libcloud-002 + + @param base_name: The base name of the nodes to create. + @type base_name: C{str} + + @param size: The machine type to use. + @type size: C{str} or L{GCENodeSize} + + @param image: The image to use to create the nodes. + @type image: C{str} or L{NodeImage} + + @param number: The number of nodes to create. + @type number: C{int} + + @keyword location: The location (zone) to create the nodes in. + @type location: C{str} or L{NodeLocation} or L{GCEZone} or C{None} + + @keyword ex_network: The network to associate with the nodes. + @type ex_network: C{str} or L{GCENetwork} + + @keyword ex_tags: A list of tags to assiciate with the nodes. 
+ @type ex_tags: C{list} of C{str} or C{None} + + @keyword ex_metadata: Metadata dictionary for instances. + @type ex_metadata: C{dict} or C{None} + + @keyword ignore_errors: If True, don't raise Exceptions if one or + more nodes fails. + @type ignore_errors: C{bool} + + @keyword timeout: The number of seconds to wait for all nodes to be + created before timing out. + + @return: A list of Node objects for the new nodes. + @rtype: C{list} of L{Node} + """ + node_data = {} + location = location or self.zone + if not hasattr(location, 'name'): + location = self.ex_get_zone(location) + if not hasattr(size, 'name'): + size = self.ex_get_size(size, location) + if not hasattr(ex_network, 'name'): + ex_network = self.ex_get_network(ex_network) + if not hasattr(image, 'name'): + image = self.ex_get_image(image) + + node_list = [None] * number + responses = [] + for i in range(number): + name = '%s-%03d' % (base_name, i) + request, node_data = self._create_node_req(name, size, image, + location, ex_network, + ex_tags, ex_metadata) + response = self.connection.request(request, method='POST', + data=node_data) + responses.append(response.object) + + start_time = time.time() + complete = False + while not complete: + if (time.time() - start_time >= timeout): + raise Exception("Timeout (%s sec) while waiting for multiple " + "instances") + complete = True + for i, operation in enumerate(responses): + if operation is None: + continue + response = self.connection.request( + operation['selfLink']).object + if response['status'] == 'DONE': + responses[i] = None + name = '%s-%03d' % (base_name, i) + if 'error' in response: + if ignore_errors: + error = response['error']['errors'][0] + node_list[i] = GCEFailedNode(name, error) + else: + self._categorize_error(response['error']) + else: + node_list[i] = self.ex_get_node(name, location.name) + else: + complete = False + time.sleep(2) + return node_list + + def create_volume(self, size, name, location=None, image=None, + snapshot=None): 
+ """ + Create a volume (disk). + + @param size: Size of volume to create (in GB). Can be None if image + or snapshot is supplied. + @type size: C{int} or C{str} or C{None} + + @param name: Name of volume to create + @type name: C{str} + + @keyword location: Location (zone) to create the volume in + @type location: C{str} or L{GCEZone} or L{NodeLocation} or C{None} + + @keyword image: Image to create disk from. + @type image: L{NodeImage} or C{str} or C{None} + + @keyword snapshot: Snapshot to create image from + @type snapshot: C{str} + + @return: Storage Volume object + @rtype: L{StorageVolume} + """ + volume_data = {} + params = None + volume_data['name'] = name + if size: + volume_data['sizeGb'] = str(size) + if image: + if not hasattr(image, 'name'): + image = self.ex_get_image(image) + params = {'sourceImage': image.extra['selfLink']} + if snapshot: + volume_data['sourceSnapshot'] = snapshot + location = location or self.zone + if not hasattr(location, 'name'): + location = self.ex_get_zone(location) + request = '/zones/%s/disks' % location.name + response = self.connection.async_request(request, method='POST', + data=volume_data, + params=params).object + if 'error' in response: + self._categorize_error(response['error']) + + return self.ex_get_volume(name) + + def ex_update_firewall(self, firewall): + """ + Update a firewall with new values. + + To update, change the attributes of the firewall object and pass the + updated object to the method. + + @param firewall: A firewall object with updated values. + @type firewall: L{GCEFirewall} + + @return: An object representing the new state of the firewall. 
+ @rtype: L{GCEFirewall} + """ + firewall_data = {} + firewall_data['name'] = firewall.name + firewall_data['allowed'] = firewall.allowed + firewall_data['network'] = firewall.network.extra['selfLink'] + if firewall.source_ranges: + firewall_data['sourceRanges'] = firewall.source_ranges + if firewall.source_tags: + firewall_data['sourceTags'] = firewall.source_tags + if firewall.extra['description']: + firewall_data['description'] = firewall.extra['description'] + + request = '/global/firewalls/%s' % firewall.name + + response = self.connection.async_request(request, method='PUT', + data=firewall_data).object + if 'error' in response: + self._categorize_error(response['error']) + + return self.ex_get_firewall(firewall.name) + + def reboot_node(self, node): + """ + Reboot a node. + + @param node: Node to be rebooted + @type node: L{Node} + + @return: True if successful, False if not + @rtype: C{bool} + """ + request = '/zones/%s/instances/%s/reset' % (node.extra['zone'].name, + node.name) + response = self.connection.async_request(request, method='POST', + data='ignored').object + if 'error' in response: + self._categorize_error(response['error']) + else: + return True + + def ex_set_node_tags(self, node, tags): + """ + Set the tags on a Node instance. + + Note that this updates the node object directly. 
+ + @param node: Node object + @type node: L{Node} + + @param tags: List of tags to apply to the object + @type tags: C{list} of C{str} + + @return: True if successful + @rtype: C{bool} + """ + request = '/zones/%s/instances/%s/setTags' % (node.extra['zone'].name, + node.name) + + tags_data = {} + tags_data['items'] = tags + tags_data['fingerprint'] = node.extra['tags_fingerprint'] + + response = self.connection.async_request(request, method='POST', + data=tags_data).object + if 'error' in response: + self._categorize_error(response['error']) + else: + new_node = self.ex_get_node(node.name) + node.extra['tags'] = new_node.extra['tags'] + node.extra['tags_fingerprint'] = new_node.extra['tags_fingerprint'] + return True + + def deploy_node(self, name, size, image, script, location=None, + ex_network='default', ex_tags=None): + """ + Create a new node and run a script on start-up. + + @param name: The name of the node to create. + @type name: C{str} + + @param size: The machine type to use. + @type size: C{str} or L{GCENodeSize} + + @param image: The image to use to create the node. + @type image: C{str} or L{NodeImage} + + @param script: File path to start-up script + @type script: C{str} + + @keyword location: The location (zone) to create the node in. + @type location: C{str} or L{NodeLocation} or L{GCEZone} or C{None} + + @keyword ex_network: The network to associate with the node. + @type ex_network: C{str} or L{GCENetwork} + + @keyword ex_tags: A list of tags to assiciate with the node. + @type ex_tags: C{list} of C{str} or C{None} + + @return: A Node object for the new node. 
+        @rtype: L{Node}
+        """
+        with open(script, 'r') as f:
+            script_data = f.read()
+        metadata = {'items': [{'key': 'startup-script',
+                               'value': script_data}]}
+
+        return self.create_node(name, size, image, location=location,
+                                ex_network=ex_network, ex_tags=ex_tags,
+                                ex_metadata=metadata)
+
+    def attach_volume(self, node, volume, device=None, ex_mode=None,
+                      ex_boot=False):
+        """
+        Attach a volume to a node.
+
+        If volume is None, a scratch disk will be created and attached.
+
+        @param node: The node to attach the volume to
+        @type node: L{Node}
+
+        @param volume: The volume to attach. If none, a scratch disk will be
+                       attached.
+        @type volume: L{StorageVolume} or C{None}
+
+        @keyword device: The device name to attach the volume as. Defaults to
+                         volume name.
+        @type device: C{str}
+
+        @keyword ex_mode: Either 'READ_WRITE' or 'READ_ONLY'
+        @type ex_mode: C{str}
+
+        @keyword ex_boot: If true, disk will be attached as a boot disk
+        @type ex_boot: C{bool}
+
+        @return: True if successful
+        @rtype: C{bool}
+        """
+        volume_data = {}
+        if volume is None:
+            volume_data['type'] = 'SCRATCH'
+        else:
+            volume_data['type'] = 'PERSISTENT'
+            volume_data['source'] = volume.extra['selfLink']
+        volume_data['kind'] = 'compute#attachedDisk'
+        volume_data['mode'] = ex_mode or 'READ_WRITE'
+
+        if device:
+            volume_data['deviceName'] = device
+        else:
+            volume_data['deviceName'] = volume.name
+
+        volume_data['boot'] = ex_boot
+
+        request = '/zones/%s/instances/%s/attachDisk' % (
+            node.extra['zone'].name, node.name)
+        response = self.connection.async_request(request, method='POST',
+                                                 data=volume_data).object
+        if 'error' in response:
+            self._categorize_error(response['error'])
+        else:
+            return True
+
+    def detach_volume(self, volume, ex_node=None):
+        """
+        Detach a volume from a node.
+ + @param volume: Volume object to detach + @type volume: L{StorageVolume} + + @keyword ex_node: Node object to detach volume from (required) + @type ex_node: L{Node} + + @return: True if successful + @rtype: C{bool} + """ + if not ex_node: + return False + request = '/zones/%s/instances/%s/detachDisk?deviceName=%s' % ( + ex_node.extra['zone'].name, ex_node.name, volume.name) + + response = self.connection.async_request(request, method='POST', + data='ignored').object + if 'error' in response: + self._categorize_error(response['error']) + else: + return True + + def ex_destroy_address(self, address): + """ + Destroy a static address. + + @param address: Address object to destroy + @type address: L{GCEAddress} + + @return: True if successful + @rtype: C{bool} + """ + request = '/regions/%s/addresses/%s' % (address.region, address.name) + + response = self.connection.async_request(request, + method='DELETE').object + if 'error' in response: + self._categorize_error(response['error']) + else: + return True + + def ex_destroy_firewall(self, firewall): + """ + Destroy a firewall. + + @param firewall: Firewall object to destroy + @type firewall: L{GCEFirewall} + + @return: True if successful + @rtype: C{bool} + """ + request = '/global/firewalls/%s' % firewall.name + response = self.connection.async_request(request, + method='DELETE').object + if 'error' in response: + self._categorize_error(response['error']) + else: + return True + + def ex_destroy_network(self, network): + """ + Destroy a network. + + @param network: Network object to destroy + @type network: L{GCENetwork} + + @return: True if successful + @rtype: C{bool} + """ + request = '/global/networks/%s' % network.name + response = self.connection.async_request(request, + method='DELETE').object + if 'error' in response: + self._categorize_error(response['error']) + else: + return True + + def destroy_node(self, node): + """ + Destroy a node. 
+
+        @param node: Node object to destroy
+        @type node: L{Node}
+
+        @return: True if successful
+        @rtype: C{bool}
+        """
+        request = '/zones/%s/instances/%s' % (node.extra['zone'].name,
+                                              node.name)
+        response = self.connection.async_request(request,
+                                                 method='DELETE').object
+        if 'error' in response:
+            self._categorize_error(response['error'])
+        else:
+            return True
+
+    def ex_destroy_multiple_nodes(self, nodelist, ignore_errors=True,
+                                  timeout=DEFAULT_TASK_COMPLETION_TIMEOUT):
+        """
+        Destroy multiple nodes at once.
+
+        @param nodelist: List of nodes to destroy
+        @type nodelist: C{list} of L{Node}
+
+        @keyword ignore_errors: If true, don't raise an exception if one or
+                                more nodes fails to be destroyed.
+        @type ignore_errors: C{bool}
+
+        @keyword timeout: Number of seconds to wait for all nodes to be
+                          destroyed.
+        @type timeout: C{int}
+
+        @return: A list of boolean values.  One for each node.  True means
+                 that the node was successfully destroyed.
+        @rtype: C{list} of C{bool}
+        """
+        responses = []
+        success = [False] * len(nodelist)
+        complete = False
+        start_time = time.time()
+        for node in nodelist:
+            request = '/zones/%s/instances/%s' % (node.extra['zone'].name,
+                                                  node.name)
+            response = self.connection.request(request, method='DELETE').object
+            responses.append(response)
+
+        while not complete:
+            if (time.time() - start_time >= timeout):
+                raise Exception("Timeout (%s sec) while waiting to delete "
+                                "multiple instances" % timeout)
+            complete = True
+            for i, operation in enumerate(responses):
+                if operation is None:
+                    continue
+                response = self.connection.request(
+                    operation['selfLink']).object
+                if response['status'] == 'DONE':
+                    responses[i] = None
+                    if 'error' in response:
+                        if ignore_errors:
+                            success[i] = False
+                        else:
+                            self._categorize_error(response['error'])
+                    else:
+                        success[i] = True
+                else:
+                    complete = False
+                    time.sleep(2)
+        return success
+
+    def destroy_volume(self, volume):
+        """
+        Destroy a volume.
+ + @param volume: Volume object to destroy + @type volume: L{StorageVolume} + + @return: True if successful + @rtype: C{bool} + """ + request = '/zones/%s/disks/%s' % (volume.extra['zone'].name, + volume.name) + response = self.connection.async_request(request, + method='DELETE').object + if 'error' in response: + self._categorize_error(response['error']) + else: + return True + + def ex_get_address(self, name, region=None): + """ + Return an Address object based on an address name and optional region. + + @param name: The name of the address + @type name: C{str} + + @keyword region: The region to search for the address in + @type region: C{str} or C{None} + + @return: An Address object for the address + @rtype: L{GCEAddress} + """ + address_region = region or self._find_zone(name, 'addresses', + region=True) + request = '/regions/%s/addresses/%s' % (address_region, name) + response = self.connection.request(request, method='GET').object + return self._to_address(response) + + def ex_get_firewall(self, name): + """ + Return a Firewall object based on the firewall name. + + @param name: The name of the firewall + @type name: C{str} + + @return: A GCEFirewall object + @rtype: L{GCEFirewall} + """ + request = '/global/firewalls/%s' % name + response = self.connection.request(request, method='GET').object + return self._to_firewall(response) + + def ex_get_image(self, partial_name): + """ + Return an NodeImage object based on the name or link provided. + + @param partial_name: The name, partial name, or full path of a GCE + image. 
+ @type partial_name: C{str} + + @return: NodeImage object based on provided information + @rtype: L{NodeImage} + """ + if partial_name.startswith('https://'): + response = self.connection.request(partial_name, method='GET') + return self._to_node_image(response.object) + image = self._match_images(None, partial_name) + if not image: + if partial_name.startswith('debian'): + image = self._match_images('debian-cloud', partial_name) + elif partial_name.startswith('centos'): + image = self._match_images('centos-cloud', partial_name) + + return image + + def ex_get_network(self, name): + """ + Return a Network object based on a network name. + + @param name: The name of the network + @type name: C{str} + + @return: A Network object for the network + @rtype: L{GCENetwork} + """ + request = '/global/networks/%s' % name + response = self.connection.request(request, method='GET').object + return self._to_network(response) + + def ex_get_node(self, name, zone=None): + """ + Return a Node object based on a node name and optional zone. + + @param name: The name of the node + @type name: C{str} + + @keyword zone: The zone to search for the node in + @type zone: C{str} or L{GCEZone} or L{NodeLocation} or C{None} + + @return: A Node object for the node + @rtype: L{Node} + """ + zone = zone or self.zone or self._find_zone(name, 'instances') + if not hasattr(zone, 'name'): + zone = self.ex_get_zone(zone) + request = '/zones/%s/instances/%s' % (zone.name, name) + response = self.connection.request(request, method='GET').object + return self._to_node(response) + + def ex_get_project(self): + """ + Return a Project object with project-wide information. + + @return: A GCEProject object + @rtype: L{GCEProject} + """ + response = self.connection.request('', method='GET').object + return self._to_project(response) + + def ex_get_size(self, name, zone=None): + """ + Return a size object based on a machine type name and zone. 
+
+        @param name: The name of the node
+        @type name: C{str}
+
+        @keyword zone: The zone to search for the machine type in
+        @type zone: C{str} or L{GCEZone} or L{NodeLocation} or C{None}
+
+        @return: A GCENodeSize object for the machine type
+        @rtype: L{GCENodeSize}
+        """
+        zone = zone or self.zone
+        if not hasattr(zone, 'name'):
+            zone = self.ex_get_zone(zone)
+        request = '/zones/%s/machineTypes/%s' % (zone.name, name)
+        response = self.connection.request(request, method='GET').object
+        return self._to_node_size(response)
+
+    def ex_get_volume(self, name, zone=None):
+        """
+        Return a Volume object based on a volume name and optional zone.
+
+        @param name: The name of the volume
+        @type name: C{str}
+
+        @keyword zone: The zone to search for the volume in
+        @type zone: C{str} or L{GCEZone} or L{NodeLocation} or C{None}
+
+        @return: A StorageVolume object for the volume
+        @rtype: L{StorageVolume}
+        """
+        zone = zone or self.zone or self._find_zone(name, 'disks')
+        if not hasattr(zone, 'name'):
+            zone = self.ex_get_zone(zone)
+        request = '/zones/%s/disks/%s' % (zone.name, name)
+        response = self.connection.request(request, method='GET').object
+        return self._to_storage_volume(response)
+
+    def ex_get_zone(self, name):
+        """
+        Return a Zone object based on the zone name.
+
+        @param name: The name of the zone.
+        @type name: C{str}
+
+        @return: A GCEZone object for the zone
+        @rtype: L{GCEZone}
+        """
+        if name.startswith('https://'):
+            short_name = name.split('/')[-1]
+            request = name
+        else:
+            short_name = name
+            request = '/zones/%s' % name
+        # Check zone cache first
+        if short_name in self.zone_dict:
+            return self.zone_dict[short_name]
+        # Otherwise, look up zone information
+        response = self.connection.request(request, method='GET').object
+        return self._to_zone(response)
+
+    def _to_address(self, address):
+        """
+        Return an Address object from the json-response dictionary.
+
+        @param address: The dictionary describing the address.
+ @type address: C{dict} + + @return: Address object + @rtype: L{GCEAddress} + """ + extra = {} + + extra['selfLink'] = address['selfLink'] + extra['status'] = address['status'] + extra['region'] = address['region'] + extra['creationTimestamp'] = address['creationTimestamp'] + region = address['region'].split('/')[-1] + + return GCEAddress(id=address['id'], name=address['name'], + address=address['address'], + region=region, driver=self, extra=extra) + + def _to_firewall(self, firewall): + """ + Return a Firewall object from the json-response dictionary. + + @param firewall: The dictionary describing the firewall. + @type firewall: C{dict} + + @return: Firewall object + @rtype: L{GCEFirewall} + """ + extra = {} + extra['selfLink'] = firewall['selfLink'] + extra['creationTimestamp'] = firewall['creationTimestamp'] + extra['description'] = firewall.get('description') + extra['network_name'] = firewall['network'].split('/')[-1] + + network = self.ex_get_network(extra['network_name']) + source_ranges = firewall.get('sourceRanges') + source_tags = firewall.get('sourceTags') + + return GCEFirewall(id=firewall['id'], name=firewall['name'], + allowed=firewall['allowed'], network=network, + source_ranges=source_ranges, + source_tags=source_tags, + driver=self, extra=extra) + + def _to_network(self, network): + """ + Return a Network object from the json-response dictionary. + + @param network: The dictionary describing the network. + @type network: C{dict} + + @return: Network object + @rtype: L{GCENetwork} + """ + extra = {} + + extra['selfLink'] = network['selfLink'] + extra['gatewayIPv4'] = network['gatewayIPv4'] + extra['description'] = network.get('description') + extra['creationTimestamp'] = network['creationTimestamp'] + + return GCENetwork(id=network['id'], name=network['name'], + cidr=network['IPv4Range'], + driver=self, extra=extra) + + def _to_node_image(self, image): + """ + Return an Image object from the json-response dictionary. 
+ + @param image: The dictionary describing the image. + @type image: C{dict} + + @return: Image object + @rtype: L{NodeImage} + """ + extra = {} + extra['preferredKernel'] = image['preferredKernel'] + extra['description'] = image['description'] + extra['creationTimestamp'] = image['creationTimestamp'] + extra['selfLink'] = image['selfLink'] + return NodeImage(id=image['id'], name=image['name'], driver=self, + extra=extra) + + def _to_node_location(self, location): + """ + Return a Location object from the json-response dictionary. + + @param location: The dictionary describing the location. + @type location: C{dict} + + @return: Location object + @rtype: L{NodeLocation} + """ + return NodeLocation(id=location['id'], name=location['name'], + country=location['name'].split('-')[0], + driver=self) + + def _to_node(self, node): + """ + Return a Node object from the json-response dictionary. + + @param node: The dictionary describing the node. + @type node: C{dict} + + @return: Node object + @rtype: L{Node} + """ + public_ips = [] + private_ips = [] + extra = {} + + extra['status'] = node['status'] + extra['description'] = node.get('description') + extra['zone'] = self.ex_get_zone(node['zone']) + extra['image'] = node.get('image') + extra['disks'] = node['disks'] + extra['networkInterfaces'] = node['networkInterfaces'] + extra['id'] = node['id'] + extra['selfLink'] = node['selfLink'] + extra['name'] = node['name'] + extra['metadata'] = node['metadata'] + extra['tags_fingerprint'] = node['tags']['fingerprint'] + + if 'items' in node['tags']: + tags = node['tags']['items'] + else: + tags = [] + extra['tags'] = tags + + for network_interface in node['networkInterfaces']: + private_ips.append(network_interface['networkIP']) + for access_config in network_interface['accessConfigs']: + public_ips.append(access_config['natIP']) + + return Node(id=node['id'], name=node['name'], + state=self.NODE_STATE_MAP[node['status']], + public_ips=public_ips, private_ips=private_ips, + 
driver=self, size=node['machineType'], + image=node.get('image'), extra=extra) + + def _to_node_size(self, machine_type): + """ + Return a Size object from the json-response dictionary. + + @param machine_type: The dictionary describing the machine. + @type machine_type: C{dict} + + @return: Size object + @rtype: L{GCENodeSize} + """ + extra = {} + extra['selfLink'] = machine_type['selfLink'] + extra['zone'] = self.ex_get_zone(machine_type['zone']) + extra['description'] = machine_type['description'] + extra['guestCpus'] = machine_type['guestCpus'] + extra['creationTimestamp'] = machine_type['creationTimestamp'] + try: + price = self._get_size_price(size_id=machine_type['name']) + except KeyError: + price = None + + return GCENodeSize(id=machine_type['id'], name=machine_type['name'], + ram=machine_type['memoryMb'], + disk=machine_type['imageSpaceGb'], + bandwidth=0, price=price, driver=self, extra=extra) + + def _to_project(self, project): + """ + Return a Project object from the json-response dictionary. + + @param project: The dictionary describing the project. + @type project: C{dict} + + @return: Project object + @rtype: L{GCEProject} + """ + extra = {} + extra['selfLink'] = project['selfLink'] + extra['creationTimestamp'] = project['creationTimestamp'] + extra['description'] = project['description'] + metadata = project['commonInstanceMetadata'].get('items') + + return GCEProject(id=project['id'], name=project['name'], + metadata=metadata, quotas=project['quotas'], + driver=self, extra=extra) + + def _to_storage_volume(self, volume): + """ + Return a Volume object from the json-response dictionary. + + @param volume: The dictionary describing the volume. 
+ @type volume: C{dict} + + @return: Volume object + @rtype: L{StorageVolume} + """ + extra = {} + extra['selfLink'] = volume['selfLink'] + extra['zone'] = self.ex_get_zone(volume['zone']) + extra['status'] = volume['status'] + extra['creationTimestamp'] = volume['creationTimestamp'] + + return StorageVolume(id=volume['id'], name=volume['name'], + size=volume['sizeGb'], driver=self, extra=extra) + + def _to_zone(self, zone): + """ + Return a Zone object from the json-response dictionary. + + @param zone: The dictionary describing the zone. + @type zone: C{dict} + + @return: Zone object + @rtype: L{GCEZone} + """ + extra = {} + extra['selfLink'] = zone['selfLink'] + extra['creationTimestamp'] = zone['creationTimestamp'] + extra['description'] = zone['description'] + + deprecated = zone.get('deprecated') + + return GCEZone(id=zone['id'], name=zone['name'], status=zone['status'], + maintenance_windows=zone['maintenanceWindows'], + quotas=zone['quotas'], deprecated=deprecated, + driver=self, extra=extra) diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index 54f0aa9566..422d8576b8 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -64,6 +64,8 @@ ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaZrhNodeDriver'), Provider.CLOUDSIGMA_US: ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaLvsNodeDriver'), + Provider.GCE: + ('libcloud.compute.drivers.gce', 'GCENodeDriver'), Provider.GOGRID: ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'), Provider.RACKSPACE: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index 2f076cd90a..639bf59ccc 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -44,6 +44,7 @@ class Provider(object): @cvar RACKSPACE: Rackspace next-gen OpenStack based Cloud Servers @cvar RACKSPACE_FIRST_GEN: Rackspace First Gen Cloud Servers @cvar SLICEHOST: Slicehost.com + @cvar GCE: Google Compute Engine @cvar GOGRID: GoGrid @cvar VPSNET: VPS.net @cvar LINODE: 
Linode.com @@ -72,6 +73,7 @@ class Provider(object): DUMMY = 'dummy' EC2 = 'ec2_us_east' SLICEHOST = 'slicehost' + GCE = 'gce' GOGRID = 'gogrid' VPSNET = 'vpsnet' LINODE = 'linode' diff --git a/libcloud/test/common/test_google.py b/libcloud/test/common/test_google.py new file mode 100644 index 0000000000..35da4b291a --- /dev/null +++ b/libcloud/test/common/test_google.py @@ -0,0 +1,241 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Tests for Google Connection classes. 
+""" +import datetime +import sys +import unittest + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.utils.py3 import httplib + +from libcloud.test import MockHttp, LibcloudTestCase, MockResponse +from libcloud.common.google import (GoogleAuthError, + GoogleBaseAuthConnection, + GoogleInstalledAppAuthConnection, + GoogleServiceAcctAuthConnection, + GoogleBaseConnection) +from libcloud.test.secrets import GCE_PARAMS, GCE_KEYWORD_PARAMS + +# Skip some tests if PyCrypto is unavailable +try: + from Crypto.Hash import SHA256 +except ImportError: + SHA256 = None + + +class MockJsonResponse(object): + def __init__(self, body): + self.object = body + + +class GoogleBaseAuthConnectionTest(LibcloudTestCase): + """ + Tests for GoogleBaseAuthConnection + """ + GoogleBaseAuthConnection._now = lambda x: datetime.datetime(2013, 6, 26, + 19, 0, 0) + + def setUp(self): + GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + self.mock_scope = ['https://www.googleapis.com/auth/foo'] + kwargs = {'scope': self.mock_scope} + self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS, + **kwargs) + + def test_add_default_headers(self): + old_headers = {} + expected_headers = { + 'Content-Type': 'application/x-www-form-urlencoded', + 'Host': 'accounts.google.com'} + new_headers = self.conn.add_default_headers(old_headers) + self.assertEqual(new_headers, expected_headers) + + def test_token_request(self): + request_body = {'code': 'asdf', 'client_id': self.conn.user_id, + 'client_secret': self.conn.key, + 'redirect_uri': self.conn.redirect_uri, + 'grant_type': 'authorization_code'} + new_token = self.conn._token_request(request_body) + self.assertEqual(new_token['access_token'], 'installedapp') + self.assertEqual(new_token['expire_time'], '2013-06-26T20:00:00Z') + + +class GoogleInstalledAppAuthConnectionTest(LibcloudTestCase): + """ + Tests for GoogleInstalledAppAuthConnection + """ + 
GoogleInstalledAppAuthConnection.get_code = lambda x: '1234' + + def setUp(self): + GoogleInstalledAppAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + self.mock_scope = ['https://www.googleapis.com/auth/foo'] + kwargs = {'scope': self.mock_scope} + self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS, + **kwargs) + + def test_refresh_token(self): + # This token info doesn't have a refresh token, so a new token will be + # requested + token_info1 = {'access_token': 'tokentoken', 'token_type': 'Bearer', + 'expires_in': 3600} + new_token1 = self.conn.refresh_token(token_info1) + self.assertEqual(new_token1['access_token'], 'installedapp') + + # This token info has a refresh token, so it will be able to be + # refreshed. + token_info2 = {'access_token': 'tokentoken', 'token_type': 'Bearer', + 'expires_in': 3600, 'refresh_token': 'refreshrefresh'} + new_token2 = self.conn.refresh_token(token_info2) + self.assertEqual(new_token2['access_token'], 'refreshrefresh') + + # Both sets should have refresh info + self.assertTrue('refresh_token' in new_token1) + self.assertTrue('refresh_token' in new_token2) + + +class GoogleBaseConnectionTest(LibcloudTestCase): + """ + Tests for GoogleBaseConnection + """ + GoogleBaseConnection._get_token_info_from_file = lambda x: None + GoogleBaseConnection._write_token_info_to_file = lambda x: None + GoogleInstalledAppAuthConnection.get_code = lambda x: '1234' + GoogleServiceAcctAuthConnection.get_new_token = \ + lambda x: x._token_request({}) + GoogleBaseConnection._now = lambda x: datetime.datetime(2013, 6, 26, + 19, 0, 0) + + def setUp(self): + GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + self.mock_scope = ['https://www.googleapis.com/auth/foo'] + kwargs = {'scope': self.mock_scope, 'auth_type': 'IA'} + self.conn = GoogleBaseConnection(*GCE_PARAMS, **kwargs) + + def test_auth_type(self): + self.assertRaises(GoogleAuthError, GoogleBaseConnection, *GCE_PARAMS, + 
**{'auth_type': 'XX'}) + + kwargs = {'scope': self.mock_scope} + + if SHA256: + kwargs['auth_type'] = 'SA' + conn1 = GoogleBaseConnection(*GCE_PARAMS, **kwargs) + self.assertTrue(isinstance(conn1.auth_conn, + GoogleServiceAcctAuthConnection)) + + kwargs['auth_type'] = 'IA' + conn2 = GoogleBaseConnection(*GCE_PARAMS, **kwargs) + self.assertTrue(isinstance(conn2.auth_conn, + GoogleInstalledAppAuthConnection)) + + def test_add_default_headers(self): + old_headers = {} + new_expected_headers = {'Content-Type': 'application/json', + 'Host': 'www.googleapis.com'} + new_headers = self.conn.add_default_headers(old_headers) + self.assertEqual(new_headers, new_expected_headers) + + def test_pre_connect_hook(self): + old_params = {} + old_headers = {} + new_expected_params = {} + new_expected_headers = {'Authorization': 'Bearer installedapp'} + new_params, new_headers = self.conn.pre_connect_hook(old_params, + old_headers) + self.assertEqual(new_params, new_expected_params) + self.assertEqual(new_headers, new_expected_headers) + + def test_encode_data(self): + data = {'key': 'value'} + json_data = '{"key": "value"}' + encoded_data = self.conn.encode_data(data) + self.assertEqual(encoded_data, json_data) + + def test_has_completed(self): + body1 = {"endTime": "2013-06-26T10:05:07.630-07:00", + "id": "3681664092089171723", + "kind": "compute#operation", + "status": "DONE", + "targetId": "16211908079305042870"} + body2 = {"endTime": "2013-06-26T10:05:07.630-07:00", + "id": "3681664092089171723", + "kind": "compute#operation", + "status": "RUNNING", + "targetId": "16211908079305042870"} + response1 = MockJsonResponse(body1) + response2 = MockJsonResponse(body2) + self.assertTrue(self.conn.has_completed(response1)) + self.assertFalse(self.conn.has_completed(response2)) + + def test_get_poll_request_kwargs(self): + body = {"endTime": "2013-06-26T10:05:07.630-07:00", + "id": "3681664092089171723", + "kind": "compute#operation", + "selfLink": 
"https://www.googleapis.com/operations-test"} + response = MockJsonResponse(body) + expected_kwargs = {'action': + 'https://www.googleapis.com/operations-test'} + kwargs = self.conn.get_poll_request_kwargs(response, None, {}) + self.assertEqual(kwargs, expected_kwargs) + + def test_morph_action_hook(self): + self.conn.request_path = '/compute/apiver/project/project-name' + action1 = ('https://www.googleapis.com/compute/apiver/project' + '/project-name/instances') + action2 = '/instances' + expected_request = '/compute/apiver/project/project-name/instances' + request1 = self.conn.morph_action_hook(action1) + request2 = self.conn.morph_action_hook(action2) + self.assertEqual(request1, expected_request) + self.assertEqual(request2, expected_request) + + +class GoogleAuthMockHttp(MockHttp): + """ + Mock HTTP Class for Google Auth Connections. + """ + json_hdr = {'content-type': 'application/json; charset=UTF-8'} + + def _o_oauth2_token(self, method, url, body, headers): + token_info = {'access_token': 'tokentoken', + 'token_type': 'Bearer', + 'expires_in': 3600} + refresh_token = {'access_token': 'refreshrefresh', + 'token_type': 'Bearer', + 'expires_in': 3600} + ia_token = {'access_token': 'installedapp', + 'token_type': 'Bearer', + 'expires_in': 3600, + 'refresh_token': 'refreshrefresh'} + if 'code' in body: + body = json.dumps(ia_token) + elif 'refresh_token' in body: + body = json.dumps(refresh_token) + else: + body = json.dumps(token_info) + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/libcloud/test/compute/fixtures/gce/aggregated_addresses.json b/libcloud/test/compute/fixtures/gce/aggregated_addresses.json new file mode 100644 index 0000000000..6ac222e03d --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/aggregated_addresses.json @@ -0,0 +1,71 @@ +{ + "id": "projects/project_name/aggregated/addresses", + "items": { + "regions/europe-west1": { + "addresses": [ 
+ { + "address": "192.158.29.247", + "creationTimestamp": "2013-06-26T09:51:47.506-07:00", + "description": "", + "id": "10955781597205896134", + "kind": "compute#address", + "name": "libcloud-demo-europe-address", + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/europe-west1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/europe-west1/addresses/libcloud-demo-europe-address", + "status": "RESERVED" + } + ] + }, + "regions/us-central1": { + "addresses": [ + { + "address": "173.255.113.20", + "creationTimestamp": "2013-06-26T12:21:40.625-07:00", + "description": "", + "id": "01531551729918243104", + "kind": "compute#address", + "name": "lcaddress", + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses/lcaddress", + "status": "RESERVED" + }, + { + "address": "108.59.82.4", + "creationTimestamp": "2013-06-26T09:48:31.184-07:00", + "description": "", + "id": "17634862894218443422", + "kind": "compute#address", + "name": "libcloud-demo-address", + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses/libcloud-demo-address", + "status": "RESERVED" + }, + { + "address": "173.255.114.104", + "creationTimestamp": "2013-06-04T16:28:43.764-07:00", + "description": "", + "id": "11879548153827627972", + "kind": "compute#address", + "name": "testaddress", + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses/testaddress", + "status": "RESERVED" + } + ] + }, + "regions/us-central2": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + 
"data": [ + { + "key": "scope", + "value": "regions/us-central2" + } + ], + "message": "There are no results for scope 'regions/us-central2' on this page." + } + } + }, + "kind": "compute#addressAggregatedList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/aggregated/addresses" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/aggregated_disks.json b/libcloud/test/compute/fixtures/gce/aggregated_disks.json new file mode 100644 index 0000000000..1190ab4629 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/aggregated_disks.json @@ -0,0 +1,81 @@ +{ + "id": "projects/project_name/aggregated/disks", + "items": { + "zones/europe-west1-a": { + "disks": [ + { + "creationTimestamp": "2013-06-26T09:50:22.508-07:00", + "id": "0811494794539478718", + "kind": "compute#disk", + "name": "libcloud-demo-europe-boot-disk", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-boot-disk", + "sizeGb": "10", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a" + } + ] + }, + "zones/europe-west1-b": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "zones/europe-west1-b" + } + ], + "message": "There are no results for scope 'zones/europe-west1-b' on this page." 
+ } + }, + "zones/us-central1-a": { + "disks": [ + { + "creationTimestamp": "2013-06-25T10:57:34.305-07:00", + "id": "14383387450728762434", + "kind": "compute#disk", + "name": "test-disk", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/test-disk", + "sizeGb": "10", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "creationTimestamp": "2013-06-26T09:47:09.178-07:00", + "id": "10880026303683859871", + "kind": "compute#disk", + "name": "libcloud-demo-boot-disk", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/libcloud-demo-boot-disk", + "sizeGb": "10", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + } + ] + }, + "zones/us-central1-b": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "zones/us-central1-b" + } + ], + "message": "There are no results for scope 'zones/us-central1-b' on this page." + } + }, + "zones/us-central2-a": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "zones/us-central2-a" + } + ], + "message": "There are no results for scope 'zones/us-central2-a' on this page." 
+ } + } + }, + "kind": "compute#diskAggregatedList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/aggregated/disks" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/aggregated_instances.json b/libcloud/test/compute/fixtures/gce/aggregated_instances.json new file mode 100644 index 0000000000..3e72affcaa --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/aggregated_instances.json @@ -0,0 +1,414 @@ +{ + "id": "projects/project_name/aggregated/instances", + "items": { + "zones/europe-west1-a": { + "instances": [ + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:13:38.295-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "4658881585544531189", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-multiple-nodes-000", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "192.158.29.167", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.144.78" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-multiple-nodes-000", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a" + }, + { + "canIpForward": false, + "creationTimestamp": 
"2013-06-26T15:13:21.549-07:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-europe-boot-disk", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-boot-disk", + "type": "PERSISTENT" + } + ], + "id": "0681789716029574243", + "kernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130603", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-persist-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "192.158.29.121", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.206.91" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-persist-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:12:29.726-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "14308265828754333159", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-np-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "192.158.29.88", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.66.77" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a" + } + ] + }, + "zones/europe-west1-b": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "zones/europe-west1-b" + } + ], + "message": "There are no results for scope 'zones/europe-west1-b' on this page." 
+ } + }, + "zones/us-central1-a": { + "instances": [ + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:11:02.386-07:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-boot-disk", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/libcloud-demo-boot-disk", + "type": "PERSISTENT" + } + ], + "id": "2378270030714524465", + "kernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130603", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-persist-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "108.59.81.66", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.192.190" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/libcloud-demo-persist-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:11:19.247-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "8573880455005118258", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-multiple-nodes-000", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "108.59.81.107", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.224.165" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/libcloud-demo-multiple-nodes-000", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:00:12.021-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "1845312225624811608", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "node-name", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "173.255.115.146", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.113.94" + } + ], + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "status": "RUNNING", + "tags": { + "fingerprint": "42WmSpB8rSM=" + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:10:09.700-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "03138438763739542377", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-np-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "108.59.80.244", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.147.18" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/libcloud-demo-np-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:11:19.662-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "17221721898919682654", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + 
"machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-multiple-nodes-001", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "108.59.81.166", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.223.109" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/libcloud-demo-multiple-nodes-001", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + } + ] + }, + "zones/us-central1-b": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "zones/us-central1-b" + } + ], + "message": "There are no results for scope 'zones/us-central1-b' on this page." + } + }, + "zones/us-central2-a": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "zones/us-central2-a" + } + ], + "message": "There are no results for scope 'zones/us-central2-a' on this page." 
+ } + } + }, + "kind": "compute#instanceAggregatedList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/aggregated/instances" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json b/libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json new file mode 100644 index 0000000000..54f8e23c33 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json @@ -0,0 +1,1683 @@ +{ + "id": "projects/project_name/aggregated/machineTypes", + "items": { + "zones/europe-west1-a": { + "machineTypes": [ + { + "creationTimestamp": "2012-11-16T11:40:59.630-08:00", + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-2-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-2-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + 
"maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-2", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-8-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-4", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-2", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + 
"imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-8", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-8", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-4", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": 
"compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-2", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-4-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-4", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": "10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + 
"description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-8-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-2-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:50:05.677-07:00", + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-4-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, 
+ "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/g1-small", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/f1-micro", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-4-d", + "zone": "europe-west1-a" + } + ] + }, + "zones/europe-west1-b": { + "machineTypes": [ + { + "creationTimestamp": "2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-8", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, 
+ "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-8-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-8", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-4-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:40:59.630-08:00", + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-2-d", + "zone": 
"europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-4", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/f1-micro", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": "10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-1-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-2-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-2", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-2-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-4", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:50:05.677-07:00", + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + 
"memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-4-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/g1-small", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-2", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-4", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + 
"maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-8-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-1", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-4-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-2", + "zone": "europe-west1-b" + } + ] + }, + "zones/us-central1-a": { + "machineTypes": [ + { + "creationTimestamp": 
"2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:59.630-08:00", + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:50:05.677-07:00", + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 
16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + 
"maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/g1-small", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/f1-micro", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, 
+ "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": "10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8-d", + "zone": "us-central1-a" + }, + { + 
"creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8-d", + "zone": "us-central1-a" + } + ] + }, + "zones/us-central1-b": { + "machineTypes": [ + { + "creationTimestamp": "2012-06-07T13:50:05.677-07:00", + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-4-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-4-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + 
"maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-8", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-2-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-8-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-8", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + 
"description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/g1-small", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-4", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": "10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-4", + "zone": "us-central1-b" + }, + { + 
"creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/f1-micro", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:40:59.630-08:00", + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-2-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-2", 
+ "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-2-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-2", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-2", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-4-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-8-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-4", + "zone": "us-central1-b" + } + ] + }, + "zones/us-central2-a": { + "machineTypes": [ + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-4-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 
4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-4-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-4", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-2", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-2", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 
0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/f1-micro", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-8", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-4", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-8-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": 
"2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-8", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": "10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-2-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:59.630-08:00", + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + 
"selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-2-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-4", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-8-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-06-07T13:50:05.677-07:00", + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-4-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": 
"compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-2", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-2-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a/machineTypes/g1-small", + "zone": "us-central2-a" + } + ] + } + }, + "kind": "compute#machineTypeAggregatedList", + "nextPageToken": 
"ChhQRVJfUFJPSkVDVF9NQUNISU5FX1RZUEUSGjYwMDUzMTk1NTY3NS5uMS1zdGFuZGFyZC04", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/aggregated/machineTypes" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_firewalls.json b/libcloud/test/compute/fixtures/gce/global_firewalls.json new file mode 100644 index 0000000000..c25af7afd0 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_firewalls.json @@ -0,0 +1,88 @@ +{ + "id": "projects/project_name/global/firewalls", + "items": [ + { + "allowed": [ + { + "IPProtocol": "udp" + }, + { + "IPProtocol": "tcp" + }, + { + "IPProtocol": "icmp" + } + ], + "creationTimestamp": "2013-06-25T19:50:41.630-07:00", + "description": "", + "id": "5399576268464751692", + "kind": "compute#firewall", + "name": "default-allow-internal", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/default-allow-internal", + "sourceRanges": [ + "10.240.0.0/16" + ] + }, + { + "allowed": [ + { + "IPProtocol": "tcp", + "ports": [ + "22" + ] + } + ], + "creationTimestamp": "2013-06-25T19:48:25.111-07:00", + "description": "", + "id": "8063006729705804986", + "kind": "compute#firewall", + "name": "default-ssh", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/default-ssh", + "sourceRanges": [ + "0.0.0.0/0" + ] + }, + { + "allowed": [ + { + "IPProtocol": "tcp", + "ports": [ + "3141" + ] + } + ], + "creationTimestamp": "2013-06-26T09:51:41.593-07:00", + "id": "14041102034246553251", + "kind": "compute#firewall", + "name": "libcloud-demo-europe-firewall", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/libcloud-demo-europe-network", + 
"selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/libcloud-demo-europe-firewall", + "sourceTags": [ + "libcloud" + ] + }, + { + "allowed": [ + { + "IPProtocol": "tcp", + "ports": [ + "3141" + ] + } + ], + "creationTimestamp": "2013-06-26T09:48:23.268-07:00", + "id": "0716768890200439066", + "kind": "compute#firewall", + "name": "libcloud-demo-firewall", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/libcloud-demo-network", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/libcloud-demo-firewall", + "sourceTags": [ + "libcloud" + ] + } + ], + "kind": "compute#firewallList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json b/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json new file mode 100644 index 0000000000..fde8474559 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json @@ -0,0 +1,19 @@ +{ + "allowed": [ + { + "IPProtocol": "tcp", + "ports": [ + "4567" + ] + } + ], + "creationTimestamp": "2013-06-26T10:04:43.773-07:00", + "id": "0565629596395414121", + "kind": "compute#firewall", + "name": "lcfirewall", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/lcfirewall", + "sourceTags": [ + "libcloud" + ] +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json b/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json new file mode 100644 index 0000000000..75c0ce70d6 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json @@ -0,0 +1,14 @@ +{ + 
"id": "8983098895755095934", + "insertTime": "2013-06-26T10:04:53.453-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_lcfirewall_delete", + "operationType": "delete", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_delete", + "startTime": "2013-06-26T10:04:53.508-07:00", + "status": "PENDING", + "targetId": "0565629596395414121", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json b/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json new file mode 100644 index 0000000000..c07528270b --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json @@ -0,0 +1,14 @@ +{ + "id": "6526551968265354277", + "insertTime": "2013-06-26T20:52:00.355-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_lcfirewall_put", + "operationType": "update", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_put", + "startTime": "2013-06-26T20:52:00.410-07:00", + "status": "PENDING", + "targetId": "10942695305090163011", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_firewalls_post.json b/libcloud/test/compute/fixtures/gce/global_firewalls_post.json new file mode 100644 index 0000000000..84124f3e76 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_firewalls_post.json @@ -0,0 +1,13 @@ +{ + "id": "16789512465352307784", + "insertTime": 
"2013-06-26T20:51:06.068-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/operations/operation-global_firewalls_post", + "startTime": "2013-06-26T20:51:06.128-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_images.json b/libcloud/test/compute/fixtures/gce/global_images.json new file mode 100644 index 0000000000..c472dfafeb --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_images.json @@ -0,0 +1,22 @@ +{ + "id": "projects/project_name/global/images", + "items": [ + { + "creationTimestamp": "2013-06-19T13:47:20.563-07:00", + "description": "Local Debian GNU/Linux 7.1 (wheezy) built on 2013-06-17", + "id": "1549141992333368759", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130617", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130603", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "sourceType": "RAW", + "status": "READY" + } + ], + "kind": "compute#imageList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/images" +} diff --git a/libcloud/test/compute/fixtures/gce/global_images.json.save b/libcloud/test/compute/fixtures/gce/global_images.json.save new file mode 100644 index 0000000000..c472dfafeb --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_images.json.save @@ -0,0 +1,22 @@ +{ + "id": "projects/project_name/global/images", + "items": [ + { + "creationTimestamp": "2013-06-19T13:47:20.563-07:00", 
+ "description": "Local Debian GNU/Linux 7.1 (wheezy) built on 2013-06-17", + "id": "1549141992333368759", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130617", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130603", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "sourceType": "RAW", + "status": "READY" + } + ], + "kind": "compute#imageList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/images" +} diff --git a/libcloud/test/compute/fixtures/gce/global_networks.json b/libcloud/test/compute/fixtures/gce/global_networks.json new file mode 100644 index 0000000000..071d1dc1c8 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_networks.json @@ -0,0 +1,34 @@ +{ + "id": "projects/project_name/global/networks", + "items": [ + { + "IPv4Range": "10.240.0.0/16", + "creationTimestamp": "2013-06-19T12:37:13.233-07:00", + "gatewayIPv4": "10.240.0.1", + "id": "08257021638942464470", + "kind": "compute#network", + "name": "default", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default" + }, + { + "IPv4Range": "10.10.0.0/16", + "creationTimestamp": "2013-06-26T09:51:34.018-07:00", + "gatewayIPv4": "10.10.0.1", + "id": "13254259054875092094", + "kind": "compute#network", + "name": "libcloud-demo-europe-network", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/libcloud-demo-europe-network" + }, + { + "IPv4Range": "10.10.0.0/16", + "creationTimestamp": "2013-06-26T09:48:15.703-07:00", + "gatewayIPv4": "10.10.0.1", + "id": "17172579178188075621", + "kind": "compute#network", + "name": "libcloud-demo-network", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/libcloud-demo-network" + } 
+ ], + "kind": "compute#networkList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_networks_default.json b/libcloud/test/compute/fixtures/gce/global_networks_default.json new file mode 100644 index 0000000000..a6353e8f17 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_networks_default.json @@ -0,0 +1,9 @@ +{ + "IPv4Range": "10.240.0.0/16", + "creationTimestamp": "2013-06-19T12:37:13.233-07:00", + "gatewayIPv4": "10.240.0.1", + "id": "08257021638942464470", + "kind": "compute#network", + "name": "default", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json b/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json new file mode 100644 index 0000000000..b615cadd56 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json @@ -0,0 +1,9 @@ +{ + "IPv4Range": "10.11.0.0/16", + "creationTimestamp": "2013-06-26T10:05:03.500-07:00", + "gatewayIPv4": "10.11.0.1", + "id": "16211908079305042870", + "kind": "compute#network", + "name": "lcnetwork", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/lcnetwork" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json b/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json new file mode 100644 index 0000000000..cc65f00004 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json @@ -0,0 +1,14 @@ +{ + "id": "4914541423567262393", + "insertTime": "2013-06-26T10:05:11.102-07:00", + "kind": "compute#operation", + "name": "operation-global_networks_lcnetwork_delete", + "operationType": "delete", + "progress": 0, + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/global/operations/operation-global_networks_lcnetwork_delete", + "startTime": "2013-06-26T10:05:11.273-07:00", + "status": "PENDING", + "targetId": "16211908079305042870", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/lcnetwork", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.json b/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.json new file mode 100644 index 0000000000..93915c00cc --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.json @@ -0,0 +1,9 @@ +{ + "IPv4Range": "10.10.0.0/16", + "creationTimestamp": "2013-06-26T09:51:34.018-07:00", + "gatewayIPv4": "10.10.0.1", + "id": "13254259054875092094", + "kind": "compute#network", + "name": "libcloud-demo-europe-network", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/libcloud-demo-europe-network" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json b/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json new file mode 100644 index 0000000000..55b9be1613 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json @@ -0,0 +1,9 @@ +{ + "IPv4Range": "10.10.0.0/16", + "creationTimestamp": "2013-06-26T09:48:15.703-07:00", + "gatewayIPv4": "10.10.0.1", + "id": "17172579178188075621", + "kind": "compute#network", + "name": "libcloud-demo-network", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/libcloud-demo-network" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/global_networks_post.json 
b/libcloud/test/compute/fixtures/gce/global_networks_post.json new file mode 100644 index 0000000000..72fca77516 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/global_networks_post.json @@ -0,0 +1,13 @@ +{ + "id": "3681664092089171723", + "insertTime": "2013-06-26T10:05:03.271-07:00", + "kind": "compute#operation", + "name": "operation-global_networks_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/operations/operation-global_networks_post", + "startTime": "2013-06-26T10:05:03.315-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/lcnetwork", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.json b/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.json new file mode 100644 index 0000000000..56c6db60ca --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.json @@ -0,0 +1,15 @@ +{ + "endTime": "2013-06-26T10:05:00.978-07:00", + "id": "8983098895755095934", + "insertTime": "2013-06-26T10:04:53.453-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_lcfirewall_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_delete", + "startTime": "2013-06-26T10:04:53.508-07:00", + "status": "DONE", + "targetId": "0565629596395414121", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git 
a/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.json b/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.json new file mode 100644 index 0000000000..e50185c1cf --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.json @@ -0,0 +1,15 @@ +{ + "endTime": "2013-06-26T20:52:10.075-07:00", + "id": "6526551968265354277", + "insertTime": "2013-06-26T20:52:00.355-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_lcfirewall_put", + "operationType": "update", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_put", + "startTime": "2013-06-26T20:52:00.410-07:00", + "status": "DONE", + "targetId": "10942695305090163011", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.json b/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.json new file mode 100644 index 0000000000..57623d9cd7 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.json @@ -0,0 +1,15 @@ +{ + "endTime": "2013-06-26T20:51:12.108-07:00", + "id": "16789512465352307784", + "insertTime": "2013-06-26T20:51:06.068-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_post", + "operationType": "insert", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/operations/operation-global_firewalls_post", + "startTime": "2013-06-26T20:51:06.128-07:00", + "status": "DONE", + "targetId": "10942695305090163011", + "targetLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.json b/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.json new file mode 100644 index 0000000000..a5e75cfe25 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.json @@ -0,0 +1,15 @@ +{ + "endTime": "2013-06-26T10:05:12.607-07:00", + "id": "4914541423567262393", + "insertTime": "2013-06-26T10:05:11.102-07:00", + "kind": "compute#operation", + "name": "operation-global_networks_lcnetwork_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/operations/operation-global_networks_lcnetwork_delete", + "startTime": "2013-06-26T10:05:11.273-07:00", + "status": "DONE", + "targetId": "16211908079305042870", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/lcnetwork", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.json b/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.json new file mode 100644 index 0000000000..d0a989e47f --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.json @@ -0,0 +1,15 @@ +{ + "endTime": "2013-06-26T10:05:07.630-07:00", + "id": "3681664092089171723", + "insertTime": "2013-06-26T10:05:03.271-07:00", + "kind": "compute#operation", + "name": "operation-global_networks_post", + "operationType": "insert", + "progress": 100, + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/global/operations/operation-global_networks_post", + "startTime": "2013-06-26T10:05:03.315-07:00", + "status": "DONE", + "targetId": "16211908079305042870", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/lcnetwork", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.json b/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.json new file mode 100644 index 0000000000..a138e5cf99 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.json @@ -0,0 +1,15 @@ +{ + "id": "7128783508312083402", + "insertTime": "2013-06-26T12:21:44.075-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_addresses_lcaddress_delete", + "operationType": "delete", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_lcaddress_delete", + "startTime": "2013-06-26T12:21:44.110-07:00", + "status": "DONE", + "targetId": "01531551729918243104", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses/lcaddress", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.json b/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.json new file mode 100644 index 0000000000..baf5d57b1d --- /dev/null +++ 
b/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.json @@ -0,0 +1,15 @@ +{ + "id": "16064059851942653139", + "insertTime": "2013-06-26T12:21:40.299-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_addresses_post", + "operationType": "insert", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_post", + "startTime": "2013-06-26T12:21:40.358-07:00", + "status": "DONE", + "targetId": "01531551729918243104", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses/lcaddress", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.json new file mode 100644 index 0000000000..96ffebc168 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.json @@ -0,0 +1,25 @@ +{ + "error": { + "errors": [ + { + "code": "RESOURCE_ALREADY_EXISTS", + "message": "The resource 'projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node' already exists" + } + ] + }, + "httpErrorMessage": "CONFLICT", + "httpErrorStatusCode": 409, + "id": "1510575454210533141", + "insertTime": "2013-06-26T20:57:34.366-07:00", + "kind": "compute#operation", + "name": "operation-zones_europe-west1-a_instances_post", + "operationType": "insert", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/operations/operation-zones_europe-west1-a_instances_post", + "startTime": 
"2013-06-26T20:57:34.453-07:00", + "status": "DONE", + "targetId": "14308265828754333159", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.json new file mode 100644 index 0000000000..e71f2a3c41 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.json @@ -0,0 +1,15 @@ +{ + "id": "06887337364510109333", + "insertTime": "2013-06-26T10:06:11.835-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_disks_lcdisk_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_lcdisk_delete", + "startTime": "2013-06-26T10:06:12.006-07:00", + "status": "DONE", + "targetId": "16109451798967042451", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/lcdisk", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.json new file mode 100644 index 0000000000..122a7a88f2 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.json @@ -0,0 
+1,16 @@ +{ + "endTime": "2013-06-26T16:48:25.375-07:00", + "id": "0211151278250678078", + "insertTime": "2013-06-26T16:48:17.403-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_disks_post", + "operationType": "insert", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_post", + "startTime": "2013-06-26T16:48:17.479-07:00", + "status": "DONE", + "targetId": "03196637868764498730", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/lcdisk", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json new file mode 100644 index 0000000000..524fe81d89 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:13:36.800-07:00", + "id": "3319596145594427549", + "insertTime": "2013-06-26T16:13:12.903-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_lcnode-000_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-000_delete", + "startTime": "2013-06-26T16:13:12.948-07:00", + "status": "DONE", + "targetId": "5390075309006132922", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/lcnode-000", + "user": 
"897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json new file mode 100644 index 0000000000..369e9d28c8 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:13:56.931-07:00", + "id": "17469711273432628502", + "insertTime": "2013-06-26T16:13:40.579-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_lcnode-001_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-001_delete", + "startTime": "2013-06-26T16:13:40.620-07:00", + "status": "DONE", + "targetId": "16630486471904253898", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/lcnode-001", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json new file mode 100644 index 0000000000..1563c39d9a --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:48:31.831-07:00", + "id": 
"7455886659787654716", + "insertTime": "2013-06-26T16:48:27.691-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_attachDisk_post", + "operationType": "attachDisk", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_attachDisk_post", + "startTime": "2013-06-26T16:48:27.762-07:00", + "status": "DONE", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.json new file mode 100644 index 0000000000..8e728e7fd4 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.json @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T10:06:00.917-07:00", + "id": "6999931397447918763", + "insertTime": "2013-06-26T10:05:40.350-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_delete", + "startTime": "2013-06-26T10:05:40.405-07:00", + "status": "DONE", + "targetId": "07410051435384876224", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + 
"zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json new file mode 100644 index 0000000000..fdf2a75605 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:48:41.278-07:00", + "id": "3921383727105838816", + "insertTime": "2013-06-26T16:48:35.357-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_detachDisk_post", + "operationType": "detachDisk", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_detachDisk_post", + "startTime": "2013-06-26T16:48:35.398-07:00", + "status": "DONE", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.json new file mode 100644 index 0000000000..28d87efe41 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.json @@ -0,0 +1,15 @@ +{ + "id": "10507122129283663728", + "insertTime": "2013-06-26T15:03:02.766-07:00", + "kind": 
"compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_reset_post", + "operationType": "reset", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_reset_post", + "startTime": "2013-06-26T15:03:02.813-07:00", + "status": "DONE", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json new file mode 100644 index 0000000000..14c26f1394 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T21:20:10.487-07:00", + "id": "8115150846190320932", + "insertTime": "2013-06-26T21:20:03.962-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_setTags_post", + "operationType": "setTags", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_setTags_post", + "startTime": "2013-06-26T21:20:04.103-07:00", + "status": "DONE", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.json b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.json new file mode 100644 index 0000000000..ab62f88fa9 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.json @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:13:08.382-07:00", + "id": "1858155812259649243", + "insertTime": "2013-06-26T16:12:51.492-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_post", + "operationType": "insert", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_post", + "startTime": "2013-06-26T16:12:51.537-07:00", + "status": "DONE", + "targetId": "16630486471904253898", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/lcnode-001", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/project.json b/libcloud/test/compute/fixtures/gce/project.json new file mode 100644 index 0000000000..b8b746cfb9 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/project.json @@ -0,0 +1,74 @@ +{ + "commonInstanceMetadata": { + "items": [ + { + "key": "sshKeys", + "value": "ASDFASDF" + } + ], + "kind": "compute#metadata" + }, + "creationTimestamp": "2013-02-05T16:19:20.516-08:00", + "description": "", + "id": "2193465259114366848", + "kind": "compute#project", + "name": "project_name", + "quotas": [ + { + "limit": 8.0, + "metric": "INSTANCES", + "usage": 7.0 + }, + { + "limit": 8.0, + 
"metric": "CPUS", + "usage": 7.0 + }, + { + "limit": 8.0, + "metric": "EPHEMERAL_ADDRESSES", + "usage": 7.0 + }, + { + "limit": 8.0, + "metric": "DISKS", + "usage": 3.0 + }, + { + "limit": 1024.0, + "metric": "DISKS_TOTAL_GB", + "usage": 30.0 + }, + { + "limit": 1000.0, + "metric": "SNAPSHOTS", + "usage": 0.0 + }, + { + "limit": 5.0, + "metric": "NETWORKS", + "usage": 3.0 + }, + { + "limit": 100.0, + "metric": "FIREWALLS", + "usage": 4.0 + }, + { + "limit": 100.0, + "metric": "IMAGES", + "usage": 0.0 + }, + { + "limit": 7.0, + "metric": "STATIC_ADDRESSES", + "usage": 3.0 + }, + { + "limit": 100.0, + "metric": "ROUTES", + "usage": 6.0 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json b/libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json new file mode 100644 index 0000000000..461b0c92f0 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json @@ -0,0 +1,157 @@ +{ + "id": "projects/debian-cloud/global/images", + "items": [ + { + "creationTimestamp": "2013-05-07T17:09:22.111-07:00", + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-07", + "id": "647943287916432906", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130507", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130225", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-6-squeeze-v20130507", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2013-05-09T12:56:21.720-07:00", + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-09", + "id": "15745758816845911589", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130509", + "preferredKernel": 
"https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130225", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-6-squeeze-v20130509", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2013-05-14T21:01:12.124-07:00", + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-15", + "id": "006866479348046290", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130515", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130515", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-6-squeeze-v20130515", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2013-05-30T09:48:37.837-07:00", + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-22", + "id": "1266148899538866390", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130522", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130522", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-6-squeeze-v20130522", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2013-06-19T13:45:44.111-07:00", + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-06-17", + "id": "04009358257173422091", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130617", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130603", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-6-squeeze-v20130617", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2013-05-07T17:01:30.071-07:00", + "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-07", + "id": "15638477823580670459", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130507", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130225", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130507", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2013-05-09T12:56:47.910-07:00", + "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-09", + "id": "020034532765408091", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130509", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130225", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130509", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2013-05-14T21:02:55.044-07:00", + "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-15", + "id": "0587071888358410836", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130515", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130515", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130515", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2013-05-30T09:47:30.980-07:00", + "description": "Debian GNU/Linux 7.0 (wheezy) built on 
2013-05-22", + "id": "622079684385221180", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130522", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130522", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130522", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2013-06-19T13:47:20.563-07:00", + "description": "Debian GNU/Linux 7.1 (wheezy) built on 2013-06-17", + "id": "1549141992333368759", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130617", + "preferredKernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130603", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "sourceType": "RAW", + "status": "READY" + } + ], + "kind": "compute#imageList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json b/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json new file mode 100644 index 0000000000..a75d6339c2 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json @@ -0,0 +1,29 @@ +{ + "id": "projects/project_name/regions/us-central1/addresses", + "items": [ + { + "address": "108.59.82.4", + "creationTimestamp": "2013-06-26T09:48:31.184-07:00", + "description": "", + "id": "17634862894218443422", + "kind": "compute#address", + "name": "libcloud-demo-address", + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses/libcloud-demo-address", + "status": "RESERVED" + }, + { + "address": "173.255.114.104", + "creationTimestamp": "2013-06-04T16:28:43.764-07:00", + "description": "", + "id": "11879548153827627972", + "kind": "compute#address", + "name": "testaddress", + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses/testaddress", + "status": "RESERVED" + } + ], + "kind": "compute#addressList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.json b/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.json new file mode 100644 index 0000000000..5d2838e8db --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.json @@ -0,0 +1,11 @@ +{ + "address": "173.255.113.20", + "creationTimestamp": "2013-06-26T12:21:40.625-07:00", + "description": "", + "id": "01531551729918243104", + "kind": "compute#address", + "name": "lcaddress", + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses/lcaddress", + "status": "RESERVED" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.json b/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.json new file mode 100644 index 0000000000..afa45083ab --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.json @@ -0,0 +1,15 @@ +{ + "id": "7128783508312083402", + 
"insertTime": "2013-06-26T12:21:44.075-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_addresses_lcaddress_delete", + "operationType": "delete", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_lcaddress_delete", + "startTime": "2013-06-26T12:21:44.110-07:00", + "status": "PENDING", + "targetId": "01531551729918243104", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses/lcaddress", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json b/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json new file mode 100644 index 0000000000..1242c94ec6 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json @@ -0,0 +1,14 @@ +{ + "id": "16064059851942653139", + "insertTime": "2013-06-26T12:21:40.299-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_addresses_post", + "operationType": "insert", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_post", + "startTime": "2013-06-26T12:21:40.358-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1/addresses/lcaddress", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones.json b/libcloud/test/compute/fixtures/gce/zones.json new file mode 
100644 index 0000000000..2c45d56d9f --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones.json @@ -0,0 +1,207 @@ +{ + "id": "projects/project_name/zones", + "items": [ + { + "creationTimestamp": "2013-02-05T16:19:23.254-08:00", + "description": "europe-west1-a", + "id": "13416642339679437530", + "kind": "compute#zone", + "maintenanceWindows": [ + { + "beginTime": "2013-08-03T12:00:00.000-07:00", + "description": "maintenance zone", + "endTime": "2013-08-18T12:00:00.000-07:00", + "name": "2013-08-03-planned-outage" + } + ], + "name": "europe-west1-a", + "quotas": [ + { + "limit": 8.0, + "metric": "INSTANCES", + "usage": 3.0 + }, + { + "limit": 8.0, + "metric": "CPUS", + "usage": 3.0 + }, + { + "limit": 8.0, + "metric": "DISKS", + "usage": 1.0 + }, + { + "limit": 1024.0, + "metric": "DISKS_TOTAL_GB", + "usage": 10.0 + } + ], + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/europe-west1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a", + "status": "UP" + }, + { + "creationTimestamp": "2013-02-05T16:19:23.254-08:00", + "description": "europe-west1-b", + "id": "20623650177407096", + "kind": "compute#zone", + "maintenanceWindows": [ + { + "beginTime": "2013-09-28T12:00:00.000-07:00", + "description": "maintenance zone", + "endTime": "2013-10-13T12:00:00.000-07:00", + "name": "2013-09-28-planned-outage" + } + ], + "name": "europe-west1-b", + "quotas": [ + { + "limit": 8.0, + "metric": "INSTANCES", + "usage": 0.0 + }, + { + "limit": 8.0, + "metric": "CPUS", + "usage": 0.0 + }, + { + "limit": 8.0, + "metric": "DISKS", + "usage": 0.0 + }, + { + "limit": 1024.0, + "metric": "DISKS_TOTAL_GB", + "usage": 0.0 + } + ], + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/europe-west1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-b", + "status": "UP" + }, + { + "creationTimestamp": 
"2013-02-05T16:19:23.269-08:00", + "description": "us-central1-a", + "id": "13462829244527433283", + "kind": "compute#zone", + "maintenanceWindows": [ + { + "beginTime": "2013-08-17T12:00:00.000-07:00", + "description": "maintenance zone", + "endTime": "2013-09-01T12:00:00.000-07:00", + "name": "2013-08-17-planned-outage" + } + ], + "name": "us-central1-a", + "quotas": [ + { + "limit": 8.0, + "metric": "INSTANCES", + "usage": 4.0 + }, + { + "limit": 8.0, + "metric": "CPUS", + "usage": 4.0 + }, + { + "limit": 8.0, + "metric": "DISKS", + "usage": 2.0 + }, + { + "limit": 1024.0, + "metric": "DISKS_TOTAL_GB", + "usage": 20.0 + } + ], + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a", + "status": "UP" + }, + { + "creationTimestamp": "2013-02-05T16:19:23.269-08:00", + "description": "us-central1-b", + "id": "1045862591201432620", + "kind": "compute#zone", + "maintenanceWindows": [ + { + "beginTime": "2013-10-26T12:00:00.000-07:00", + "description": "maintenance zone", + "endTime": "2013-11-10T12:00:00.000-08:00", + "name": "2013-10-26-planned-outage" + } + ], + "name": "us-central1-b", + "quotas": [ + { + "limit": 8.0, + "metric": "INSTANCES", + "usage": 0.0 + }, + { + "limit": 8.0, + "metric": "CPUS", + "usage": 0.0 + }, + { + "limit": 8.0, + "metric": "DISKS", + "usage": 0.0 + }, + { + "limit": 1024.0, + "metric": "DISKS_TOTAL_GB", + "usage": 0.0 + } + ], + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-b", + "status": "UP" + }, + { + "creationTimestamp": "2013-02-05T16:19:23.257-08:00", + "description": "us-central2-a", + "id": "1001467574647549152", + "kind": "compute#zone", + "maintenanceWindows": [ + { + "beginTime": "2013-10-12T12:00:00.000-07:00", + "description": 
"maintenance zone", + "endTime": "2013-10-27T12:00:00.000-07:00", + "name": "2013-10-12-planned-outage" + } + ], + "name": "us-central2-a", + "quotas": [ + { + "limit": 8.0, + "metric": "INSTANCES", + "usage": 0.0 + }, + { + "limit": 8.0, + "metric": "CPUS", + "usage": 0.0 + }, + { + "limit": 8.0, + "metric": "DISKS", + "usage": 0.0 + }, + { + "limit": 1024.0, + "metric": "DISKS_TOTAL_GB", + "usage": 0.0 + } + ], + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central2-a", + "status": "UP" + } + ], + "kind": "compute#zoneList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json b/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json new file mode 100644 index 0000000000..5965c46432 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json @@ -0,0 +1,145 @@ +{ + "id": "projects/project_name/zones/europe-west1-a/instances", + "items": [ + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:13:38.295-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "4658881585544531189", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-multiple-nodes-000", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "192.158.29.167", + "type": 
"ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.144.78" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-multiple-nodes-000", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:12:29.726-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "14308265828754333159", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-np-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "192.158.29.88", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.66.77" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:13:21.549-07:00", + "disks": [ + { + "boot": 
true, + "deviceName": "libcloud-demo-europe-boot-disk", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-boot-disk", + "type": "PERSISTENT" + } + ], + "id": "0681789716029574243", + "kernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130603", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-persist-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "192.158.29.121", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.206.91" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-persist-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a" + } + ], + "kind": "compute#instanceList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/instances" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json b/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json new file mode 100644 index 0000000000..8a5813ddef --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json @@ -0,0 +1,15 @@ +{ + "id": "1510575454210533141", + "insertTime": 
"2013-06-26T20:57:34.366-07:00", + "kind": "compute#operation", + "name": "operation-zones_europe-west1-a_instances_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/operations/operation-zones_europe-west1-a_instances_post", + "startTime": "2013-06-26T20:57:34.453-07:00", + "status": "PENDING", + "targetId": "14308265828754333159", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.json b/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.json new file mode 100644 index 0000000000..f66e26c1ac --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.json @@ -0,0 +1,14 @@ +{ + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "zone": "europe-west1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a.json new file mode 100644 index 0000000000..5fae31395c --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a.json @@ -0,0 +1,40 @@ +{ + "creationTimestamp": "2013-02-05T16:19:23.269-08:00", + 
"description": "us-central1-a", + "id": "13462829244527433283", + "kind": "compute#zone", + "maintenanceWindows": [ + { + "beginTime": "2013-08-17T12:00:00.000-07:00", + "description": "maintenance zone", + "endTime": "2013-09-01T12:00:00.000-07:00", + "name": "2013-08-17-planned-outage" + } + ], + "name": "us-central1-a", + "quotas": [ + { + "limit": 8.0, + "metric": "INSTANCES", + "usage": 4.0 + }, + { + "limit": 8.0, + "metric": "CPUS", + "usage": 4.0 + }, + { + "limit": 8.0, + "metric": "DISKS", + "usage": 2.0 + }, + { + "limit": 1024.0, + "metric": "DISKS_TOTAL_GB", + "usage": 20.0 + } + ], + "region": "https://www.googleapis.com/compute/v1beta15/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a", + "status": "UP" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json new file mode 100644 index 0000000000..fad39f7ab4 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json @@ -0,0 +1,37 @@ +{ + "id": "projects/project_name/zones/us-central1-a/disks", + "items": [ + { + "creationTimestamp": "2013-06-26T10:06:04.007-07:00", + "id": "16109451798967042451", + "kind": "compute#disk", + "name": "lcdisk", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/lcdisk", + "sizeGb": "1", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "creationTimestamp": "2013-06-26T09:47:09.178-07:00", + "id": "10880026303683859871", + "kind": "compute#disk", + "name": "libcloud-demo-boot-disk", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/libcloud-demo-boot-disk", + "sizeGb": "10", + "status": "READY", + "zone": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "creationTimestamp": "2013-06-25T10:57:34.305-07:00", + "id": "14383387450728762434", + "kind": "compute#disk", + "name": "test-disk", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/test-disk", + "sizeGb": "10", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + } + ], + "kind": "compute#diskList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json new file mode 100644 index 0000000000..85d7e31427 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json @@ -0,0 +1,10 @@ +{ + "creationTimestamp": "2013-06-26T10:06:04.007-07:00", + "id": "16109451798967042451", + "kind": "compute#disk", + "name": "lcdisk", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/lcdisk", + "sizeGb": "1", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.json new file mode 100644 index 0000000000..024da2a086 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.json @@ -0,0 +1,15 @@ +{ + "id": "06887337364510109333", + "insertTime": "2013-06-26T10:06:11.835-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_disks_lcdisk_delete", + "operationType": "delete", + "progress": 0, + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_lcdisk_delete", + "startTime": "2013-06-26T10:06:12.006-07:00", + "status": "PENDING", + "targetId": "16109451798967042451", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/lcdisk", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json new file mode 100644 index 0000000000..51cf6c0709 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json @@ -0,0 +1,14 @@ +{ + "id": "0211151278250678078", + "insertTime": "2013-06-26T16:48:17.403-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_disks_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_post", + "startTime": "2013-06-26T16:48:17.479-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/lcdisk", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json new file mode 100644 index 0000000000..1cbb5c6a7d --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json @@ -0,0 +1,232 @@ +{ + "id": "projects/project_name/zones/us-central1-a/instances", 
+ "items": [ + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:00:12.021-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "1845312225624811608", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "node-name", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "173.255.115.146", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.113.94" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "status": "RUNNING", + "tags": { + "fingerprint": "42WmSpB8rSM=" + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:11:19.247-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "8573880455005118258", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-multiple-nodes-000", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": 
"compute#accessConfig", + "name": "External NAT", + "natIP": "108.59.81.107", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.224.165" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/libcloud-demo-multiple-nodes-000", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:11:19.662-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "17221721898919682654", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-multiple-nodes-001", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "108.59.81.166", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.223.109" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/libcloud-demo-multiple-nodes-001", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "canIpForward": 
false, + "creationTimestamp": "2013-06-26T15:10:09.700-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "03138438763739542377", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-np-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "108.59.80.244", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.147.18" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/libcloud-demo-np-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:11:02.386-07:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-boot-disk", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/disks/libcloud-demo-boot-disk", + "type": "PERSISTENT" + } + ], + "id": "2378270030714524465", + "kernel": "https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130603", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + 
"metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-persist-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "108.59.81.66", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.192.190" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/libcloud-demo-persist-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" + } + ], + "kind": "compute#instanceList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances" +} diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.json new file mode 100644 index 0000000000..0e5ef0d6c2 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.json @@ -0,0 +1,42 @@ +{ + "canIpForward": false, + "creationTimestamp": "2013-06-26T16:12:30.443-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "5390075309006132922", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "lcnode-000", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": 
"compute#accessConfig", + "name": "External NAT", + "natIP": "108.59.81.107", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.106.153" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/lcnode-000", + "status": "RUNNING", + "tags": { + "fingerprint": "42WmSpB8rSM=" + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.json new file mode 100644 index 0000000000..b588ffe7bf --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.json @@ -0,0 +1,15 @@ +{ + "id": "3319596145594427549", + "insertTime": "2013-06-26T16:13:12.903-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_lcnode-000_delete", + "operationType": "delete", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-000_delete", + "startTime": "2013-06-26T16:13:12.948-07:00", + "status": "PENDING", + "targetId": "5390075309006132922", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/lcnode-000", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.json new file mode 100644 index 
0000000000..203e261510 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.json @@ -0,0 +1,42 @@ +{ + "canIpForward": false, + "creationTimestamp": "2013-06-26T16:12:51.782-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "16630486471904253898", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "lcnode-001", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "108.59.81.166", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.96.232" + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/lcnode-001", + "status": "RUNNING", + "tags": { + "fingerprint": "42WmSpB8rSM=" + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.json new file mode 100644 index 0000000000..dcbed4c382 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.json @@ -0,0 +1,15 @@ +{ + "id": "17469711273432628502", + "insertTime": "2013-06-26T16:13:40.579-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_lcnode-001_delete", + 
"operationType": "delete", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-001_delete", + "startTime": "2013-06-26T16:13:40.620-07:00", + "status": "PENDING", + "targetId": "16630486471904253898", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/lcnode-001", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.json new file mode 100644 index 0000000000..396cff1a9a --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.json @@ -0,0 +1,42 @@ +{ + "canIpForward": false, + "creationTimestamp": "2013-06-26T15:00:12.021-07:00", + "disks": [ + { + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "type": "SCRATCH" + } + ], + "id": "1845312225624811608", + "image": "https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "node-name", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "173.255.115.146", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1beta15/projects/project_name/global/networks/default", + "networkIP": "10.240.113.94" + } + ], + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "status": "RUNNING", + "tags": { + "fingerprint": "42WmSpB8rSM=" + }, + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.json new file mode 100644 index 0000000000..01b756b080 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.json @@ -0,0 +1,15 @@ +{ + "id": "7455886659787654716", + "insertTime": "2013-06-26T16:48:27.691-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_attachDisk_post", + "operationType": "attachDisk", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_attachDisk_post", + "startTime": "2013-06-26T16:48:27.762-07:00", + "status": "PENDING", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.json new file mode 100644 index 0000000000..a6542aac38 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.json @@ -0,0 +1,15 @@ +{ + "id": "6999931397447918763", + "insertTime": "2013-06-26T10:05:40.350-07:00", + "kind": 
"compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_delete", + "operationType": "delete", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_delete", + "startTime": "2013-06-26T10:05:40.405-07:00", + "status": "PENDING", + "targetId": "07410051435384876224", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.json new file mode 100644 index 0000000000..2595e523d0 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.json @@ -0,0 +1,15 @@ +{ + "id": "3921383727105838816", + "insertTime": "2013-06-26T16:48:35.357-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_detachDisk_post", + "operationType": "detachDisk", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_detachDisk_post", + "startTime": "2013-06-26T16:48:35.398-07:00", + "status": "PENDING", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git 
a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.json new file mode 100644 index 0000000000..94bc8dda3e --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.json @@ -0,0 +1,15 @@ +{ + "id": "10507122129283663728", + "insertTime": "2013-06-26T15:03:02.766-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_reset_post", + "operationType": "reset", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_reset_post", + "startTime": "2013-06-26T15:03:02.813-07:00", + "status": "PENDING", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.json new file mode 100644 index 0000000000..23ee9ccd7c --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.json @@ -0,0 +1,15 @@ +{ + "id": "8115150846190320932", + "insertTime": "2013-06-26T21:20:03.962-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_setTags_post", + "operationType": "setTags", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_setTags_post", + 
"startTime": "2013-06-26T21:20:04.103-07:00", + "status": "PENDING", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json new file mode 100644 index 0000000000..d03c4b0472 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json @@ -0,0 +1,14 @@ +{ + "id": "1858155812259649243", + "insertTime": "2013-06-26T16:12:51.492-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_post", + "startTime": "2013-06-26T16:12:51.537-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/instances/lcnode-001", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json new file mode 100644 index 0000000000..3b0b2a52e1 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json @@ -0,0 +1,374 @@ +{ + "id": "projects/project_name/zones/us-central1-a/machineTypes", + "items": [ + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared 
physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/f1-micro", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/g1-small", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": 
"2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": 
"https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:59.630-08:00", + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + 
"memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": 
"10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": 
"2012-06-07T13:50:05.677-07:00", + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:50:42.334-07:00", + "description": "8 vCPUs, 30 GB RAM", + "guestCpus": 8, + "id": "04084282969223214132", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 30720, + "name": "n1-standard-8", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-8", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:51:19.936-07:00", + "description": "8 vCPUs, 30 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "00035824420671580077", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 30720, + "name": "n1-standard-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-8-d", + "zone": "us-central1-a" + } + ], + "kind": "compute#machineTypeList", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes" +} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.json 
b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.json new file mode 100644 index 0000000000..736f4bb777 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.json @@ -0,0 +1,14 @@ +{ + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "zone": "us-central1-a" +} \ No newline at end of file diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py new file mode 100644 index 0000000000..d991f6249f --- /dev/null +++ b/libcloud/test/compute/test_gce.py @@ -0,0 +1,703 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Tests for Google Compute Engine Driver +""" +import sys +import unittest +import datetime + +from libcloud.utils.py3 import httplib +from libcloud.compute.drivers.gce import (GCENodeDriver, API_VERSION, + timestamp_to_datetime, + GCEAddress, GCEFirewall, GCENetwork, + GCENodeSize, GCEProject, GCEZone, + GCEError, ResourceExistsError, + QuotaExceededError) +from libcloud.common.google import (GoogleBaseAuthConnection, + GoogleInstalledAppAuthConnection, + GoogleBaseConnection) +from libcloud.test.common.test_google import GoogleAuthMockHttp +from libcloud.compute.base import (Node, NodeImage, NodeSize, NodeLocation, + StorageVolume) + +from libcloud.test import MockHttpTestCase, LibcloudTestCase +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + +from libcloud.test.secrets import GCE_PARAMS, GCE_KEYWORD_PARAMS + + +class GCENodeDriverTest(LibcloudTestCase, TestCaseMixin): + """ + Google Compute Engine Test Class. + """ + # Mock out a few specific calls that interact with the user, system or + # environment. 
+ GoogleBaseConnection._get_token_info_from_file = lambda x: None + GoogleBaseConnection._write_token_info_to_file = lambda x: None + GoogleInstalledAppAuthConnection.get_code = lambda x: '1234' + GCEZone._now = lambda x: datetime.datetime(2013, 6, 26, 19, 0, 0) + datacenter = 'us-central1-a' + + def setUp(self): + GCEMockHttp.test = self + GCENodeDriver.connectionCls.conn_classes = (GCEMockHttp, GCEMockHttp) + GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + GCEMockHttp.type = None + kwargs = GCE_KEYWORD_PARAMS.copy() + kwargs['auth_type'] = 'IA' + kwargs['datacenter'] = self.datacenter + self.driver = GCENodeDriver(*GCE_PARAMS, **kwargs) + + def test_timestamp_to_datetime(self): + timestamp1 = '2013-06-26T10:05:19.340-07:00' + datetime1 = datetime.datetime(2013, 6, 26, 17, 5, 19) + self.assertEqual(timestamp_to_datetime(timestamp1), datetime1) + timestamp2 = '2013-06-26T17:43:15.000-00:00' + datetime2 = datetime.datetime(2013, 6, 26, 17, 43, 15) + self.assertEqual(timestamp_to_datetime(timestamp2), datetime2) + + def test_find_zone(self): + zone1 = self.driver._find_zone('libcloud-demo-np-node', 'instances') + self.assertEqual(zone1, 'us-central1-a') + zone2 = self.driver._find_zone('libcloud-demo-europe-np-node', + 'instances') + self.assertEqual(zone2, 'europe-west1-a') + region = self.driver._find_zone('libcloud-demo-address', 'addresses', + region=True) + self.assertEqual(region, 'us-central1') + + def test_match_images(self): + project = 'debian-cloud' + image = self.driver._match_images(project, 'debian-7') + self.assertEqual(image.name, 'debian-7-wheezy-v20130617') + image = self.driver._match_images(project, 'debian-6') + self.assertEqual(image.name, 'debian-6-squeeze-v20130617') + + def test_ex_list_addresses(self): + address_list = self.driver.ex_list_addresses() + address_list_all = self.driver.ex_list_addresses('all') + address_list_uc1 = self.driver.ex_list_addresses('us-central1') + 
self.assertEqual(len(address_list), 2) + self.assertEqual(len(address_list_all), 4) + self.assertEqual(address_list[0].name, 'libcloud-demo-address') + self.assertEqual(address_list_uc1[0].name, 'libcloud-demo-address') + self.assertEqual(address_list_all[0].name, 'lcaddress') + + def test_ex_list_firewalls(self): + firewalls = self.driver.ex_list_firewalls() + self.assertEqual(len(firewalls), 4) + self.assertEqual(firewalls[0].name, 'default-allow-internal') + + def test_list_images(self): + local_images = self.driver.list_images() + debian_images = self.driver.list_images(ex_project='debian-cloud') + self.assertEqual(len(local_images), 1) + self.assertEqual(len(debian_images), 10) + self.assertEqual(local_images[0].name, 'debian-7-wheezy-v20130617') + + def test_list_locations(self): + locations = self.driver.list_locations() + self.assertEqual(len(locations), 5) + self.assertEqual(locations[0].name, 'europe-west1-a') + + def test_ex_list_networks(self): + networks = self.driver.ex_list_networks() + self.assertEqual(len(networks), 3) + self.assertEqual(networks[0].name, 'default') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + nodes_all = self.driver.list_nodes(ex_zone='all') + nodes_uc1a = self.driver.list_nodes(ex_zone='us-central1-a') + self.assertEqual(len(nodes), 5) + self.assertEqual(len(nodes_all), 8) + self.assertEqual(len(nodes_uc1a), 5) + self.assertEqual(nodes[0].name, 'node-name') + self.assertEqual(nodes_uc1a[0].name, 'node-name') + self.assertEqual(nodes_all[0].name, 'libcloud-demo-persist-node') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + sizes_all = self.driver.list_sizes('all') + self.assertEqual(len(sizes), 22) + self.assertEqual(len(sizes_all), 100) + self.assertEqual(sizes[0].name, 'f1-micro') + self.assertEqual(sizes[0].extra['zone'].name, 'us-central1-a') + self.assertEqual(sizes_all[0].name, 'n1-highmem-8') + self.assertEqual(sizes_all[0].extra['zone'].name, 'us-central1-a') + + def 
test_list_volumes(self): + volumes = self.driver.list_volumes() + volumes_all = self.driver.list_volumes('all') + volumes_uc1a = self.driver.list_volumes('us-central1-a') + self.assertEqual(len(volumes), 3) + self.assertEqual(len(volumes_all), 3) + self.assertEqual(len(volumes_uc1a), 3) + self.assertEqual(volumes[0].name, 'lcdisk') + self.assertEqual(volumes_all[0].name, 'test-disk') + self.assertEqual(volumes_uc1a[0].name, 'lcdisk') + + def test_ex_list_zones(self): + zones = self.driver.ex_list_zones() + self.assertEqual(len(zones), 5) + self.assertEqual(zones[0].name, 'europe-west1-a') + + def test_ex_create_address(self): + address_name = 'lcaddress' + address = self.driver.ex_create_address(address_name) + self.assertTrue(isinstance(address, GCEAddress)) + self.assertEqual(address.name, address_name) + + def test_ex_create_firewall(self): + firewall_name = 'lcfirewall' + allowed = [{'IPProtocol': 'tcp', 'ports': ['4567']}] + source_tags = ['libcloud'] + firewall = self.driver.ex_create_firewall(firewall_name, allowed, + source_tags=source_tags) + self.assertTrue(isinstance(firewall, GCEFirewall)) + self.assertEqual(firewall.name, firewall_name) + + def test_ex_create_network(self): + network_name = 'lcnetwork' + cidr = '10.11.0.0/16' + network = self.driver.ex_create_network(network_name, cidr) + self.assertTrue(isinstance(network, GCENetwork)) + self.assertEqual(network.name, network_name) + self.assertEqual(network.cidr, cidr) + + def test_create_node_req(self): + image = self.driver.ex_get_image('debian-7') + size = self.driver.ex_get_size('n1-standard-1') + location = self.driver.zone + network = self.driver.ex_get_network('default') + tags = ['libcloud'] + metadata = [{'key': 'test_key', 'value': 'test_value'}] + boot_disk = self.driver.ex_get_volume('lcdisk') + node_request, node_data = self.driver._create_node_req('lcnode', size, + image, location, + network, tags, + metadata, + boot_disk) + self.assertEqual(node_request, '/zones/%s/instances' % 
location.name) + self.assertEqual(node_data['metadata'][0]['key'], 'test_key') + self.assertEqual(node_data['tags']['items'][0], 'libcloud') + self.assertEqual(node_data['name'], 'lcnode') + self.assertTrue(node_data['disks'][0]['boot']) + + def test_create_node(self): + node_name = 'node-name' + image = self.driver.ex_get_image('debian-7') + size = self.driver.ex_get_size('n1-standard-1') + node = self.driver.create_node(node_name, size, image) + self.assertTrue(isinstance(node, Node)) + self.assertEqual(node.name, node_name) + + def test_create_node_existing(self): + node_name = 'libcloud-demo-europe-np-node' + image = self.driver.ex_get_image('debian-7') + size = self.driver.ex_get_size('n1-standard-1', zone='europe-west1-a') + self.assertRaises(ResourceExistsError, self.driver.create_node, + node_name, size, image, location='europe-west1-a') + + def test_ex_create_multiple_nodes(self): + base_name = 'lcnode' + image = self.driver.ex_get_image('debian-7') + size = self.driver.ex_get_size('n1-standard-1') + number = 2 + nodes = self.driver.ex_create_multiple_nodes(base_name, size, image, + number) + self.assertEqual(len(nodes), 2) + self.assertTrue(isinstance(nodes[0], Node)) + self.assertTrue(isinstance(nodes[1], Node)) + self.assertEqual(nodes[0].name, '%s-000' % base_name) + self.assertEqual(nodes[1].name, '%s-001' % base_name) + + def test_create_volume(self): + volume_name = 'lcdisk' + size = 1 + volume = self.driver.create_volume(size, volume_name) + self.assertTrue(isinstance(volume, StorageVolume)) + self.assertEqual(volume.name, volume_name) + + def test_ex_update_firewall(self): + firewall_name = 'lcfirewall' + firewall = self.driver.ex_get_firewall(firewall_name) + firewall.source_ranges = ['10.0.0.0/16'] + firewall.source_tags = ['libcloud', 'test'] + firewall2 = self.driver.ex_update_firewall(firewall) + self.assertTrue(isinstance(firewall2, GCEFirewall)) + + def test_reboot_node(self): + node = self.driver.ex_get_node('node-name') + reboot = 
self.driver.reboot_node(node) + self.assertTrue(reboot) + + def test_ex_set_node_tags(self): + new_tags = ['libcloud'] + node = self.driver.ex_get_node('node-name') + set_tags = self.driver.ex_set_node_tags(node, new_tags) + self.assertTrue(set_tags) + + def test_attach_volume(self): + volume = self.driver.ex_get_volume('lcdisk') + node = self.driver.ex_get_node('node-name') + attach = volume.attach(node) + self.assertTrue(attach) + + def test_detach_volume(self): + volume = self.driver.ex_get_volume('lcdisk') + node = self.driver.ex_get_node('node-name') + # This fails since the node is required + detach = volume.detach() + self.assertFalse(detach) + # This should pass + detach = self.driver.detach_volume(volume, node) + self.assertTrue(detach) + + def test_ex_destroy_address(self): + address = self.driver.ex_get_address('lcaddress') + destroyed = address.destroy() + self.assertTrue(destroyed) + + def test_ex_destroy_firewall(self): + firewall = self.driver.ex_get_firewall('lcfirewall') + destroyed = firewall.destroy() + self.assertTrue(destroyed) + + def test_ex_destroy_network(self): + network = self.driver.ex_get_network('lcnetwork') + destroyed = network.destroy() + self.assertTrue(destroyed) + + def test_destroy_node(self): + node = self.driver.ex_get_node('node-name') + destroyed = node.destroy() + self.assertTrue(destroyed) + + def test_ex_destroy_multiple_nodes(self): + nodes = [] + nodes.append(self.driver.ex_get_node('lcnode-000')) + nodes.append(self.driver.ex_get_node('lcnode-001')) + destroyed = self.driver.ex_destroy_multiple_nodes(nodes) + for d in destroyed: + self.assertTrue(d) + + def test_destroy_volume(self): + address = self.driver.ex_get_address('lcaddress') + destroyed = address.destroy() + self.assertTrue(destroyed) + + def test_ex_get_address(self): + address_name = 'lcaddress' + address = self.driver.ex_get_address(address_name) + self.assertEqual(address.name, address_name) + self.assertEqual(address.address, '173.255.113.20') + 
self.assertEqual(address.region, 'us-central1') + self.assertEqual(address.extra['status'], 'RESERVED') + + def test_ex_get_firewall(self): + firewall_name = 'lcfirewall' + firewall = self.driver.ex_get_firewall(firewall_name) + self.assertEqual(firewall.name, firewall_name) + self.assertEqual(firewall.network.name, 'default') + self.assertEqual(firewall.source_tags, ['libcloud']) + + def test_ex_get_image(self): + partial_name = 'debian-7' + image = self.driver.ex_get_image(partial_name) + self.assertEqual(image.name, 'debian-7-wheezy-v20130617') + # A 'debian-7' image exists in the local project + self.assertTrue(image.extra['description'].startswith('Local')) + + partial_name = 'debian-6' + image = self.driver.ex_get_image(partial_name) + self.assertEqual(image.name, 'debian-6-squeeze-v20130617') + self.assertTrue(image.extra['description'].startswith('Debian')) + + def test_ex_get_network(self): + network_name = 'lcnetwork' + network = self.driver.ex_get_network(network_name) + self.assertEqual(network.name, network_name) + self.assertEqual(network.cidr, '10.11.0.0/16') + self.assertEqual(network.extra['gatewayIPv4'], '10.11.0.1') + + def test_ex_get_project(self): + project = self.driver.ex_get_project() + self.assertEqual(project.name, 'project_name') + instances_quota = project.quotas[0] + self.assertEqual(instances_quota['usage'], 7.0) + self.assertEqual(instances_quota['limit'], 8.0) + + def test_ex_get_size(self): + size_name = 'n1-standard-1' + size = self.driver.ex_get_size(size_name) + self.assertEqual(size.name, size_name) + self.assertEqual(size.extra['zone'].name, 'us-central1-a') + self.assertEqual(size.disk, 10) + self.assertEqual(size.ram, 3840) + self.assertEqual(size.extra['guestCpus'], 1) + + def test_ex_get_volume(self): + volume_name = 'lcdisk' + volume = self.driver.ex_get_volume(volume_name) + self.assertEqual(volume.name, volume_name) + self.assertEqual(volume.size, '1') + self.assertEqual(volume.extra['status'], 'READY') + + def 
test_ex_get_zone(self): + zone_name = 'us-central1-a' + expected_time_until = datetime.timedelta(days=52) + expected_duration = datetime.timedelta(days=15) + zone = self.driver.ex_get_zone(zone_name) + self.assertEqual(zone.name, zone_name) + self.assertEqual(zone.time_until_mw, expected_time_until) + self.assertEqual(zone.next_mw_duration, expected_duration) + + +class GCEMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('gce') + json_hdr = {'content-type': 'application/json; charset=UTF-8'} + + def _get_method_name(self, type, use_param, qs, path): + api_path = '/compute/%s' % API_VERSION + project_path = '/projects/%s' % GCE_KEYWORD_PARAMS['project'] + path = path.replace(api_path, '') + # This replace is separate, since there is a call with a different + # project name + path = path.replace(project_path, '') + # The path to get project information is the base path, so use a fake + # '/project' path instead + if not path: + path = '/project' + method_name = super(GCEMockHttp, self)._get_method_name(type, + use_param, + qs, path) + return method_name + + def _aggregated_addresses(self, method, url, body, headers): + body = self.fixtures.load('aggregated_addresses.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _aggregated_disks(self, method, url, body, headers): + body = self.fixtures.load('aggregated_disks.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _aggregated_instances(self, method, url, body, headers): + body = self.fixtures.load('aggregated_instances.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _aggregated_machineTypes(self, method, url, body, headers): + body = self.fixtures.load('aggregated_machineTypes.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_firewalls(self, method, url, body, headers): + if method == 'POST': + body = 
self.fixtures.load('global_firewalls_post.json') + else: + body = self.fixtures.load('global_firewalls.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_firewalls_lcfirewall(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load( + 'global_firewalls_lcfirewall_delete.json') + elif method == 'PUT': + body = self.fixtures.load('global_firewalls_lcfirewall_put.json') + else: + body = self.fixtures.load('global_firewalls_lcfirewall.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_images(self, method, url, body, headers): + body = self.fixtures.load('global_images.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_networks(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('global_networks_post.json') + else: + body = self.fixtures.load('global_networks.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_networks_default(self, method, url, body, headers): + body = self.fixtures.load('global_networks_default.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_networks_libcloud_demo_network(self, method, url, body, + headers): + body = self.fixtures.load('global_networks_libcloud-demo-network.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_networks_libcloud_demo_europe_network(self, method, url, body, + headers): + body = self.fixtures.load( + 'global_networks_libcloud-demo-europe-network.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_networks_lcnetwork(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load('global_networks_lcnetwork_delete.json') + else: + body = self.fixtures.load('global_networks_lcnetwork.json') + return (httplib.OK, body, 
self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_firewalls_lcfirewall_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_firewalls_lcfirewall_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_firewalls_lcfirewall_put( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_firewalls_lcfirewall_put.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_firewalls_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_firewalls_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_networks_lcnetwork_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_networks_lcnetwork_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_networks_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_networks_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_addresses_lcaddress_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_addresses_lcaddress_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_addresses_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_addresses_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def 
_zones_us_central1_a_operations_operation_zones_us_central1_a_disks_lcdisk_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_disks_lcdisk_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_disks_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_disks_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_lcnode_000_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_lcnode_001_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_node-name_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_attachDisk_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_detachDisk_post( + self, method, url, body, 
headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_setTags_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_reset_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_node-name_reset_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_europe_west1_a_operations_operation_zones_europe_west1_a_instances_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_europe-west1-a_instances_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _project(self, method, url, body, headers): + body = self.fixtures.load('project.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _projects_debian_cloud_global_images(self, method, url, body, headers): + body = self.fixtures.load('projects_debian-cloud_global_images.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_addresses(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load( + 
'regions_us-central1_addresses_post.json') + else: + body = self.fixtures.load('regions_us-central1_addresses.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_addresses_lcaddress(self, method, url, body, + headers): + if method == 'DELETE': + body = self.fixtures.load( + 'regions_us-central1_addresses_lcaddress_delete.json') + else: + body = self.fixtures.load( + 'regions_us-central1_addresses_lcaddress.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones(self, method, url, body, headers): + body = self.fixtures.load('zones.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_disks(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('zones_us-central1-a_disks_post.json') + else: + body = self.fixtures.load('zones_us-central1-a_disks.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_disks_lcdisk(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load( + 'zones_us-central1-a_disks_lcdisk_delete.json') + else: + body = self.fixtures.load('zones_us-central1-a_disks_lcdisk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_europe_west1_a_instances(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load( + 'zones_europe-west1-a_instances_post.json') + else: + body = self.fixtures.load('zones_europe-west1-a_instances.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load( + 'zones_us-central1-a_instances_post.json') + else: + body = self.fixtures.load('zones_us-central1-a_instances.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def 
_zones_us_central1_a_instances_node_name(self, method, url, body, + headers): + if method == 'DELETE': + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name_delete.json') + else: + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_node_name_attachDisk( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name_attachDisk_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_node_name_detachDisk( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name_detachDisk_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_node_name_setTags( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name_setTags_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_node_name_reset( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name_reset_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_lcnode_000(self, method, url, body, + headers): + if method == 'DELETE': + body = self.fixtures.load( + 'zones_us-central1-a_instances_lcnode-000_delete.json') + else: + body = self.fixtures.load( + 'zones_us-central1-a_instances_lcnode-000.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_lcnode_001(self, method, url, body, + headers): + if method == 'DELETE': + body = self.fixtures.load( + 'zones_us-central1-a_instances_lcnode-001_delete.json') + else: + body = self.fixtures.load( 
+ 'zones_us-central1-a_instances_lcnode-001.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a(self, method, url, body, headers): + body = self.fixtures.load('zones_us-central1-a.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_machineTypes(self, method, url, body, headers): + body = self.fixtures.load('zones_us-central1-a_machineTypes.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_europe_west1_a_machineTypes_n1_standard_1(self, method, url, + body, headers): + body = self.fixtures.load( + 'zones_europe-west1-a_machineTypes_n1-standard-1.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_machineTypes_n1_standard_1(self, method, url, + body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_machineTypes_n1-standard-1.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/libcloud/test/secrets.py-dist b/libcloud/test/secrets.py-dist index cd6c83b50e..e5adce09fe 100644 --- a/libcloud/test/secrets.py-dist +++ b/libcloud/test/secrets.py-dist @@ -22,6 +22,9 @@ DREAMHOST_PARAMS = ('key',) EC2_PARAMS = ('access_id', 'secret') ECP_PARAMS = ('user_name', 'password') GANDI_PARAMS = ('user',) +GCE_PARAMS = ('email_address', 'key') # Service Account Authentication +#GCE_PARAMS = ('client_id', 'client_secret') # Installed App Authentication +GCE_KEYWORD_PARAMS = {'project': 'project_name'} HOSTINGCOM_PARAMS = ('user', 'secret') IBM_PARAMS = ('user', 'secret') # OPENSTACK_PARAMS = ('user_name', 'api_key', secure_bool, 'host', port_int) From 31a148b3969559d3efd97af62cdc3479b6894a68 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 13 Jul 2013 12:33:20 +0200 Subject: [PATCH 112/143] Update changes. 
--- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index 9ca8cebbbd..941238fc7c 100644 --- a/CHANGES +++ b/CHANGES @@ -16,6 +16,9 @@ Changes with Apache Libcloud in development (LIBCLOUD-353) [Bernard Kerckenaere] + - Add new driver for Google Compute Engine (LIBCLOUD-266) + [Rick Wright] + Changes with Apache Libcloud 0.13.0: *) General From 8fd8acd8be4a72f010792c0d5ff4e9346fbe5538 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Mon, 15 Jul 2013 09:54:25 -0700 Subject: [PATCH 113/143] Added some as-yet-unimplemented methods to the base compute driver. Signed-off-by: Tomaz Muraus --- libcloud/compute/base.py | 57 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 03f0ecf60c..5014a5495a 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -403,8 +403,23 @@ def detach(self): return self.driver.detach_volume(volume=self) + def list_snapshots(self): + """ + @returns C{list} of C{VolumeSnapshot} + """ + return self.driver.list_volume_snapshots(volume=self) + + def snapshot(self, name): + """ + Creates a snapshot of this volume. + + @returns C{VolumeSnapshot} + """ + return self.driver.snapshot_volume(volume=self, name=name) + def destroy(self): - """Destroy this storage volume. + """ + Destroy this storage volume. @returns C{bool} """ @@ -416,6 +431,19 @@ def __repr__(self): self.id, self.size, self.driver.name) +class VolumeSnapshot(object): + def __init__(self, driver): + self.driver = driver + + def destroy(self): + """ + Destroys this snapshot. + + @returns C{bool} + """ + return self.driver.destroy_snapshot(snapshot=self) + + class NodeDriver(BaseDriver): """ A base NodeDriver class to derive from @@ -780,6 +808,33 @@ def list_volumes(self): raise NotImplementedError( 'list_volumes not implemented for this driver') + def list_volume_snapshots(self, volume): + """ + List snapshots for a storage volume. 
+ + @rtype: C{list} of L{VolumeSnapshot} + """ + raise NotImplementedError( + 'list_volume_snapshots not implemented for this driver') + + def snapshot_volume(self, volume, name): + """ + Creates a snapshot of the storage volume. + + @rtype: L{VolumeSnapshot} + """ + raise NotImplementedError( + 'snapshot_volume not implemented for this driver') + + def destroy_snapshot(self, snapshot): + """ + Destroys a snapshot. + + @rtype: L{bool} + """ + raise NotImplementedError( + 'destroy_snapshot not implemented for this driver') + def _wait_until_running(self, node, wait_period=3, timeout=600, ssh_interface='public_ips', force_ipv4=True): # This is here for backward compatibility and will be removed in the From de0559a84dba8b4d02c04cb1616ec7de85138bc7 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Tue, 16 Jul 2013 12:19:09 -0700 Subject: [PATCH 114/143] Rename some methods for increased consistency. Signed-off-by: Tomaz Muraus --- libcloud/compute/base.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 5014a5495a..b5def91f41 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -415,7 +415,7 @@ def snapshot(self, name): @returns C{VolumeSnapshot} """ - return self.driver.snapshot_volume(volume=self, name=name) + return self.driver.create_volume_snapshot(volume=self, name=name) def destroy(self): """ @@ -441,7 +441,7 @@ def destroy(self): @returns C{bool} """ - return self.driver.destroy_snapshot(snapshot=self) + return self.driver.destroy_volume_snapshot(snapshot=self) class NodeDriver(BaseDriver): @@ -817,23 +817,23 @@ def list_volume_snapshots(self, volume): raise NotImplementedError( 'list_volume_snapshots not implemented for this driver') - def snapshot_volume(self, volume, name): + def create_volume_snapshot(self, volume, name): """ Creates a snapshot of the storage volume. 
@rtype: L{VolumeSnapshot} """ raise NotImplementedError( - 'snapshot_volume not implemented for this driver') + 'create_volume_snapshot not implemented for this driver') - def destroy_snapshot(self, snapshot): + def destroy_volume_snapshot(self, snapshot): """ Destroys a snapshot. @rtype: L{bool} """ raise NotImplementedError( - 'destroy_snapshot not implemented for this driver') + 'destroy_volume_snapshot not implemented for this driver') def _wait_until_running(self, node, wait_period=3, timeout=600, ssh_interface='public_ips', force_ipv4=True): From 6a741b22e8893bab0e0ed2d6509eee1ea9265bf9 Mon Sep 17 00:00:00 2001 From: Bob Thompson Date: Tue, 16 Jul 2013 14:16:16 -0400 Subject: [PATCH 115/143] Issue LIBCLOUD-364: Added Ubuntu Linux 12.04 to ElasticHosts. Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/elastichosts.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libcloud/compute/drivers/elastichosts.py b/libcloud/compute/drivers/elastichosts.py index ea801ee8cc..01ab5ca538 100644 --- a/libcloud/compute/drivers/elastichosts.py +++ b/libcloud/compute/drivers/elastichosts.py @@ -89,6 +89,12 @@ 'size_gunzipped': '1GB', 'supports_deployment': True, }, + '62f512cd-82c7-498e-88d8-a09ac2ef20e7': { + 'uuid': '62f512cd-82c7-498e-88d8-a09ac2ef20e7', + 'description': 'Ubuntu Linux 12.04', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0': { 'uuid': 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0', 'description': 'Windows Web Server 2008', From 2fe10abfd12ccdc052a93f2bec43339c12d574f3 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 17 Jul 2013 23:47:23 +0200 Subject: [PATCH 116/143] Update changes. 
--- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index 941238fc7c..41b589ab10 100644 --- a/CHANGES +++ b/CHANGES @@ -19,6 +19,9 @@ Changes with Apache Libcloud in development - Add new driver for Google Compute Engine (LIBCLOUD-266) [Rick Wright] + - Add Ubuntu Linux 12.04 image to ElasticHosts driver. (LIBCLOUD-364) + [Bob Thompson] + Changes with Apache Libcloud 0.13.0: *) General From 272d80a1ee6b3b8458903036f7b7550f9e9845f8 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 23 Jul 2013 20:39:16 +0200 Subject: [PATCH 117/143] Fix a regression with calling encode_container_name instead of encode_object_name on object name in get_object method. Reported by Ben Meng, part of LIBCLOUD-366. --- CHANGES | 7 +++++++ libcloud/storage/drivers/cloudfiles.py | 2 +- libcloud/test/storage/test_cloudfiles.py | 21 +++++++++++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 41b589ab10..a308d8b97e 100644 --- a/CHANGES +++ b/CHANGES @@ -22,6 +22,13 @@ Changes with Apache Libcloud in development - Add Ubuntu Linux 12.04 image to ElasticHosts driver. (LIBCLOUD-364) [Bob Thompson] + *) Storage + + - Fix a regression with calling encode_container_name instead of + encode_object_name on object name in get_object method. 
+ Reported by Ben Meng (LIBCLOUD-366) + [Tomaz Muraus] + Changes with Apache Libcloud 0.13.0: *) General diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index d3fddc8a01..7e810b6733 100644 --- a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -260,7 +260,7 @@ def get_container(self, container_name): def get_object(self, container_name, object_name): container = self.get_container(container_name) container_name_encoded = self._encode_container_name(container_name) - object_name_encoded = self._encode_container_name(object_name) + object_name_encoded = self._encode_object_name(object_name) response = self.connection.request('/%s/%s' % (container_name_encoded, object_name_encoded), diff --git a/libcloud/test/storage/test_cloudfiles.py b/libcloud/test/storage/test_cloudfiles.py index e0390b54ec..b70be4f9a3 100644 --- a/libcloud/test/storage/test_cloudfiles.py +++ b/libcloud/test/storage/test_cloudfiles.py @@ -201,6 +201,11 @@ def test_get_object_success(self): self.assertEqual(obj.meta_data['foo-bar'], 'test 1') self.assertEqual(obj.meta_data['bar-foo'], 'test 2') + def test_get_object_object_name_encoding(self): + obj = self.driver.get_object(container_name='test_container', + object_name='~/test_object/') + self.assertEqual(obj.name, '~/test_object/') + def test_get_object_not_found(self): try: self.driver.get_object(container_name='test_container', @@ -839,6 +844,22 @@ def _v1_MossoCloudFS_test_container_test_object( 'content-type': 'application/zip'}) return (status_code, body, headers, httplib.responses[httplib.OK]) + def _v1_MossoCloudFS_test_container__7E_test_object( + self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + if method == 'HEAD': + # get_object_name_encoding + body = self.fixtures.load('list_container_objects_empty.json') + status_code = httplib.NO_CONTENT + headers.update({ 'content-length': 555, + 'last-modified': 'Tue, 25 Jan 2011 
22:01:49 GMT', + 'etag': '6b21c4a111ac178feacf9ec9d0c71f17', + 'x-object-meta-foo-bar': 'test 1', + 'x-object-meta-bar-foo': 'test 2', + 'content-type': 'application/zip'}) + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_create_container( self, method, url, body, headers): # test_create_container_success From ac7c6adb2d07f86db4a24ce0a9f58c7d8fff586d Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 24 Jul 2013 14:28:31 -0700 Subject: [PATCH 118/143] Corrected an incorrect return-type in a docstring. Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 4904443f4e..f9b8fa3156 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -760,7 +760,7 @@ def ex_list_ip_addresses(self, node_id): @param node_id: ID of the node which should be used @type node_id: C{str} - @rtype: C{bool} + @rtype: C{OpenStack_1_0_NodeIpAddresses} """ # @TODO: Remove this if in 0.6 if isinstance(node_id, Node): From d56f0c065099b30dc260775aabed30ec3fb57491 Mon Sep 17 00:00:00 2001 From: joe miller Date: Sun, 21 Jul 2013 07:48:23 -0700 Subject: [PATCH 119/143] Issue LIBCLOUD-365: change ec2 ex_blockdevicemappings to support all possible key/values Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/ec2.py | 18 ++++++++++++------ libcloud/test/compute/test_ec2.py | 30 +++++++++++++++++++++++++++--- 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 26018ac122..1f4db0be48 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1291,7 +1291,8 @@ def create_node(self, **kwargs): @keyword ex_blockdevicemappings: C{list} of C{dict} block device mappings. 
Example: - [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'}] + [{'DeviceName': '/dev/sda1', 'Ebs.VolumeSize': 10}, + {'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'}] @type ex_blockdevicemappings: C{list} of C{dict} """ image = kwargs["image"] @@ -1331,11 +1332,16 @@ def create_node(self, **kwargs): params['ClientToken'] = kwargs['ex_clienttoken'] if 'ex_blockdevicemappings' in kwargs: - for index, mapping in enumerate(kwargs['ex_blockdevicemappings']): - params['BlockDeviceMapping.%d.DeviceName' % (index + 1)] = \ - mapping['DeviceName'] - params['BlockDeviceMapping.%d.VirtualName' % (index + 1)] = \ - mapping['VirtualName'] + if not isinstance(kwargs['ex_blockdevicemappings'], (list, tuple)): + raise AttributeError('ex_blockdevicemappings not list or tuple') + + for idx, mapping in enumerate(kwargs['ex_blockdevicemappings'], + start=1): + if not isinstance(mapping, dict): + raise AttributeError('mapping %s in ex_blockdevicemappings ' + 'not a dict' % mapping) + for k, v in mapping.items(): + params['BlockDeviceMapping.%d.%s' % (idx, k)] = str(v) object = self.connection.request(self.path, params=params).object nodes = self._to_nodes(object, 'instancesSet/item') diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index d0e2c8a610..b9de7cc871 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -185,6 +185,7 @@ def test_ex_create_node_with_ex_blockdevicemappings(self): size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) mappings = [ + {'DeviceName': '/dev/sda1', 'Ebs.VolumeSize': 10}, {'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'}, {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'} ] @@ -192,6 +193,25 @@ def test_ex_create_node_with_ex_blockdevicemappings(self): ex_blockdevicemappings=mappings) self.assertEqual(node.id, 'i-2ba64342') + def test_ex_create_node_with_ex_blockdevicemappings_attribute_error(self): + EC2MockHttp.type = 
'create_ex_blockdevicemappings' + + image = NodeImage(id='ami-be3adfd7', + name=self.image_name, + driver=self.driver) + size = NodeSize('m1.small', 'Small Instance', None, None, None, None, + driver=self.driver) + + mappings = 'this should be a list' + self.assertRaises(AttributeError, self.driver.create_node, name='foo', + image=image, size=size, + ex_blockdevicemappings=mappings) + + mappings = ['this should be a dict'] + self.assertRaises(AttributeError, self.driver.create_node, name='foo', + image=image, size=size, + ex_blockdevicemappings=mappings) + def test_destroy_node(self): node = Node('i-4382922a', None, None, None, None, self.driver) ret = self.driver.destroy_node(node) @@ -440,12 +460,16 @@ def _RunInstances(self, method, url, body, headers): def _create_ex_blockdevicemappings_RunInstances(self, method, url, body, headers): parameters = dict(parse_qsl(url)) self.assertEqual(parameters['BlockDeviceMapping.1.DeviceName'], + '/dev/sda1') + self.assertEqual(parameters['BlockDeviceMapping.1.Ebs.VolumeSize'], + '10') + self.assertEqual(parameters['BlockDeviceMapping.2.DeviceName'], '/dev/sdb') - self.assertEqual(parameters['BlockDeviceMapping.1.VirtualName'], + self.assertEqual(parameters['BlockDeviceMapping.2.VirtualName'], 'ephemeral0') - self.assertEqual(parameters['BlockDeviceMapping.2.DeviceName'], + self.assertEqual(parameters['BlockDeviceMapping.3.DeviceName'], '/dev/sdc') - self.assertEqual(parameters['BlockDeviceMapping.2.VirtualName'], + self.assertEqual(parameters['BlockDeviceMapping.3.VirtualName'], 'ephemeral1') body = self.fixtures.load('run_instances.xml') From 87e49260b36ada847637584eebb3d4ab5bfe8148 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 25 Jul 2013 09:20:46 +0200 Subject: [PATCH 120/143] Fix Python 2.5 test failure. 
--- libcloud/compute/drivers/ec2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 1f4db0be48..aecfdb2495 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1335,8 +1335,8 @@ def create_node(self, **kwargs): if not isinstance(kwargs['ex_blockdevicemappings'], (list, tuple)): raise AttributeError('ex_blockdevicemappings not list or tuple') - for idx, mapping in enumerate(kwargs['ex_blockdevicemappings'], - start=1): + for idx, mapping in enumerate(kwargs['ex_blockdevicemappings']): + idx += 1 # we want 1-based indexes if not isinstance(mapping, dict): raise AttributeError('mapping %s in ex_blockdevicemappings ' 'not a dict' % mapping) From 6e7875276d0d35ab0f6de78bcd01daf0f53fdc74 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 31 Jul 2013 12:17:35 -0700 Subject: [PATCH 121/143] Added the boilerplate stuff for sphinx docs Signed-off-by: Tomaz Muraus --- docs/Makefile | 153 ++++++++++++++++++++++++++++++ docs/conf.py | 246 +++++++++++++++++++++++++++++++++++++++++++++++++ docs/index.rst | 7 ++ docs/make.bat | 190 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 596 insertions(+) create mode 100644 docs/Makefile create mode 100644 docs/conf.py create mode 100644 docs/index.rst create mode 100644 docs/make.bat diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000000..3d1de36347 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,153 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
+ +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." 
+ +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ApacheLibcloud.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ApacheLibcloud.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/ApacheLibcloud" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ApacheLibcloud" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 
+ +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000000..fd2ab34415 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 -*- +# +# Apache Libcloud documentation build configuration file, created by +# sphinx-quickstart on Wed Jul 31 12:16:27 2013. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. 
If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Apache Libcloud' +copyright = u'2013, The Apache Software Foundation' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.14.0' +# The full version, including alpha/beta/rc tags. +release = '0.14.0-dev' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. 
+#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. 
+#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'ApacheLibclouddoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). 
+latex_documents = [ + ('index', 'ApacheLibcloud.tex', u'Apache Libcloud Documentation', + u'The Apache Software Foundation', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'apachelibcloud', u'Apache Libcloud Documentation', + [u'The Apache Software Foundation'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'ApacheLibcloud', u'Apache Libcloud Documentation', + u'The Apache Software Foundation', 'ApacheLibcloud', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000000..4675c85e7d --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,7 @@ +Welcome to Apache Libcloud's documentation! +=========================================== + +Contents: + +.. toctree:: + :maxdepth: 2 diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000000..ee239c92e6 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,190 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set BUILDDIR=_build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. linkcheck to check all external links for integrity + echo. 
doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\ApacheLibcloud.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\ApacheLibcloud.ghc + goto end +) + +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. 
+ echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. 
+ goto end +) + +:end From 5198be7db6932b092d95155da30a07cf482318f7 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 31 Jul 2013 12:18:46 -0700 Subject: [PATCH 122/143] ignore the sphinx build directory Signed-off-by: Tomaz Muraus --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 0bc25c1f09..ef56c2ea93 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ MANIFEST coverage_html_report/ .idea dist/*apache-libcloud* +_build/ From 67b5934b01095bf4618be47975a4b00767946355 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 31 Jul 2013 12:20:03 -0700 Subject: [PATCH 123/143] Noted that the Sphinx docs are a work in progress Signed-off-by: Tomaz Muraus --- docs/index.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index 4675c85e7d..9d3438c226 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,6 +1,10 @@ Welcome to Apache Libcloud's documentation! =========================================== +Right now we're in the progress of migrating our existing documentation to +Sphinx, so this may be incomplete. We apologize for the inconvenience and we +hope the upcoming awesomeness will make up for it. + Contents: .. toctree:: From 259657f2372476081695c369bce0875e64b6eb0b Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 31 Jul 2013 22:38:39 +0200 Subject: [PATCH 124/143] Print Python version, change if statement. 
--- libcloud/utils/py3.py | 5 ++++- setup.py | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/libcloud/utils/py3.py b/libcloud/utils/py3.py index 0d6987dead..206324e1fb 100644 --- a/libcloud/utils/py3.py +++ b/libcloud/utils/py3.py @@ -146,4 +146,7 @@ def relpath(path, start=posixpath.curdir): return posixpath.curdir return posixpath.join(*rel_list) -unittest2_required = not (PY27 or PY3) +if PY27 or PY3: + unittest2_required = False +else: + unittest2_required = True diff --git a/setup.py b/setup.py index 1adee5de32..fc82122d68 100644 --- a/setup.py +++ b/setup.py @@ -95,6 +95,7 @@ def run(self): import unittest2 unittest2 except ImportError: + print('Python version: %s' % (sys.version)) print('Missing "unittest2" library. unittest2 is library is needed ' 'to run the tests. You can install it using pip: ' 'pip install unittest2') From ade45fdea2d93a2d15af39bc2fac0eb522a282e9 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 31 Jul 2013 23:17:51 +0200 Subject: [PATCH 125/143] Modify py26 tox environment to also run coverage command. 
--- tox.ini | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tox.ini b/tox.ini index 7212a7e259..536b750ddd 100644 --- a/tox.ini +++ b/tox.ini @@ -10,6 +10,15 @@ deps = mock paramiko commands = python setup.py test +[testenv:py26] +deps = mock + unittest2 + lockfile + paramiko + coverage +commands = python setup.py test + python setup.py coverage + [testenv:py25] deps = mock unittest2 From e8101cb4cbbf682eb2c459ff80be245c26bb6fbd Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 31 Jul 2013 14:28:30 -0700 Subject: [PATCH 126/143] Initial set of indices for each component Signed-off-by: Tomaz Muraus --- docs/compute/index.rst | 23 +++++++++++++++++++++++ docs/dns/index.rst | 2 ++ docs/index.rst | 27 +++++++++++++++++++++++---- docs/loadbalancers/index.rst | 2 ++ docs/storage/index.rst | 2 ++ 5 files changed, 52 insertions(+), 4 deletions(-) create mode 100644 docs/compute/index.rst create mode 100644 docs/dns/index.rst create mode 100644 docs/loadbalancers/index.rst create mode 100644 docs/storage/index.rst diff --git a/docs/compute/index.rst b/docs/compute/index.rst new file mode 100644 index 0000000000..7a618a0fef --- /dev/null +++ b/docs/compute/index.rst @@ -0,0 +1,23 @@ +Compute +======= + +The compute component of ``libcloud`` allows you to manage cloud and virtual +servers offered by different providers, more than 20 in total. + +In addition to managing the servers this component also allows you to run +deployment scripts on newly created servers. Deployment or "bootstrap" scripts +allow you to execute arbitrary shell commands. This functionality is usually +used to prepare your freshly created server, install your SSH key, and run a +configuration management tool (such as Puppet, Chef, or cfengine) on it. + +Terminology +----------- + +* **Node** - represents a cloud or virtual server. +* **NodeSize** - represents node hardware configuration. Usually this is amount + of the available RAM, bandwidth, CPU speed and disk size. 
Most of the drivers + also expose hourly price (in dollars) for the Node of this size. +* **NodeImage** - represents an operating system image. +* **NodeLocation** - represents a physical location where a server can be. +* **NodeState** - represents a node state. Standard states are: ``running``, + ``rebooting``, ``terminated``, ``pending``, and ``unknown```. diff --git a/docs/dns/index.rst b/docs/dns/index.rst new file mode 100644 index 0000000000..51bbfe369c --- /dev/null +++ b/docs/dns/index.rst @@ -0,0 +1,2 @@ +DNS +=== diff --git a/docs/index.rst b/docs/index.rst index 9d3438c226..bf200e08b8 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,11 +1,30 @@ Welcome to Apache Libcloud's documentation! =========================================== -Right now we're in the progress of migrating our existing documentation to -Sphinx, so this may be incomplete. We apologize for the inconvenience and we -hope the upcoming awesomeness will make up for it. +.. note:: + + Right now we're in the progress of migrating our existing documentation to + Sphinx, so this may be incomplete. We apologize for the inconvenience and we + hope the upcoming awesomeness will make up for it. + +Apache Libcloud is a Python library which abstracts away the differences +between multiple cloud providers. It current can manage four different cloud +resources: + +* :doc:`Cloud servers ` - services such as Amazon EC2 and + RackSpace CloudServers +* :doc:`Cloud object storage ` - services such as Amazon S3 and + Rackspace CloudFiles +* :doc:`Load Balancers as a Service ` +* :doc:`DNS as a Service ` + Contents: .. 
toctree:: - :maxdepth: 2 + :maxdepth: 1 + + compute/index + storage/index + loadbalancers/index + dns/index diff --git a/docs/loadbalancers/index.rst b/docs/loadbalancers/index.rst new file mode 100644 index 0000000000..35c5ed14c0 --- /dev/null +++ b/docs/loadbalancers/index.rst @@ -0,0 +1,2 @@ +Load Balancers +============== diff --git a/docs/storage/index.rst b/docs/storage/index.rst new file mode 100644 index 0000000000..17a6f7ce85 --- /dev/null +++ b/docs/storage/index.rst @@ -0,0 +1,2 @@ +Object Storage +============== From bd1e71bb12b91f49ad7fd6d4ab52da6b5ae9958d Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 31 Jul 2013 14:41:55 -0700 Subject: [PATCH 127/143] Typo fix Signed-off-by: Tomaz Muraus --- docs/compute/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/compute/index.rst b/docs/compute/index.rst index 7a618a0fef..5c0939a410 100644 --- a/docs/compute/index.rst +++ b/docs/compute/index.rst @@ -20,4 +20,4 @@ Terminology * **NodeImage** - represents an operating system image. * **NodeLocation** - represents a physical location where a server can be. * **NodeState** - represents a node state. Standard states are: ``running``, - ``rebooting``, ``terminated``, ``pending``, and ``unknown```. + ``rebooting``, ``terminated``, ``pending``, and ``unknown``. From 4cb45c079182e320f6d950d25e7bc603552ffd5f Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 31 Jul 2013 14:43:18 -0700 Subject: [PATCH 128/143] Added some todos Signed-off-by: Tomaz Muraus --- docs/dns/index.rst | 4 ++++ docs/loadbalancers/index.rst | 4 ++++ docs/storage/index.rst | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/docs/dns/index.rst b/docs/dns/index.rst index 51bbfe369c..7e899b15fd 100644 --- a/docs/dns/index.rst +++ b/docs/dns/index.rst @@ -1,2 +1,6 @@ DNS === + +.. note:: + + TODO: Write me! 
diff --git a/docs/loadbalancers/index.rst b/docs/loadbalancers/index.rst index 35c5ed14c0..f9baa2d9b4 100644 --- a/docs/loadbalancers/index.rst +++ b/docs/loadbalancers/index.rst @@ -1,2 +1,6 @@ Load Balancers ============== + +.. note:: + + TODO: Write me! diff --git a/docs/storage/index.rst b/docs/storage/index.rst index 17a6f7ce85..3ab1bb5d40 100644 --- a/docs/storage/index.rst +++ b/docs/storage/index.rst @@ -1,2 +1,6 @@ Object Storage ============== + +.. note:: + + TODO: Write me! From 9d4e3ffa7652425ad9c86df754861db4fe220e15 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 31 Jul 2013 15:06:26 -0700 Subject: [PATCH 129/143] Added some examples Signed-off-by: Tomaz Muraus --- docs/compute/index.rst | 5 +++++ docs/index.rst | 21 ++++++++++----------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/docs/compute/index.rst b/docs/compute/index.rst index 5c0939a410..cad3179eb4 100644 --- a/docs/compute/index.rst +++ b/docs/compute/index.rst @@ -21,3 +21,8 @@ Terminology * **NodeLocation** - represents a physical location where a server can be. * **NodeState** - represents a node state. Standard states are: ``running``, ``rebooting``, ``terminated``, ``pending``, and ``unknown``. + +Examples +-------- + +We have :doc:`examples of several common patterns `. diff --git a/docs/index.rst b/docs/index.rst index bf200e08b8..9f8e30b5f4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -11,20 +11,19 @@ Apache Libcloud is a Python library which abstracts away the differences between multiple cloud providers. 
It current can manage four different cloud resources: -* :doc:`Cloud servers ` - services such as Amazon EC2 and +* :doc:`Cloud servers ` - services such as Amazon EC2 and RackSpace CloudServers -* :doc:`Cloud object storage ` - services such as Amazon S3 and +* :doc:`Cloud object storage ` - services such as Amazon S3 and Rackspace CloudFiles -* :doc:`Load Balancers as a Service ` -* :doc:`DNS as a Service ` +* :doc:`Load Balancers as a Service ` +* :doc:`DNS as a Service ` -Contents: - .. toctree:: - :maxdepth: 1 + :glob: + :hidden: - compute/index - storage/index - loadbalancers/index - dns/index + compute/* + storage/* + loadbalancers/* + dns/* From 8a868d974e53bccdf10070042487561586b44dcf Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 31 Jul 2013 15:06:33 -0700 Subject: [PATCH 130/143] Really added the examples Signed-off-by: Tomaz Muraus --- docs/compute/examples.rst | 42 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 docs/compute/examples.rst diff --git a/docs/compute/examples.rst b/docs/compute/examples.rst new file mode 100644 index 0000000000..1a7e2bd759 --- /dev/null +++ b/docs/compute/examples.rst @@ -0,0 +1,42 @@ +Compute Examples +================ + +Create an OpenStack node using trystack.org provider +---------------------------------------------------- + +`trystack.org`_ allows users to try out OpenStack for free. This example +demonstrates how to launch an OpenStack node on the ``trystack.org`` provider +using a generic OpenStack driver. + +.. sourcecode:: python + + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + + import libcloud.security + + # At the time this example was written, https://nova-api.trystack.org:5443 + # was using a certificate issued by a Certificate Authority (CA) which is + # not included in the default Ubuntu certificates bundle (ca-certificates). 
+ # Note: Code like this poses a security risk (MITM attack) and that's the + # reason why you should never use it for anything else besides testing. You + # have been warned. + libcloud.security.VERIFY_SSL_CERT = False + + OpenStack = get_driver(Provider.OPENSTACK) + + driver = OpenStack('your username', 'your password', + ex_force_auth_url='https://nova-api.trystack.org:5443/v2.0', + ex_force_auth_version='2.0_password') + + nodes = driver.list_nodes() + + images = driver.list_images() + sizes = driver.list_sizes() + size = [s for s in sizes if s.ram == 512][0] + image = [i for i in images if i.name == 'natty-server-cloudimg-amd64'][0] + + node = driver.create_node(name='test node', image=image, size=size) + + +.. _`trystack.org`: http://trystack.org/ From be1bdc012cda62e962272de586df9c2d6e5311ea Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Wed, 31 Jul 2013 16:04:59 -0700 Subject: [PATCH 131/143] Included API doc for base node driver, also ported docstrings from epytext to sphinx Signed-off-by: Tomaz Muraus --- docs/compute/api.rst | 5 + docs/compute/index.rst | 6 + libcloud/compute/base.py | 262 +++++++++++++++++++-------------------- 3 files changed, 136 insertions(+), 137 deletions(-) create mode 100644 docs/compute/api.rst diff --git a/docs/compute/api.rst b/docs/compute/api.rst new file mode 100644 index 0000000000..13d9a1f8e6 --- /dev/null +++ b/docs/compute/api.rst @@ -0,0 +1,5 @@ +Compute Base API +================ + +.. autoclass:: libcloud.compute.base.NodeDriver + :members: diff --git a/docs/compute/index.rst b/docs/compute/index.rst index cad3179eb4..af0dbb89cb 100644 --- a/docs/compute/index.rst +++ b/docs/compute/index.rst @@ -26,3 +26,9 @@ Examples -------- We have :doc:`examples of several common patterns `. + +API Reference +------------- + +There is a reference to :doc:`all the methods on the base compute driver +`. 
diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index b5def91f41..53a22314a4 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -78,7 +78,7 @@ def __init__(self): def get_uuid(self): """Unique hash for a node, node image, or node size - @return: C{string} + :return: ``string`` The hash is a function of an SHA1 hash of the node, node image, or node size's ID and its driver which means that it should be @@ -188,7 +188,7 @@ def _get_private_ips(self): def reboot(self): """Reboot this node - @return: C{bool} + :return: ``bool`` This calls the node's driver and reboots the node @@ -210,7 +210,7 @@ def reboot(self): def destroy(self): """Destroy this node - @return: C{bool} + :return: ``bool`` This calls the node's driver and destroys the node @@ -382,14 +382,14 @@ def attach(self, node, device=None): """ Attach this volume to a node. - @param node: Node to attach volume to - @type node: L{Node} + :param node: Node to attach volume to + :type node: :class:`Node` - @param device: Where the device is exposed, + :param device: Where the device is exposed, e.g. '/dev/sdb (optional) - @type device: C{str} + :type device: ``str`` - @returns C{bool} + :return s ``bool`` """ return self.driver.attach_volume(node=node, volume=self, device=device) @@ -398,14 +398,14 @@ def detach(self): """ Detach this volume from its node - @returns C{bool} + :return s ``bool`` """ return self.driver.detach_volume(volume=self) def list_snapshots(self): """ - @returns C{list} of C{VolumeSnapshot} + :return s ``list`` of ``VolumeSnapshot`` """ return self.driver.list_volume_snapshots(volume=self) @@ -413,7 +413,7 @@ def snapshot(self, name): """ Creates a snapshot of this volume. - @returns C{VolumeSnapshot} + :return s ``VolumeSnapshot`` """ return self.driver.create_volume_snapshot(volume=self, name=name) @@ -421,7 +421,7 @@ def destroy(self): """ Destroy this storage volume. 
- @returns C{bool} + :return s ``bool`` """ return self.driver.destroy_volume(volume=self) @@ -439,7 +439,7 @@ def destroy(self): """ Destroys this snapshot. - @returns C{bool} + :return s ``bool`` """ return self.driver.destroy_volume_snapshot(snapshot=self) @@ -461,10 +461,10 @@ class NodeDriver(BaseDriver): features = {"create_node": []} """ List of available features for a driver. - - L{create_node} - - ssh_key: Supports L{NodeAuthSSHKey} as an authentication method + - :class:`create_node` + - ssh_key: Supports :class:`NodeAuthSSHKey` as an authentication method for nodes. - - password: Supports L{NodeAuthPassword} as an authentication + - password: Supports :class:`NodeAuthPassword` as an authentication method for nodes. - generates_password: Returns a password attribute on the Node object returned from creation. @@ -481,26 +481,26 @@ def __init__(self, key, secret=None, secure=True, host=None, port=None, def create_node(self, **kwargs): """Create a new node instance. - @keyword name: String with a name for this new node (required) - @type name: C{str} + :param name: String with a name for this new node (required) + :type name: ``str`` - @keyword size: The size of resources allocated to this node. + :param size: The size of resources allocated to this node. (required) - @type size: L{NodeSize} + :type size: :class:`NodeSize` - @keyword image: OS Image to boot on node. (required) - @type image: L{NodeImage} + :param image: OS Image to boot on node. (required) + :type image: :class:`NodeImage` - @keyword location: Which data center to create a node in. If empty, + :param location: Which data center to create a node in. If empty, undefined behavoir will be selected. 
(optional) - @type location: L{NodeLocation} + :type location: :class:`NodeLocation` - @keyword auth: Initial authentication information for the node + :param auth: Initial authentication information for the node (optional) - @type auth: L{NodeAuthSSHKey} or L{NodeAuthPassword} + :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword` - @return: The newly created node. - @rtype: L{Node} + :return: The newly created node. + :rtype: :class:`Node` """ raise NotImplementedError( 'create_node not implemented for this driver') @@ -511,11 +511,11 @@ def destroy_node(self, node): Depending upon the provider, this may destroy all data associated with the node, including backups. - @param node: The node to be destroyed - @type node: L{Node} + :param node: The node to be destroyed + :type node: :class:`Node` - @return: True if the destroy was successful, otherwise False - @rtype: C{bool} + :return: True if the destroy was successful, otherwise False + :rtype: ``bool`` """ raise NotImplementedError( 'destroy_node not implemented for this driver') @@ -524,11 +524,11 @@ def reboot_node(self, node): """ Reboot a node. 
- @param node: The node to be rebooted - @type node: L{Node} + :param node: The node to be rebooted + :type node: :class:`Node` - @return: True if the reboot was successful, otherwise False - @rtype: C{bool} + :return: True if the reboot was successful, otherwise False + :rtype: ``bool`` """ raise NotImplementedError( 'reboot_node not implemented for this driver') @@ -536,8 +536,8 @@ def reboot_node(self, node): def list_nodes(self): """ List all nodes - @return: list of node objects - @rtype: C{list} of L{Node} + :return: list of node objects + :rtype: ``list`` of :class:`Node` """ raise NotImplementedError( 'list_nodes not implemented for this driver') @@ -546,11 +546,11 @@ def list_images(self, location=None): """ List images on a provider - @keyword location: The location at which to list images - @type location: L{NodeLocation} + :param location: The location at which to list images + :type location: :class:`NodeLocation` - @return: list of node image objects - @rtype: C{list} of L{NodeImage} + :return: list of node image objects + :rtype: ``list`` of :class:`NodeImage` """ raise NotImplementedError( 'list_images not implemented for this driver') @@ -559,11 +559,11 @@ def list_sizes(self, location=None): """ List sizes on a provider - @keyword location: The location at which to list sizes - @type location: L{NodeLocation} + :param location: The location at which to list sizes + :type location: :class:`NodeLocation` - @return: list of node size objects - @rtype: C{list} of L{NodeSize} + :return: list of node size objects + :rtype: ``list`` of :class:`NodeSize` """ raise NotImplementedError( 'list_sizes not implemented for this driver') @@ -572,8 +572,8 @@ def list_locations(self): """ List data centers for a provider - @return: list of node location objects - @rtype: C{list} of L{NodeLocation} + :return: list of node location objects + :rtype: ``list`` of :class:`NodeLocation` """ raise NotImplementedError( 'list_locations not implemented for this driver') @@ 
-585,7 +585,7 @@ def deploy_node(self, **kwargs): Depends on a Provider Driver supporting either using a specific password or returning a generated password. - This function may raise a L{DeploymentException}, if a create_node + This function may raise a :class:`DeploymentException`, if a create_node call was successful, but there is a later error (like SSH failing or timing out). This exception includes a Node object which you may want to destroy if incomplete deployments are not desirable. @@ -609,48 +609,48 @@ def deploy_node(self, **kwargs): Deploy node is typically not overridden in subclasses. The existing implementation should be able to handle most such. - @inherits: L{NodeDriver.create_node} + @inherits: :class:`NodeDriver.create_node` - @keyword deploy: Deployment to run once machine is online and + :param deploy: Deployment to run once machine is online and availble to SSH. - @type deploy: L{Deployment} + :type deploy: :class:`Deployment` - @keyword ssh_username: Optional name of the account which is used + :param ssh_username: Optional name of the account which is used when connecting to SSH server (default is root) - @type ssh_username: C{str} + :type ssh_username: ``str`` - @keyword ssh_alternate_usernames: Optional list of ssh usernames to + :param ssh_alternate_usernames: Optional list of ssh usernames to try to connect with if using the default one fails - @type ssh_alternate_usernames: C{list} + :type ssh_alternate_usernames: ``list`` - @keyword ssh_port: Optional SSH server port (default is 22) - @type ssh_port: C{int} + :param ssh_port: Optional SSH server port (default is 22) + :type ssh_port: ``int`` - @keyword ssh_timeout: Optional SSH connection timeout in seconds + :param ssh_timeout: Optional SSH connection timeout in seconds (default is None) - @type ssh_timeout: C{float} + :type ssh_timeout: ``float`` - @keyword auth: Initial authentication information for the node + :param auth: Initial authentication information for the node (optional) 
- @type auth: L{NodeAuthSSHKey} or L{NodeAuthPassword} + :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword` - @keyword ssh_key: A path (or paths) to an SSH private key with which + :param ssh_key: A path (or paths) to an SSH private key with which to attempt to authenticate. (optional) - @type ssh_key: C{str} or C{list} of C{str} + :type ssh_key: ``str`` or ``list`` of ``str`` - @keyword timeout: How many seconds to wait before timing out. + :param timeout: How many seconds to wait before timing out. (default is 600) - @type timeout: C{int} + :type timeout: ``int`` - @keyword max_tries: How many times to retry if a deployment fails + :param max_tries: How many times to retry if a deployment fails before giving up (default is 3) - @type max_tries: C{int} + :type max_tries: ``int`` - @keyword ssh_interface: The interface to wait for. Default is + :param ssh_interface: The interface to wait for. Default is 'public_ips', other option is 'private_ips'. - @type ssh_interface: C{str} + :type ssh_interface: ``str`` """ if not libcloud.compute.ssh.have_paramiko: raise RuntimeError('paramiko is not installed. You can install ' + @@ -734,23 +734,23 @@ def create_volume(self, size, name, location=None, snapshot=None): """ Create a new volume. - @param size: Size of volume in gigabytes (required) - @type size: C{int} + :param size: Size of volume in gigabytes (required) + :type size: ``int`` - @keyword name: Name of the volume to be created - @type name: C{str} + :param name: Name of the volume to be created + :type name: ``str`` - @keyword location: Which data center to create a volume in. If + :param location: Which data center to create a volume in. If empty, undefined behavoir will be selected. (optional) - @type location: L{NodeLocation} + :type location: :class:`NodeLocation` - @keyword snapshot: Name of snapshot from which to create the new + :param snapshot: Name of snapshot from which to create the new volume. 
(optional) - @type snapshot: C{str} + :type snapshot: ``str`` - @return: The newly created volume. - @rtype: L{StorageVolume} + :return: The newly created volume. + :rtype: :class:`StorageVolume` """ raise NotImplementedError( 'create_volume not implemented for this driver') @@ -759,10 +759,10 @@ def destroy_volume(self, volume): """ Destroys a storage volume. - @param volume: Volume to be destroyed - @type volume: L{StorageVolume} + :param volume: Volume to be destroyed + :type volume: :class:`StorageVolume` - @rtype: C{bool} + :rtype: ``bool`` """ raise NotImplementedError( @@ -772,17 +772,11 @@ def attach_volume(self, node, volume, device=None): """ Attaches volume to node. - @param node: Node to attach volume to - @type node: L{Node} + :param Node node: Node to attach volume to. + :param StorageVolume volume: Volume to attach. + :param str device: Where the device is exposed, e.g. '/dev/sdb' - @param volume: Volume to attach - @type volume: L{StorageVolume} - - @param device: Where the device is exposed, - e.g. '/dev/sdb (optional) - @type device: C{str} - - @rtype: C{bool} + :return bool: """ raise NotImplementedError('attach not implemented for this driver') @@ -790,10 +784,8 @@ def detach_volume(self, volume): """ Detaches a volume from a node. - @param volume: Volume to be detached - @type volume: L{StorageVolume} - - @rtype: C{bool} + :param StorageVolume volume: Volume to be detached + :return bool: """ raise NotImplementedError('detach not implemented for this driver') @@ -802,8 +794,7 @@ def list_volumes(self): """ List storage volumes. - @return: list of storageVolume objects - @rtype: C{list} of L{StorageVolume} + :return [StorageVolume]: """ raise NotImplementedError( 'list_volumes not implemented for this driver') @@ -812,7 +803,7 @@ def list_volume_snapshots(self, volume): """ List snapshots for a storage volume. 
- @rtype: C{list} of L{VolumeSnapshot} + :rtype: ``list`` of :class:`VolumeSnapshot` """ raise NotImplementedError( 'list_volume_snapshots not implemented for this driver') @@ -821,7 +812,7 @@ def create_volume_snapshot(self, volume, name): """ Creates a snapshot of the storage volume. - @rtype: L{VolumeSnapshot} + :rtype: :class:`VolumeSnapshot` """ raise NotImplementedError( 'create_volume_snapshot not implemented for this driver') @@ -830,7 +821,7 @@ def destroy_volume_snapshot(self, snapshot): """ Destroys a snapshot. - @rtype: L{bool} + :rtype: :class:`bool` """ raise NotImplementedError( 'destroy_volume_snapshot not implemented for this driver') @@ -850,31 +841,28 @@ def wait_until_running(self, nodes, wait_period=3, timeout=600, Block until the given nodes are fully booted and have an IP address assigned. - @keyword nodes: list of node instances. - @type nodes: C{List} of L{Node} + :param nodes: list of node instances. + :type nodes: ``List`` of :class:`Node` - @keyword wait_period: How many seconds to between each loop + :param wait_period: How many seconds to between each loop iteration (default is 3) - @type wait_period: C{int} + :type wait_period: ``int`` - @keyword timeout: How many seconds to wait before timing out + :param timeout: How many seconds to wait before timing out (default is 600) - @type timeout: C{int} + :type timeout: ``int`` - @keyword ssh_interface: The interface to wait for. + :param ssh_interface: The interface to wait for. Default is 'public_ips', other option is 'private_ips'. - @type ssh_interface: C{str} + :type ssh_interface: ``str`` - @keyword force_ipv4: Ignore ipv6 IP addresses (default is True). - @type force_ipv4: C{bool} + :param force_ipv4: Ignore ipv6 IP addresses (default is True). + :type force_ipv4: ``bool`` - @return: C{[(Node, ip_addresses)]} list of tuple of Node instance and + :return: ``[(Node, ip_addresses)]`` list of tuple of Node instance and list of ip_address on success. 
- - @return: List of tuple of Node instance and list of ip_address on - success (node, ip_addresses). - @rtype: C{list} of C{tuple} + :rtype: ``list`` of ``tuple`` """ def is_supported(address): """Return True for supported address""" @@ -923,18 +911,18 @@ def _ssh_client_connect(self, ssh_client, wait_period=1.5, timeout=300): Try to connect to the remote SSH server. If a connection times out or is refused it is retried up to timeout number of seconds. - @keyword ssh_client: A configured SSHClient instance - @type ssh_client: C{SSHClient} + :param ssh_client: A configured SSHClient instance + :type ssh_client: ``SSHClient`` - @keyword wait_period: How many seconds to wait between each loop + :param wait_period: How many seconds to wait between each loop iteration (default is 1.5) - @type wait_period: C{int} + :type wait_period: ``int`` - @keyword timeout: How many seconds to wait before timing out + :param timeout: How many seconds to wait before timing out (default is 600) - @type timeout: C{int} + :type timeout: ``int`` - @return: C{SSHClient} on success + :return: ``SSHClient`` on success """ start = time.time() end = start + timeout @@ -978,20 +966,20 @@ def _run_deployment_script(self, task, node, ssh_client, max_tries=3): Run the deployment script on the provided node. At this point it is assumed that SSH connection has already been established. - @keyword task: Deployment task to run on the node. - @type task: C{Deployment} + :param task: Deployment task to run on the node. 
+ :type task: ``Deployment`` - @keyword node: Node to operate one - @type node: C{Node} + :param node: Node to operate one + :type node: ``Node`` - @keyword ssh_client: A configured and connected SSHClient instance - @type ssh_client: C{SSHClient} + :param ssh_client: A configured and connected SSHClient instance + :type ssh_client: ``SSHClient`` - @keyword max_tries: How many times to retry if a deployment fails + :param max_tries: How many times to retry if a deployment fails before giving up (default is 3) - @type max_tries: C{int} + :type max_tries: ``int`` - @return: C{Node} Node instance on success. + :return: ``Node`` Node instance on success. """ tries = 0 while tries < max_tries: @@ -1018,10 +1006,10 @@ def is_private_subnet(ip): """ Utility function to check if an IP address is inside a private subnet. - @type ip: C{str} - @keyword ip: IP address to check + :type ip: ``str`` + :param ip: IP address to check - @return: C{bool} if the specified IP address is private. + :return: ``bool`` if the specified IP address is private. """ priv_subnets = [{'subnet': '10.0.0.0', 'mask': '255.0.0.0'}, {'subnet': '172.16.0.0', 'mask': '255.240.0.0'}, @@ -1043,7 +1031,7 @@ def is_valid_ip_address(address, family=socket.AF_INET): """ Check if the provided address is valid IPv4 or IPv6 adddress. - @return: C{bool} True if the provided address is valid. + :return: ``bool`` True if the provided address is valid. """ try: socket.inet_pton(family, address) From 5fa1610cdad7400303af8d6cdfb4d19d6bb81d59 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 14 Jun 2013 21:50:50 -0700 Subject: [PATCH 132/143] Allow user to use a custom pricing file by placing a file to ~/.libcloud/pricing.json. 
--- libcloud/pricing.py | 43 ++++++++++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/libcloud/pricing.py b/libcloud/pricing.py index 3ed32d22a6..db3b674fa1 100644 --- a/libcloud/pricing.py +++ b/libcloud/pricing.py @@ -25,27 +25,26 @@ import os.path from os.path import join as pjoin -PRICING_FILE_PATH = 'data/pricing.json' +CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__)) +DEFAULT_PRICING_FILE_PATH = pjoin(CURRENT_DIRECTORY, 'data/pricing.json') +CUSTOM_PRICING_FILE_PATH = os.path.expanduser('~/.libcloud/pricing.json') -PRICING_DATA = {} +# Pricing data cache +PRICING_DATA = { + 'compute': {}, + 'storage': {} +} VALID_PRICING_DRIVER_TYPES = ['compute', 'storage'] -def clear_pricing_data(): - PRICING_DATA.clear() - PRICING_DATA.update({ - 'compute': {}, - 'storage': {}, - }) -clear_pricing_data() - - def get_pricing_file_path(file_path=None): - pricing_directory = os.path.dirname(os.path.abspath(__file__)) - pricing_file_path = pjoin(pricing_directory, PRICING_FILE_PATH) + if os.path.exists(CUSTOM_PRICING_FILE_PATH) and \ + os.path.isfile(CUSTOM_PRICING_FILE_PATH): + # Custom pricing file is available, use it + return CUSTOM_PRICING_FILE_PATH - return pricing_file_path + return DEFAULT_PRICING_FILE_PATH def get_pricing(driver_type, driver_name, pricing_file_path=None): @@ -58,6 +57,10 @@ def get_pricing(driver_type, driver_name, pricing_file_path=None): @type driver_name: C{str} @param driver_name: Driver name + @type pricing_file_path: C{str} + @param pricing_file_path: Custom path to a price file. If not provided + it uses a default path. + @rtype: C{dict} @return: Dictionary with pricing where a key name is size ID and the value is a price. @@ -126,12 +129,22 @@ def get_size_price(driver_type, driver_name, size_id): def invalidate_pricing_cache(): """ - Invalidate the cache for all the drivers. + Invalidate pricing cache for all the drivers. 
""" PRICING_DATA['compute'] = {} PRICING_DATA['storage'] = {} +def clear_pricing_data(): + """ + Invalidate pricing cache for all the drivers. + + Note: This method does the same thing as invalidate_pricing_cache and is + here for backward compatibility reasons. + """ + invalidate_pricing_cache() + + def invalidate_module_pricing_cache(driver_type, driver_name): """ Invalidate the cache for the specified driver. From 30500fedd0a48273b4a45ec9147d3f670b6e9710 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 29 Jul 2013 21:28:50 +0200 Subject: [PATCH 133/143] Add cli utility for updating pricing file. --- bin/libcloud | 30 +++++++++++++ libcloud/cli/__init__.py | 0 libcloud/cli/pricing.py | 84 ++++++++++++++++++++++++++++++++++++ libcloud/utils/connection.py | 31 +++++++++++++ 4 files changed, 145 insertions(+) create mode 100755 bin/libcloud create mode 100644 libcloud/cli/__init__.py create mode 100644 libcloud/cli/pricing.py create mode 100644 libcloud/utils/connection.py diff --git a/bin/libcloud b/bin/libcloud new file mode 100755 index 0000000000..270922e850 --- /dev/null +++ b/bin/libcloud @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import with_statement + +import argparse + +from libcloud.cli.pricing import add_subparser, update_pricing + +parser = argparse.ArgumentParser(prog='libcloud', usage='%(prog)s') +subparsers = parser.add_subparsers(dest='subparser_name') +add_subparser(subparsers=subparsers) + +args = parser.parse_args() + +if args.subparser_name == 'update-pricing': + update_pricing(file_url=args.file_url, file_path=args.file_path) diff --git a/libcloud/cli/__init__.py b/libcloud/cli/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/libcloud/cli/pricing.py b/libcloud/cli/pricing.py new file mode 100644 index 0000000000..584d292a77 --- /dev/null +++ b/libcloud/cli/pricing.py @@ -0,0 +1,84 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys + +try: + import simplejson as json +except ImportError: + import json + + +from libcloud.pricing import CUSTOM_PRICING_FILE_PATH +from libcloud.utils.connection import get_response_object + +__all__ = [ + 'add_subparser', + 'update_pricing' +] + +# Default URL to the pricing file +DEFAULT_FILE_URL = 'https://git-wip-us.apache.org/repos/asf?p=libcloud.git;a=blob_plain;f=libcloud/data/pricing.json' + + +def add_subparser(subparsers): + parser = subparsers.add_parser('update-pricing', + help='Update Libcloud pricing file') + parser.add_argument('--file-path', dest='file_path', action='store', + default=CUSTOM_PRICING_FILE_PATH, + help='Path where the file will be saved') + parser.add_argument('--file-url', dest='file_url', action='store', + default=DEFAULT_FILE_URL, + help='URL to the pricing file') + return parser + + +def update_pricing(file_url, file_path): + dir_name = os.path.dirname(file_path) + + if not os.path.exists(dir_name): + # Verify a valid path is provided + sys.stderr.write('Can\'t write to %s, directory %s, doesn\'t exist\n' % + (file_path, dir_name)) + sys.exit(2) + + if os.path.exists(file_path) and os.path.isdir(file_path): + sys.stderr.write('Can\'t write to %s file path because it\'s a' + ' directory\n' % + (file_path)) + sys.exit(2) + + response = get_response_object(file_url) + body = response.body + + # Verify pricing file is valid + try: + data = json.loads(body) + except json.decoder.JSONDecodeError: + sys.stderr.write('Provided URL doesn\'t contain valid pricing' + ' data\n') + sys.exit(3) + + if not data.get('updated', None): + sys.stderr.write('Provided URL doesn\'t contain valid pricing' + ' data\n') + sys.exit(3) + + # No need to stream it since file is small + with open(file_path, 'w') as file_handle: + file_handle.write(response.body) + + print('Pricing file saved to %s' % (file_path)) diff --git a/libcloud/utils/connection.py b/libcloud/utils/connection.py new file mode 100644 index 0000000000..bd2b50d61b 
--- /dev/null +++ b/libcloud/utils/connection.py @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils.py3 import urlparse, parse_qs +from libcloud.common.base import Connection + +__all__ = [ + 'get_response_object' +] + + +def get_response_object(url): + parsed_url = urlparse.urlparse(url) + parsed_qs = parse_qs(parsed_url.query) + secure = parsed_url.scheme == 'https' + + con = Connection(secure=secure, host=parsed_url.netloc) + response = con.request(method='GET', action=parsed_url.path, params=parsed_qs) + return response From b1a0214057c1617b5857c492ed306be6e2db92f3 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 29 Jul 2013 21:36:15 +0200 Subject: [PATCH 134/143] Add argparse dependency for Python < 2.6 and >= 3.1 and <= 3.2. 
--- libcloud/utils/py3.py | 4 ++++ setup.py | 11 +++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/libcloud/utils/py3.py b/libcloud/utils/py3.py index 206324e1fb..1c44c27c00 100644 --- a/libcloud/utils/py3.py +++ b/libcloud/utils/py3.py @@ -28,6 +28,7 @@ PY25 = False PY27 = False PY3 = False +PY31 = False PY32 = False if sys.version_info >= (2, 0) and sys.version_info < (3, 0): @@ -42,6 +43,9 @@ if sys.version_info >= (3, 0): PY3 = True +if sys.version_info >= (3, 1) and sys.version_info < (3, 2): + PY31 = True + if sys.version_info >= (3, 2) and sys.version_info < (3, 3): PY32 = True diff --git a/setup.py b/setup.py index fc82122d68..2bbeba7339 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ import libcloud.utils.misc from libcloud.utils.dist import get_packages, get_data_files -from libcloud.utils.py3 import unittest2_required +from libcloud.utils.py3 import unittest2_required, PY31, PY32 libcloud.utils.misc.SHOW_DEPRECATION_WARNING = False @@ -229,6 +229,13 @@ def run(self): cov.save() cov.html_report() +if pre_python26: + dependencies = ['ssl', 'simplejson', 'argparse'] +elif PY31 or PY32: + dependencies = ['argparse'] +else: + dependencies = [] + setup( name='apache-libcloud', @@ -238,7 +245,7 @@ def run(self): ' and documentation, please see http://libcloud.apache.org', author='Apache Software Foundation', author_email='dev@libcloud.apache.org', - requires=([], ['ssl', 'simplejson'],)[pre_python26], + requires=dependencies, packages=get_packages('libcloud'), package_dir={ 'libcloud': 'libcloud', From 11db73751bfa956bbd484ba0d9f6b72213ae453d Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 31 Jul 2013 19:48:01 +0200 Subject: [PATCH 135/143] Add download_pricing_file function to libcloud.pricing module. 
--- libcloud/pricing.py | 61 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 59 insertions(+), 2 deletions(-) diff --git a/libcloud/pricing.py b/libcloud/pricing.py index db3b674fa1..6e5befec48 100644 --- a/libcloud/pricing.py +++ b/libcloud/pricing.py @@ -13,17 +13,31 @@ # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement + """ A class which handles loading the pricing files. """ +import os.path +from os.path import join as pjoin + try: import simplejson as json except ImportError: import json -import os.path -from os.path import join as pjoin +from libcloud.utils.connection import get_response_object + +__all__ = [ + 'get_pricing', + 'get_size_price', + 'set_pricing', + 'clear_pricing_data', + 'download_pricing_file' +] + +# Default URL to the pricing file +DEFAULT_FILE_URL = 'https://git-wip-us.apache.org/repos/asf?p=libcloud.git;a=blob_plain;f=libcloud/data/pricing.json' CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__)) DEFAULT_PRICING_FILE_PATH = pjoin(CURRENT_DIRECTORY, 'data/pricing.json') @@ -157,3 +171,46 @@ def invalidate_module_pricing_cache(driver_type, driver_name): """ if driver_name in PRICING_DATA[driver_type]: del PRICING_DATA[driver_type][driver_name] + + +def download_pricing_file(file_url=DEFAULT_FILE_URL, + file_path=CUSTOM_PRICING_FILE_PATH): + """ + Download pricing file from the file_url and save it to file_path. + + @type file_url: C{str} + @param file_url: URL pointing to the pricing file. + + @type file_path: C{str} + @param file_path: Path where a download pricing file will be saved. 
+ """ + dir_name = os.path.dirname(file_path) + + if not os.path.exists(dir_name): + # Verify a valid path is provided + msg = ('Can\'t write to %s, directory %s, doesn\'t exist' % + (file_path, dir_name)) + raise ValueError(msg) + + if os.path.exists(file_path) and os.path.isdir(file_path): + msg = ('Can\'t write to %s file path because it\'s a' + ' directory' % (file_path)) + raise ValueError(msg) + + response = get_response_object(file_url) + body = response.body + + # Verify pricing file is valid + try: + data = json.loads(body) + except json.decoder.JSONDecodeError: + msg = 'Provided URL doesn\'t contain valid pricing data' + raise Exception(msg) + + if not data.get('updated', None): + msg = 'Provided URL doesn\'t contain valid pricing data' + raise Exception(msg) + + # No need to stream it since file is small + with open(file_path, 'w') as file_handle: + file_handle.write(body) From e64365592bf63ac7b826ce1c3685bd417eef97ec Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 31 Jul 2013 19:48:37 +0200 Subject: [PATCH 136/143] Remove CLI stuff since it will be part of a separate PR. --- bin/libcloud | 30 -------------- libcloud/cli/__init__.py | 0 libcloud/cli/pricing.py | 84 ---------------------------------------- 3 files changed, 114 deletions(-) delete mode 100755 bin/libcloud delete mode 100644 libcloud/cli/__init__.py delete mode 100644 libcloud/cli/pricing.py diff --git a/bin/libcloud b/bin/libcloud deleted file mode 100755 index 270922e850..0000000000 --- a/bin/libcloud +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import with_statement - -import argparse - -from libcloud.cli.pricing import add_subparser, update_pricing - -parser = argparse.ArgumentParser(prog='libcloud', usage='%(prog)s') -subparsers = parser.add_subparsers(dest='subparser_name') -add_subparser(subparsers=subparsers) - -args = parser.parse_args() - -if args.subparser_name == 'update-pricing': - update_pricing(file_url=args.file_url, file_path=args.file_path) diff --git a/libcloud/cli/__init__.py b/libcloud/cli/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/libcloud/cli/pricing.py b/libcloud/cli/pricing.py deleted file mode 100644 index 584d292a77..0000000000 --- a/libcloud/cli/pricing.py +++ /dev/null @@ -1,84 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import sys - -try: - import simplejson as json -except ImportError: - import json - - -from libcloud.pricing import CUSTOM_PRICING_FILE_PATH -from libcloud.utils.connection import get_response_object - -__all__ = [ - 'add_subparser', - 'update_pricing' -] - -# Default URL to the pricing file -DEFAULT_FILE_URL = 'https://git-wip-us.apache.org/repos/asf?p=libcloud.git;a=blob_plain;f=libcloud/data/pricing.json' - - -def add_subparser(subparsers): - parser = subparsers.add_parser('update-pricing', - help='Update Libcloud pricing file') - parser.add_argument('--file-path', dest='file_path', action='store', - default=CUSTOM_PRICING_FILE_PATH, - help='Path where the file will be saved') - parser.add_argument('--file-url', dest='file_url', action='store', - default=DEFAULT_FILE_URL, - help='URL to the pricing file') - return parser - - -def update_pricing(file_url, file_path): - dir_name = os.path.dirname(file_path) - - if not os.path.exists(dir_name): - # Verify a valid path is provided - sys.stderr.write('Can\'t write to %s, directory %s, doesn\'t exist\n' % - (file_path, dir_name)) - sys.exit(2) - - if os.path.exists(file_path) and os.path.isdir(file_path): - sys.stderr.write('Can\'t write to %s file path because it\'s a' - ' directory\n' % - (file_path)) - sys.exit(2) - - response = get_response_object(file_url) - body = response.body - - # Verify pricing file is valid - try: - data = json.loads(body) - except json.decoder.JSONDecodeError: - sys.stderr.write('Provided URL doesn\'t contain valid pricing' - ' data\n') - sys.exit(3) - - if not data.get('updated', None): - sys.stderr.write('Provided URL doesn\'t contain valid pricing' - ' data\n') - sys.exit(3) - - # No need to stream it since file is small - with open(file_path, 'w') as file_handle: - file_handle.write(response.body) - - print('Pricing file saved to %s' % (file_path)) From a1eda4e361f9f63c0b1a90229a976a173961dc06 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 31 Jul 2013 19:51:04 
+0200 Subject: [PATCH 137/143] Revert "Add argparse dependency for Python < 2.6 and >= 3.1 and <= 3.2." This reverts commit a62b97a098441603832deb239307a8cd7cf53029. --- libcloud/utils/py3.py | 4 ---- setup.py | 11 ++--------- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/libcloud/utils/py3.py b/libcloud/utils/py3.py index 1c44c27c00..206324e1fb 100644 --- a/libcloud/utils/py3.py +++ b/libcloud/utils/py3.py @@ -28,7 +28,6 @@ PY25 = False PY27 = False PY3 = False -PY31 = False PY32 = False if sys.version_info >= (2, 0) and sys.version_info < (3, 0): @@ -43,9 +42,6 @@ if sys.version_info >= (3, 0): PY3 = True -if sys.version_info >= (3, 1) and sys.version_info < (3, 2): - PY31 = True - if sys.version_info >= (3, 2) and sys.version_info < (3, 3): PY32 = True diff --git a/setup.py b/setup.py index 2bbeba7339..fc82122d68 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ import libcloud.utils.misc from libcloud.utils.dist import get_packages, get_data_files -from libcloud.utils.py3 import unittest2_required, PY31, PY32 +from libcloud.utils.py3 import unittest2_required libcloud.utils.misc.SHOW_DEPRECATION_WARNING = False @@ -229,13 +229,6 @@ def run(self): cov.save() cov.html_report() -if pre_python26: - dependencies = ['ssl', 'simplejson', 'argparse'] -elif PY31 or PY32: - dependencies = ['argparse'] -else: - dependencies = [] - setup( name='apache-libcloud', @@ -245,7 +238,7 @@ def run(self): ' and documentation, please see http://libcloud.apache.org', author='Apache Software Foundation', author_email='dev@libcloud.apache.org', - requires=dependencies, + requires=([], ['ssl', 'simplejson'],)[pre_python26], packages=get_packages('libcloud'), package_dir={ 'libcloud': 'libcloud', From caa7ab9fdb99a099dab683e550d7b4e782e85713 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 31 Jul 2013 19:54:20 +0200 Subject: [PATCH 138/143] Fix a typo. 
--- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index fc82122d68..1ed4ebdf6e 100644 --- a/setup.py +++ b/setup.py @@ -119,7 +119,7 @@ def _run_tests(self): if mtime_dist > mtime_current: print("It looks like test/secrets.py file is out of date.") - print("Please copy the new secret.py-dist file over otherwise" + + print("Please copy the new secrets.py-dist file over otherwise" + " tests might fail") if pre_python26: From 5820e01886fa9ed73261c5e006b511d8873a2a4a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 1 Aug 2013 14:31:22 +0200 Subject: [PATCH 139/143] Update changes. --- CHANGES | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGES b/CHANGES index a308d8b97e..b9917a69cf 100644 --- a/CHANGES +++ b/CHANGES @@ -2,6 +2,17 @@ Changes with Apache Libcloud in development + *) General + + - By default read pricing data from ~/.libcloud/pricing.json if this file + exists. If it doesn't it uses old behavior and falls back to pricing file + bundled with a libcloud release. + [Tomaz Muraus] + + - Add libcloud.pricing.download_pricing_file function for downloading and + updating the pricing file. + [Tomaz Muraus] + *) Compute - Modify ElasticHosts drive to store drive UUID in 'extra' field. From 57e6f4ec59e22687e9f2642dbf5bea9d04aa342d Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 1 Aug 2013 18:35:21 +0200 Subject: [PATCH 140/143] Add travis config. 
--- .travis.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000000..f750c18b64 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,15 @@ +language: python +python: 2.7 +env: + - TOX_ENV=py25 + - TOX_ENV=py26 + - TOX_ENV=py27 + - TOX_ENV=pypy + - TOX_ENV=py32 + - TOX_ENV=py33 + +install: + - pip install tox + +script: + - tox -e $TOX_ENV From 506c0fc07aaeb5c7477c3ab920d5e1a48777bb12 Mon Sep 17 00:00:00 2001 From: John Carr Date: Thu, 1 Aug 2013 18:11:42 +0100 Subject: [PATCH 141/143] Fix create_node feature metadata (LIBCLOUD-367) Signed-off-by: Tomaz Muraus Conflicts: libcloud/compute/drivers/ec2.py --- libcloud/compute/base.py | 83 +++++++++++++++++------- libcloud/compute/drivers/abiquo.py | 7 +- libcloud/compute/drivers/bluebox.py | 10 +-- libcloud/compute/drivers/brightbox.py | 1 - libcloud/compute/drivers/digitalocean.py | 1 - libcloud/compute/drivers/ec2.py | 9 +-- libcloud/compute/drivers/hostvirtual.py | 10 ++- libcloud/compute/drivers/linode.py | 7 +- libcloud/compute/drivers/opsource.py | 15 +++-- libcloud/compute/drivers/rimuhosting.py | 10 +-- libcloud/compute/drivers/vcloud.py | 11 ++-- libcloud/test/compute/test_base.py | 56 ++++++++++++++++ 12 files changed, 157 insertions(+), 63 deletions(-) diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 53a22314a4..545ca00bd2 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -358,8 +358,9 @@ class NodeAuthPassword(object): """ A password to be used for authentication to a node. 
""" - def __init__(self, password): + def __init__(self, password, generated=False): self.password = password + self.generated = generated def __repr__(self): return '' @@ -478,6 +479,41 @@ def __init__(self, key, secret=None, secure=True, host=None, port=None, host=host, port=port, api_version=api_version, **kwargs) + def _get_and_check_auth(self, auth): + """ + Helper function for providers supporting L{NodeAuthPassword} or + L{NodeAuthSSHKey} + + Validates that only a supported object type is passed to the auth + parameter and raises an exception if it is not. + + If no L{NodeAuthPassword} object is provided but one is expected then a + password is automatically generated. + """ + + if isinstance(auth, NodeAuthPassword): + if 'password' in self.features['create_node']: + return auth + raise LibcloudError( + 'Password provided as authentication information, but password' + 'not supported', driver=self) + + if isinstance(auth, NodeAuthSSHKey): + if 'ssh_key' in self.features['create_node']: + return auth + raise LibcloudError( + 'SSH Key provided as authentication information, but SSH Key' + 'not supported', driver=self) + + if 'password' in self.features['create_node']: + value = os.urandom(16) + return NodeAuthPassword(binascii.hexlify(value), generated=True) + + if auth: + raise LibcloudError( + '"auth" argument provided, but it was not a NodeAuthPassword' + 'or NodeAuthSSHKey object', driver=self) + def create_node(self, **kwargs): """Create a new node instance. @@ -582,8 +618,8 @@ def deploy_node(self, **kwargs): """ Create a new node, and start deployment. - Depends on a Provider Driver supporting either using a specific - password or returning a generated password. + Depends on user providing authentication information or the Provider + Driver generating a password and returning it. 
This function may raise a :class:`DeploymentException`, if a create_node call was successful, but there is a later error (like SSH failing or @@ -656,29 +692,33 @@ def deploy_node(self, **kwargs): raise RuntimeError('paramiko is not installed. You can install ' + 'it using pip: pip install paramiko') - password = None - - if 'create_node' not in self.features: - raise NotImplementedError( - 'deploy_node not implemented for this driver') - elif 'generates_password' not in self.features["create_node"]: - if 'password' not in self.features["create_node"] and \ - 'ssh_key' not in self.features["create_node"]: + if 'auth' in kwargs: + auth = kwargs['auth'] + if not isinstance(auth, (NodeAuthSSHKey, NodeAuthPassword)): + raise NotImplementedError( + 'If providing auth, only NodeAuthSSHKey or' + 'NodeAuthPassword is supported') + elif 'ssh_key' in kwargs: + # If an ssh_key is provided we can try deploy_node + pass + elif 'create_node' in self.features: + f = self.features['create_node'] + if not 'generates_password' in f and not "password" in f: raise NotImplementedError( 'deploy_node not implemented for this driver') - - if 'auth' not in kwargs: - value = os.urandom(16) - kwargs['auth'] = NodeAuthPassword(binascii.hexlify(value)) - - if 'ssh_key' not in kwargs: - password = kwargs['auth'].password + else: + raise NotImplementedError( + 'deploy_node not implemented for this driver') node = self.create_node(**kwargs) max_tries = kwargs.get('max_tries', 3) - if 'generates_password' in self.features['create_node']: - password = node.extra.get('password') + password = None + if 'auth' in kwargs: + if isinstance(kwargs['auth'], NodeAuthPassword): + password = kwargs['auth'].password + elif 'password' in node.extra: + password = node.extra['password'] ssh_interface = kwargs.get('ssh_interface', 'public_ips') @@ -693,9 +733,6 @@ def deploy_node(self, **kwargs): e = sys.exc_info()[1] raise DeploymentError(node=node, original_exception=e, driver=self) - if password: - 
node.extra['password'] = password - ssh_username = kwargs.get('ssh_username', 'root') ssh_alternate_usernames = kwargs.get('ssh_alternate_usernames', []) ssh_port = kwargs.get('ssh_port', 22) diff --git a/libcloud/compute/drivers/abiquo.py b/libcloud/compute/drivers/abiquo.py index 7a10a99f6e..98601cc157 100644 --- a/libcloud/compute/drivers/abiquo.py +++ b/libcloud/compute/drivers/abiquo.py @@ -40,7 +40,6 @@ class AbiquoNodeDriver(NodeDriver): name = 'Abiquo' website = 'http://www.abiquo.com/' connectionCls = AbiquoConnection - features = {'create_node': ['password']} timeout = 2000 # some images take a lot of time! # Media Types @@ -104,10 +103,6 @@ def create_node(self, **kwargs): undefined behavoir will be selected. (optional) @type location: L{NodeLocation} - @keyword auth: Initial authentication information for the node - (optional) - @type auth: L{NodeAuthPassword} - @keyword group_name: Which group this node belongs to. If empty, it will be created into 'libcloud' group. If it does not found any group in the target @@ -684,7 +679,7 @@ def _define_create_node_group(self, xml_loc, loc, **kwargs): for vapp in vapps_element.findall('virtualAppliance'): if vapp.findtext('name') == group_name: uri_vapp = get_href(vapp, 'edit') - return NodeGroup(self, vapp.findtext('name'), uri=uri_vapp) + return NodeGroup(self, vapp.findtext('name'), uri=uri_vapp) # target group not found: create it. 
Since it is an extension of # the basic 'libcloud' functionality, we try to be as flexible as diff --git a/libcloud/compute/drivers/bluebox.py b/libcloud/compute/drivers/bluebox.py index ee9b3cee06..8dc1ba2db4 100644 --- a/libcloud/compute/drivers/bluebox.py +++ b/libcloud/compute/drivers/bluebox.py @@ -135,6 +135,7 @@ class BlueboxNodeDriver(NodeDriver): api_name = 'bluebox' name = 'Bluebox Blocks' website = 'http://bluebox.net' + features = {'create_node': ['ssh_key', 'password']} def list_nodes(self): result = self.connection.request('/api/blocks.json') @@ -166,10 +167,7 @@ def create_node(self, **kwargs): image = kwargs['image'] size = kwargs['size'] - try: - auth = kwargs['auth'] - except Exception: - raise Exception("SSH public key or password required.") + auth = self._get_and_check_auth(kwargs.get('auth')) data = { 'hostname': name, @@ -197,6 +195,10 @@ def create_node(self, **kwargs): result = self.connection.request('/api/blocks.json', headers=headers, data=params, method='POST') node = self._to_node(result.object) + + if getattr(auth, "generated", False): + node.extra['password'] = auth.password + return node def destroy_node(self, node): diff --git a/libcloud/compute/drivers/brightbox.py b/libcloud/compute/drivers/brightbox.py index 1747fdb46e..ce307ea8b1 100644 --- a/libcloud/compute/drivers/brightbox.py +++ b/libcloud/compute/drivers/brightbox.py @@ -44,7 +44,6 @@ class BrightboxNodeDriver(NodeDriver): type = Provider.BRIGHTBOX name = 'Brightbox' website = 'http://www.brightbox.co.uk/' - features = {'create_node': ['ssh_key']} NODE_STATE_MAP = {'creating': NodeState.PENDING, 'active': NodeState.RUNNING, diff --git a/libcloud/compute/drivers/digitalocean.py b/libcloud/compute/drivers/digitalocean.py index 0f4ee4083c..09ace5f386 100644 --- a/libcloud/compute/drivers/digitalocean.py +++ b/libcloud/compute/drivers/digitalocean.py @@ -75,7 +75,6 @@ class DigitalOceanNodeDriver(NodeDriver): type = Provider.DIGITAL_OCEAN name = 'Digital Ocean' website = 
'https://www.digitalocean.com' - features = {'create_node': ['ssh_key']} NODE_STATE_MAP = {'new': NodeState.PENDING, 'off': NodeState.REBOOTING, diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index aecfdb2495..17702fd199 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -423,7 +423,6 @@ class BaseEC2NodeDriver(NodeDriver): connectionCls = EC2Connection path = '/' - features = {'create_node': ['ssh_key']} NODE_STATE_MAP = { 'pending': NodeState.PENDING, @@ -1333,13 +1332,15 @@ def create_node(self, **kwargs): if 'ex_blockdevicemappings' in kwargs: if not isinstance(kwargs['ex_blockdevicemappings'], (list, tuple)): - raise AttributeError('ex_blockdevicemappings not list or tuple') + raise AttributeError( + 'ex_blockdevicemappings not list or tuple') for idx, mapping in enumerate(kwargs['ex_blockdevicemappings']): idx += 1 # we want 1-based indexes if not isinstance(mapping, dict): - raise AttributeError('mapping %s in ex_blockdevicemappings ' - 'not a dict' % mapping) + raise AttributeError( + 'mapping %s in ex_blockdevicemappings ' + 'not a dict' % mapping) for k, v in mapping.items(): params['BlockDeviceMapping.%d.%s' % (idx, k)] = str(v) diff --git a/libcloud/compute/drivers/hostvirtual.py b/libcloud/compute/drivers/hostvirtual.py index e311224d52..de5e73da43 100644 --- a/libcloud/compute/drivers/hostvirtual.py +++ b/libcloud/compute/drivers/hostvirtual.py @@ -61,6 +61,7 @@ class HostVirtualNodeDriver(NodeDriver): name = 'HostVirtual' website = 'http://www.vr.org' connectionCls = HostVirtualComputeConnection + features = {'create_node': ['ssh_key', 'password']} def __init__(self, key): self.location = None @@ -164,6 +165,8 @@ def create_node(self, **kwargs): size = kwargs['size'] image = kwargs['image'] + auth = self._get_and_check_auth(kwargs.get('auth')) + params = {'plan': size.name} dc = DEFAULT_NODE_LOCATION_ID @@ -186,9 +189,12 @@ def create_node(self, **kwargs): }) # provisioning a server 
using the stub node - self.ex_provision_node(node=stub_node, auth=kwargs['auth']) - + self.ex_provision_node(node=stub_node, auth=auth) node = self._wait_for_node(stub_node.id) + + if getattr(auth, 'generated', False): + node.extra['password'] = auth.password + return node def reboot_node(self, node): diff --git a/libcloud/compute/drivers/linode.py b/libcloud/compute/drivers/linode.py index 86eb61c7f9..c5cae83dc8 100644 --- a/libcloud/compute/drivers/linode.py +++ b/libcloud/compute/drivers/linode.py @@ -208,7 +208,7 @@ def create_node(self, **kwargs): name = kwargs["name"] image = kwargs["image"] size = kwargs["size"] - auth = kwargs["auth"] + auth = self._get_and_check_auth(kwargs["auth"]) # Pick a location (resolves LIBCLOUD-41 in JIRA) if "location" in kwargs: @@ -372,7 +372,10 @@ def create_node(self, **kwargs): nodes = self._to_nodes(data) if len(nodes) == 1: - return nodes[0] + node = nodes[0] + if getattr(auth, "generated", False): + node.extra['password'] = auth.password + return node return None diff --git a/libcloud/compute/drivers/opsource.py b/libcloud/compute/drivers/opsource.py index 8b89ea8b69..5796a44579 100644 --- a/libcloud/compute/drivers/opsource.py +++ b/libcloud/compute/drivers/opsource.py @@ -281,12 +281,8 @@ def create_node(self, **kwargs): # cannot be set at create time because size is part of the # image definition. password = None - if 'auth' in kwargs: - auth = kwargs.get('auth') - if isinstance(auth, NodeAuthPassword): - password = auth.password - else: - raise ValueError('auth must be of NodeAuthPassword type') + auth = self._get_and_check_auth(kwargs.get('auth')) + password = auth.password ex_description = kwargs.get('ex_description', '') ex_isStarted = kwargs.get('ex_isStarted', True) @@ -319,7 +315,12 @@ def create_node(self, **kwargs): # XXX: return the last node in the list that has a matching name. 
this # is likely but not guaranteed to be the node we just created # because opsource allows multiple nodes to have the same name - return list(filter(lambda x: x.name == name, self.list_nodes()))[-1] + node = list(filter(lambda x: x.name == name, self.list_nodes()))[-1] + + if getattr(auth, "generated", False): + node.extra['password'] = auth.password + + return node def destroy_node(self, node): body = self.connection.request_with_orgId( diff --git a/libcloud/compute/drivers/rimuhosting.py b/libcloud/compute/drivers/rimuhosting.py index eb81a91360..6db9b0d791 100644 --- a/libcloud/compute/drivers/rimuhosting.py +++ b/libcloud/compute/drivers/rimuhosting.py @@ -116,6 +116,7 @@ class RimuHostingNodeDriver(NodeDriver): name = 'RimuHosting' website = 'http://rimuhosting.com/' connectionCls = RimuHostingConnection + features = {'create_node': ['password']} def __init__(self, key, host=API_HOST, port=443, api_context=API_CONTEXT, secure=True): @@ -283,11 +284,8 @@ def create_node(self, **kwargs): data['instantiation_options']['control_panel'] = \ kwargs['ex_control_panel'] - if 'auth' in kwargs: - auth = kwargs['auth'] - if not isinstance(auth, NodeAuthPassword): - raise ValueError('auth must be of NodeAuthPassword type') - data['instantiation_options']['password'] = auth.password + auth = self._get_and_check_auth(kwargs.get('auth')) + data['instantiation_options']['password'] = auth.password if 'ex_billing_oid' in kwargs: #TODO check for valid oid. 
@@ -345,5 +343,3 @@ def list_locations(self): NodeLocation('DCLONDON', "RimuHosting London", 'GB', self), NodeLocation('DCSYDNEY', "RimuHosting Sydney", 'AU', self), ] - - features = {"create_node": ["password"]} diff --git a/libcloud/compute/drivers/vcloud.py b/libcloud/compute/drivers/vcloud.py index e7d9a7e92a..02638e652d 100644 --- a/libcloud/compute/drivers/vcloud.py +++ b/libcloud/compute/drivers/vcloud.py @@ -718,12 +718,8 @@ def create_node(self, **kwargs): network = '' password = None - if 'auth' in kwargs: - auth = kwargs['auth'] - if isinstance(auth, NodeAuthPassword): - password = auth.password - else: - raise ValueError('auth must be of NodeAuthPassword type') + auth = self._get_and_check_auth(kwargs.get('auth')) + password = auth.password instantiate_xml = InstantiateVAppXML( name=name, @@ -759,6 +755,9 @@ def create_node(self, **kwargs): res = self.connection.request(vapp_path) node = self._to_node(res.object) + if getattr(auth, "generated", False): + node.extra['password'] = auth.password + return node features = {"create_node": ["password"]} diff --git a/libcloud/test/compute/test_base.py b/libcloud/test/compute/test_base.py index 17081c128d..750d527118 100644 --- a/libcloud/test/compute/test_base.py +++ b/libcloud/test/compute/test_base.py @@ -17,7 +17,9 @@ from libcloud.common.base import Response from libcloud.common.base import Connection, ConnectionKey, ConnectionUserAndKey +from libcloud.common.types import LibcloudError from libcloud.compute.base import Node, NodeSize, NodeImage, NodeDriver +from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword from libcloud.test import MockResponse # pylint: disable-msg=E0611 @@ -52,5 +54,59 @@ def test_base_connection_userkey(self): def test_base_connection_timeout(self): Connection(timeout=10) + +class TestValidateAuth(unittest.TestCase): + + def test_get_auth_ssh(self): + n = NodeDriver('foo') + n.features = {'create_node': ['ssh_key']} + auth = NodeAuthSSHKey('pubkey...') + 
self.assertEqual(auth, n._get_and_check_auth(auth)) + + def test_get_auth_ssh_but_given_password(self): + n = NodeDriver('foo') + n.features = {'create_node': ['ssh_key']} + auth = NodeAuthPassword('password') + self.assertRaises(LibcloudError, n._get_and_check_auth, auth) + + def test_get_auth_password(self): + n = NodeDriver('foo') + n.features = {'create_node': ['password']} + auth = NodeAuthPassword('password') + self.assertEqual(auth, n._get_and_check_auth(auth)) + + def test_get_auth_password_but_given_ssh_key(self): + n = NodeDriver('foo') + n.features = {'create_node': ['password']} + auth = NodeAuthSSHKey('publickey') + self.assertRaises(LibcloudError, n._get_and_check_auth, auth) + + def test_get_auth_default_ssh_key(self): + n = NodeDriver('foo') + n.features = {'create_node': ['ssh_key']} + self.assertEqual(None, n._get_and_check_auth(None)) + + def test_get_auth_default_password(self): + n = NodeDriver('foo') + n.features = {'create_node': ['password']} + auth = n._get_and_check_auth(None) + self.assertTrue(isinstance(auth, NodeAuthPassword)) + + def test_get_auth_default_no_feature(self): + n = NodeDriver('foo') + self.assertEqual(None, n._get_and_check_auth(None)) + + def test_get_auth_generates_password_but_given_nonsense(self): + n = NodeDriver('foo') + n.features = {'create_node': ['generates_password']} + auth = "nonsense" + self.assertRaises(LibcloudError, n._get_and_check_auth, auth) + + def test_get_auth_no_features_but_given_nonsense(self): + n = NodeDriver('foo') + auth = "nonsense" + self.assertRaises(LibcloudError, n._get_and_check_auth, auth) + + if __name__ == '__main__': sys.exit(unittest.main()) From ee54c26e025ddd9ef5fc1c8661d8c1da641dc883 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 1 Aug 2013 19:51:09 +0200 Subject: [PATCH 142/143] Fix a bug with encoding in Python 3 which was exposed by previous commit. In Python 3, binascii.hexlify returns bytes, but we want a str. 
--- libcloud/common/openstack.py | 2 +- libcloud/compute/base.py | 3 ++- libcloud/compute/deployment.py | 4 +++- libcloud/compute/drivers/ecp.py | 2 +- libcloud/storage/drivers/azure_blobs.py | 10 ++++++---- 5 files changed, 13 insertions(+), 8 deletions(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index ba1425a177..622a27547c 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -605,7 +605,7 @@ def _populate_hosts_and_request_paths(self): self._tuple_from_url(url) def _add_cache_busting_to_params(self, params): - cache_busting_number = binascii.hexlify(os.urandom(8)) + cache_busting_number = binascii.hexlify(os.urandom(8)).decode('ascii') if isinstance(params, dict): params['cache-busting'] = cache_busting_number diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 545ca00bd2..0a84dd248d 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -507,7 +507,8 @@ def _get_and_check_auth(self, auth): if 'password' in self.features['create_node']: value = os.urandom(16) - return NodeAuthPassword(binascii.hexlify(value), generated=True) + value = binascii.hexlify(value).decode('ascii') + return NodeAuthPassword(value, generated=True) if auth: raise LibcloudError( diff --git a/libcloud/compute/deployment.py b/libcloud/compute/deployment.py index 103315cde7..d0925b5546 100644 --- a/libcloud/compute/deployment.py +++ b/libcloud/compute/deployment.py @@ -142,7 +142,9 @@ def __init__(self, script, name=None, delete=False): if self.name is None: # File is put under user's home directory # (~/libcloud_deployment_.sh) - self.name = 'libcloud_deployment_%s.sh' % (binascii.hexlify(os.urandom(4))) + random_string = binascii.hexlify(os.urandom(4)) + random_string = random_string.decode('ascii') + self.name = 'libcloud_deployment_%s.sh' % (random_string) def run(self, node, client): """ diff --git a/libcloud/compute/drivers/ecp.py b/libcloud/compute/drivers/ecp.py index dff49c07d3..aa77440fae 
100644 --- a/libcloud/compute/drivers/ecp.py +++ b/libcloud/compute/drivers/ecp.py @@ -102,7 +102,7 @@ def _encode_multipart_formdata(self, fields): #use a random boundary that does not appear in the fields boundary = '' while boundary in ''.join(fields): - boundary = u(binascii.hexlify(os.urandom(16))) + boundary = binascii.hexlify(os.urandom(16)).decode('utf-8') L = [] for i in fields: L.append('--' + boundary) diff --git a/libcloud/storage/drivers/azure_blobs.py b/libcloud/storage/drivers/azure_blobs.py index 5512d700a0..06e4511c76 100644 --- a/libcloud/storage/drivers/azure_blobs.py +++ b/libcloud/storage/drivers/azure_blobs.py @@ -295,8 +295,9 @@ def _xml_to_object(self, container, blob): } if extra['md5_hash']: - extra['md5_hash'] = binascii.hexlify( - base64.b64decode(b(extra['md5_hash']))) + value = binascii.hexlify(base64.b64decode(b(extra['md5_hash']))) + value = value.decode('ascii') + extra['md5_hash'] = value meta_data = {} for meta in metadata.getchildren(): @@ -344,8 +345,9 @@ def _response_to_object(self, object_name, container, response): } if extra['md5_hash']: - extra['md5_hash'] = binascii.hexlify( - base64.b64decode(b(extra['md5_hash']))) + value = binascii.hexlify(base64.b64decode(b(extra['md5_hash']))) + value = value.decode('ascii') + extra['md5_hash'] = value meta_data = {} for key, value in response.headers.items(): From 8fa65a0fbd5c1febbac13d5de2d98175f0e9d1c5 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 1 Aug 2013 20:35:50 +0200 Subject: [PATCH 143/143] Update changes. --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index b9917a69cf..8cc5e94ed3 100644 --- a/CHANGES +++ b/CHANGES @@ -33,6 +33,10 @@ Changes with Apache Libcloud in development - Add Ubuntu Linux 12.04 image to ElasticHosts driver. (LIBCLOUD-364) [Bob Thompson] + - Fix create_node "features" metadata and update affected drivers. 
+ (LIBCLOUD-367) + [John Carr] + *) Storage - Fix a regression with calling encode_container_name instead of